diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index a56866a8bd73..fa33d4c8d7a1 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.29.0", - "prover": "16.5.0", + "core": "25.1.0", + "prover": "17.0.0", "zkstack_cli": "0.1.2" } diff --git a/.github/workflows/build-contract-verifier-template.yml b/.github/workflows/build-contract-verifier-template.yml index e4d04b90410e..1481e542de57 100644 --- a/.github/workflows/build-contract-verifier-template.yml +++ b/.github/workflows/build-contract-verifier-template.yml @@ -101,7 +101,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run pre_download_compilers.sh diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index fe1d23427645..15d4432191dd 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -114,7 +114,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run pre_download_compilers.sh diff --git a/.github/workflows/build-local-node-docker.yml b/.github/workflows/build-local-node-docker.yml index 80142cb6005c..cbb4239b5725 100644 --- a/.github/workflows/build-local-node-docker.yml +++ b/.github/workflows/build-local-node-docker.yml @@ -50,7 +50,6 @@ jobs: - name: start-services run: | - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres diff --git a/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml index b92fb8e81110..30990889caf6 100644 --- a/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml +++ b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml @@ -28,7 +28,6 @@ jobs: - name: Download Setup data run: | gsutil -m rsync -r gs://matterlabs-setup-data-us/${{ inputs.setup_keys_id }} docker/prover-gpu-fri-gar - cp -v docker/prover-gpu-fri-gar/*.bin docker/circuit-prover-gpu-gar/ - name: Login to us-central1 GAR run: | @@ -70,6 +69,10 @@ jobs: --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} + - name: Move Setup data from prover-gpu-fri-gar to circuit-prover-gpu-gar + run: | + mv -v docker/prover-gpu-fri-gar/*.bin docker/circuit-prover-gpu-gar/ + - name: Build and push circuit-prover-gpu-gar uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index d6ec61114c7b..91de5dd51ecf 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -45,7 +45,7 @@ jobs: RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" ERA_BELLMAN_CUDA_RELEASE: ${{ inputs.ERA_BELLMAN_CUDA_RELEASE }} CUDA_ARCH: ${{ inputs.CUDA_ARCH }} - runs-on: [ matterlabs-ci-runner-high-performance ] + runs-on: 
[matterlabs-ci-runner-high-performance] strategy: matrix: component: @@ -56,6 +56,7 @@ jobs: - prover-fri-gateway - prover-job-monitor - proof-fri-gpu-compressor + - prover-autoscaler outputs: protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} steps: @@ -74,7 +75,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run sccache --start-server @@ -91,7 +91,6 @@ jobs: run: | ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^24.key - - name: login to Docker registries if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')) run: | diff --git a/.github/workflows/build-tee-prover-template.yml b/.github/workflows/build-tee-prover-template.yml index 0e5b80d2e3a2..c55e06931247 100644 --- a/.github/workflows/build-tee-prover-template.yml +++ b/.github/workflows/build-tee-prover-template.yml @@ -76,4 +76,3 @@ jobs: docker push "${repo}/${tag}" done done - diff --git a/.github/workflows/build-witness-generator-template.yml b/.github/workflows/build-witness-generator-template.yml index 33d78b3cf2fc..d9493f97cae1 100644 --- a/.github/workflows/build-witness-generator-template.yml +++ b/.github/workflows/build-witness-generator-template.yml @@ -75,7 +75,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run sccache --start-server diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml index 7d75fb224d6e..d57630d3029a 100644 --- a/.github/workflows/ci-common-reusable.yml +++ b/.github/workflows/ci-common-reusable.yml @@ -27,9 +27,8 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - + - name: Install zkstack run: | ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup @@ -39,4 +38,3 @@ jobs: # `zk lint prover` = cargo clippy, which does cargo check behind the scenes, which is a lightweight version of cargo build - name: Lints run: ci_run zkstack dev lint -t rs --check - diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml index 53b25835ff57..0d4db601c467 100644 --- a/.github/workflows/ci-core-lint-reusable.yml +++ b/.github/workflows/ci-core-lint-reusable.yml @@ -49,6 +49,8 @@ jobs: ci_run zkstack dev lint -t js --check ci_run zkstack dev lint -t ts --check ci_run zkstack dev lint -t rs --check + ci_run zkstack dev lint -t autocompletion --check + ci_run zkstack dev lint -t rust-toolchain - name: Check Database run: | diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 9aaa476d740d..d76bb776968d 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -8,6 +8,10 @@ on: required: false default: '[{ "zksolc": ["1.3.14", "1.3.16", "1.3.17", "1.3.1", "1.3.7", "1.3.18", "1.3.19", "1.3.21"] } , { "zkvyper": ["1.3.13"] }]' +env: + RUST_BACKTRACE: 1 + PASSED_ENV_VARS: RUST_BACKTRACE + jobs: lint: name: lint @@ -30,6 +34,7 @@ jobs: echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" 
>> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env + echo RUN_CONTRACT_VERIFICATION_TEST=true >> .env # TODO: Remove after we upgrade the hardhat plugins - name: pre-download compilers run: | @@ -56,7 +61,7 @@ jobs: - name: Init run: | ci_run run_retried rustup show - + - name: Install zkstack run: | ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup @@ -69,6 +74,9 @@ jobs: - name: Contracts unit tests run: ci_run yarn l1-contracts test + - name: Download compilers for contract verifier tests + run: ci_run zkstack contract-verifier init --zksolc-version=v1.5.3 --zkvyper-version=v1.5.4 --solc-version=0.8.26 --vyper-version=v0.3.10 --era-vm-solc-version=0.8.26-1.0.1 --only --chain era + - name: Rust unit tests run: | ci_run zkstack dev test rust @@ -101,7 +109,7 @@ - name: Loadtest configuration run: | - echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'NEW' && 21000 || 16000 }} >> .env + echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'NEW' && 30000 || 16000 }} >> .env echo ACCOUNTS_AMOUNT="100" >> .env echo MAX_INFLIGHT_TXS="10" >> .env echo SYNC_API_REQUESTS_LIMIT="15" >> .env @@ -139,10 +147,10 @@ --base-token-price-denominator 1 \ --set-as-default false \ --ignore-prerequisites \ - --legacy-bridge + --legacy-bridge \ + --evm-emulator false ci_run zkstack ecosystem init --dev --verbose - ci_run zkstack dev contracts --test-contracts # `sleep 60` because we need to wait until the server has added all the tokens - name: Run server @@ -193,7 +201,7 @@ run: | ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true ci_run zkstackup -g --local - + - name: Create log directories run: | SERVER_LOGS_DIR=logs/server @@ -254,7 +262,7 @@ --base-token-price-nominator 1 \ --base-token-price-denominator 1 \ --set-as-default false \ - --ignore-prerequisites + --ignore-prerequisites \ + --evm-emulator false ci_run zkstack chain init \ --deploy-paymaster \ @@ -272,10 +281,11 @@ --wallet-creation localhost \ --l1-batch-commit-data-generator-mode rollup \ --base-token-address ${{ env.CUSTOM_TOKEN_ADDRESS }} \ - --base-token-price-nominator 3 \ - --base-token-price-denominator 2 \ + --base-token-price-nominator 314 \ + --base-token-price-denominator 1000 \ --set-as-default false \ - --ignore-prerequisites + --ignore-prerequisites \ + --evm-emulator false ci_run zkstack chain init \ --deploy-paymaster \ @@ -296,7 +306,8 @@ --base-token-price-nominator 1 \ --base-token-price-denominator 1 \ --set-as-default false \ - --ignore-prerequisites + --ignore-prerequisites \ + --evm-emulator false ci_run zkstack chain build-transactions --chain offline_chain --l1-rpc-url http://127.0.0.1:8545 @@ -328,10 +339,11 @@ --wallet-creation localhost \ --l1-batch-commit-data-generator-mode validium \ --base-token-address ${{ env.CUSTOM_TOKEN_ADDRESS }} \ - --base-token-price-nominator 3 \ - --base-token-price-denominator 2 \ + --base-token-price-nominator 314 \ + --base-token-price-denominator 1000 \ --set-as-default false \ - --ignore-prerequisites + --ignore-prerequisites \ + --evm-emulator false ci_run zkstack chain init \ --deploy-paymaster \ @@ -349,24 +361,39 @@ run: | ci_run zkstack dev test build + - name: Build tested binaries + run: | + ci_run zkstack server build + ci_run zkstack external-node build + ci_run zkstack contract-verifier build + - name: Initialize Contract verifier run: | ci_run zkstack contract-verifier init --zksolc-version=v1.5.3 --zkvyper-version=v1.5.4 
--solc-version=0.8.26 --vyper-version=v0.3.10 --era-vm-solc-version=0.8.26-1.0.1 --only --chain era ci_run zkstack contract-verifier run --chain era &> ${{ env.SERVER_LOGS_DIR }}/contract-verifier-rollup.log & + ci_run zkstack contract-verifier wait --chain era --verbose - name: Run servers run: | + # Override the config for some of the chains to test the default config as well + ci_run zkstack dev config-writer --path etc/env/file_based/overrides/tests/integration.yaml --chain era + ci_run zkstack dev config-writer --path etc/env/file_based/overrides/tests/integration.yaml --chain validium + ci_run zkstack server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & ci_run zkstack server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & ci_run zkstack server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & ci_run zkstack server --ignore-prerequisites --chain consensus \ - --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ - &> ${{ env.SERVER_LOGS_DIR }}/consensus.log & + --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ + &> ${{ env.SERVER_LOGS_DIR }}/consensus.log & - ci_run sleep 5 + ci_run zkstack server wait --ignore-prerequisites --verbose --chain era + ci_run zkstack server wait --ignore-prerequisites --verbose --chain validium + ci_run zkstack server wait --ignore-prerequisites --verbose --chain custom_token + ci_run zkstack server wait --ignore-prerequisites --verbose --chain consensus - - name: Setup attester committee for the consensus chain + - name: Set up attester committee for the consensus chain run: | + ci_run zkstack consensus wait-for-registry --ignore-prerequisites --verbose --chain consensus ci_run zkstack consensus set-attester-committee --chain consensus --from-genesis &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log - name: Run integration tests @@ -399,12 +426,17 @@ run: | ci_run ./bin/run_on_all_chains.sh "zkstack dev test recovery --no-deps --no-kill --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - - name: Run external node server + - name: Run external nodes run: | ci_run zkstack external-node run --ignore-prerequisites --chain era &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/rollup.log & ci_run zkstack external-node run --ignore-prerequisites --chain validium &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/validium.log & ci_run zkstack external-node run --ignore-prerequisites --chain custom_token &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/custom_token.log & ci_run zkstack external-node run --ignore-prerequisites --chain consensus --enable-consensus &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/consensus.log & + + ci_run zkstack external-node wait --ignore-prerequisites --verbose --chain era + ci_run zkstack external-node wait --ignore-prerequisites --verbose --chain validium + ci_run zkstack external-node wait --ignore-prerequisites --verbose --chain custom_token + ci_run zkstack external-node wait --ignore-prerequisites --verbose --chain consensus - name: Run integration tests en run: | diff --git a/.github/workflows/ci-prover-e2e.yml b/.github/workflows/ci-prover-e2e.yml index 105ae1f1485d..7d037e0ab73c 100644 --- a/.github/workflows/ci-prover-e2e.yml +++ b/.github/workflows/ci-prover-e2e.yml @@ -29,7 +29,6 @@ jobs: - name: Start services 
run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres ./volumes/reth/data docker-compose -f ${RUNNER_COMPOSE_FILE} --profile runner up -d --wait ci_run sccache --start-server @@ -87,7 +86,7 @@ jobs: ci_run zkstack prover run --component=witness-generator --round=all-rounds --docker=false &>prover_logs/witness-generator.log & - name: Run Circuit Prover run: | - ci_run zkstack prover run --component=circuit-prover --witness-vector-generator-count=10 --docker=false &>prover_logs/circuit_prover.log & + ci_run zkstack prover run --component=circuit-prover -l=23 -h=3 --docker=false &>prover_logs/circuit_prover.log & - name: Wait for prover jobs to finish env: DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain @@ -101,7 +100,7 @@ jobs: - name: Kill prover & start compressor run: | sudo ./bin/prover_checkers/kill_prover - + ci_run zkstack prover run --component=compressor --docker=false &>prover_logs/compressor.log & - name: Wait for batch to be executed on L1 env: diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index 6cb9c26d21e7..7f719b2240db 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -27,7 +27,6 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - name: Install zkstack @@ -40,7 +39,9 @@ jobs: ci_run zkstack dev db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} - name: Formatting - run: ci_run bash -c "cd prover && cargo fmt --check" + run: | + ci_run git config --global --add safe.directory /usr/src/zksync + ci_run zkstack dev fmt --check rustfmt unit-tests: runs-on: [ matterlabs-ci-runner-highmem-long ] @@ -66,7 +67,6 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - name: Install zkstack diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2f29fe98f0e6..a4370457fecb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,7 +20,6 @@ jobs: outputs: core: ${{ steps.changed-files.outputs.core_any_changed }} prover: ${{ steps.changed-files.outputs.prover_any_changed }} - zkstack_cli: ${{ steps.changed-files.outputs.zkstack_cli_any_changed }} docs: ${{ steps.changed-files.outputs.docs_any_changed }} all: ${{ steps.changed-files.outputs.all_any_changed }} steps: @@ -178,6 +177,7 @@ jobs: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 action: "build" WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl" + ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/deploy-core-docs.yml b/.github/workflows/deploy-core-docs.yml new file mode 100644 index 000000000000..f01c56f68c3c --- /dev/null +++ b/.github/workflows/deploy-core-docs.yml @@ -0,0 +1,67 @@ +name: Deploy core docs + +on: + push: + branches: + - "main" + tags: + - "core-v*.*.*" + paths: + - 'docs/**' + - '.github/workflows/deploy-core-docs.yml' + pull_request: + paths: + - 'docs/**' + - '.github/workflows/deploy-core-docs.yml' + workflow_dispatch: + inputs: + ref: + description: "Branch, tag or 
commit to deploy the core docs. If empty, use the ref that triggered the workflow." + required: false + default: "" + version: + type: string + description: "Version of the documentation to deploy" + required: false + default: "latest" + +# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. +# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. +concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + + deploy-core-docs: + runs-on: ubuntu-latest + permissions: + contents: write + env: + DOCS_DIR: 'docs' + PROJECT: 'core' + ENABLE_TESTS: false + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + ref: ${{ inputs.ref || '' }} + + - name: Extract version from tag + if: startsWith(github.ref, 'refs/tags/') + id: extract-version + shell: 'bash -ex {0}' + run: | + TAG="${{ github.ref_name }}" + VERSION="${TAG#*-}" + echo "version=${VERSION}" >> "${GITHUB_OUTPUT}" + + - name: Deploy core docs + uses: matter-labs/deploy-mdbooks@73f638643d1be948d1002fe5433747f4a3e37a29 # v1 + with: + version: ${{ inputs.version || steps.extract-version.outputs.version || github.ref_name }} + docs-dir: ${{ env.DOCS_DIR }} + github-token: ${{ secrets.GITHUB_TOKEN }} + enable-tests: ${{ env.ENABLE_TESTS }} + project: ${{ env.PROJECT }} + deploy: ${{ github.event_name != 'pull_request' }} diff --git a/.github/workflows/deploy-prover-docs.yml b/.github/workflows/deploy-prover-docs.yml new file mode 100644 index 000000000000..7f797c61cf5a --- /dev/null +++ b/.github/workflows/deploy-prover-docs.yml @@ -0,0 +1,67 @@ +name: Deploy prover docs + +on: + push: + branches: + - "main" + tags: + - "prover-v*.*.*" + paths: + - 'prover/docs/**' + - '.github/workflows/deploy-prover-docs.yml' + pull_request: + paths: + - 'prover/docs/**' + - '.github/workflows/deploy-prover-docs.yml' + workflow_dispatch: + inputs: + ref: + description: "Branch, tag or commit to deploy the prover docs. If empty, use the ref that triggered the workflow." + required: false + default: "" + version: + type: string + description: "Version of the documentation to deploy" + required: false + default: "latest" + +# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. +# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 
+concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + + deploy-prover-docs: + runs-on: ubuntu-latest + permissions: + contents: write + env: + DOCS_DIR: 'prover/docs' + PROJECT: 'prover' + ENABLE_TESTS: false + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + ref: ${{ inputs.ref || '' }} + + - name: Extract version from tag + if: startsWith(github.ref, 'refs/tags/') + id: extract-version + shell: 'bash -ex {0}' + run: | + TAG="${{ github.ref_name }}" + VERSION="${TAG#*-}" + echo "version=${VERSION}" >> "${GITHUB_OUTPUT}" + + - name: Deploy prover docs + uses: matter-labs/deploy-mdbooks@73f638643d1be948d1002fe5433747f4a3e37a29 # v1 + with: + version: ${{ inputs.version || steps.extract-version.outputs.version || github.ref_name }} + docs-dir: ${{ env.DOCS_DIR }} + github-token: ${{ secrets.GITHUB_TOKEN }} + enable-tests: ${{ env.ENABLE_TESTS }} + project: ${{ env.PROJECT }} + deploy: ${{ github.event_name != 'pull_request' }} diff --git a/.github/workflows/new-build-contract-verifier-template.yml b/.github/workflows/new-build-contract-verifier-template.yml index 9b23cda6f02a..7d75f81fb73c 100644 --- a/.github/workflows/new-build-contract-verifier-template.yml +++ b/.github/workflows/new-build-contract-verifier-template.yml @@ -176,7 +176,11 @@ jobs: - name: Download setup key shell: bash run: | - run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + if [ -f "/setup_2^26.key" ]; then + cp '/setup_2^26.key' './setup_2^26.key' + else + run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + fi - name: Set env vars shell: bash @@ -208,7 +212,8 @@ jobs: uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: context: . 
- push: ${{ inputs.action == 'push' }} + load: true + platforms: ${{ matrix.platforms }} file: docker/${{ matrix.components }}/Dockerfile build-args: | SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage @@ -216,23 +221,16 @@ jobs: SCCACHE_GCS_RW_MODE=READ_WRITE RUSTC_WRAPPER=sccache tags: | - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest - matterlabs/${{ matrix.components }}:latest - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0 - matterlabs/${{ matrix.components }}:latest2.0 - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} - matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} - matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} - matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} - matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} - matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + - name: Push docker image + if: ${{ inputs.action == 'push' }} + run: | + docker push us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + docker push matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + + create_manifest: name: Create release manifest runs-on: matterlabs-ci-runner diff --git a/.github/workflows/new-build-core-template.yml b/.github/workflows/new-build-core-template.yml index c4aeb9180fda..ab3854733e62 100644 --- a/.github/workflows/new-build-core-template.yml +++ b/.github/workflows/new-build-core-template.yml @@ -187,7 +187,11 @@ jobs: - name: Download setup key shell: bash run: | - run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + if [ -f "/setup_2^26.key" ]; then + cp '/setup_2^26.key' './setup_2^26.key' + else + run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + fi - name: Set env vars shell: bash @@ -215,35 +219,28 @@ jobs: docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} gcloud auth configure-docker us-docker.pkg.dev -q - - name: Build and push + - name: Build docker image uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: context: . 
- push: ${{ inputs.action == 'push' }} + load: true + platforms: ${{ matrix.platforms }} file: docker/${{ matrix.components }}/Dockerfile build-args: | SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com SCCACHE_GCS_RW_MODE=READ_WRITE RUSTC_WRAPPER=sccache - tags: | - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest - matterlabs/${{ matrix.components }}:latest - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0 - matterlabs/${{ matrix.components }}:latest2.0 - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} - matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} - matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} - matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} - matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} - matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + - name: Push docker image + if: ${{ inputs.action == 'push' }} + run: | + docker push us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + docker push matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + create_manifest: name: Create release manifest runs-on: matterlabs-ci-runner @@ -265,13 +262,11 @@ jobs: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - name: login to Docker registries - shell: bash run: | docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} gcloud auth configure-docker us-docker.pkg.dev -q - name: Create Docker manifest - shell: bash run: | docker_repositories=("matterlabs/${{ matrix.component.name }}" "us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component.name }}") platforms=${{ matrix.component.platform }} diff --git a/.github/workflows/new-build-prover-template.yml b/.github/workflows/new-build-prover-template.yml index 5d42696c0b2a..cb254f602fc5 100644 --- a/.github/workflows/new-build-prover-template.yml +++ b/.github/workflows/new-build-prover-template.yml @@ -97,6 +97,7 @@ jobs: - prover-job-monitor - proof-fri-gpu-compressor - prover-autoscaler + - circuit-prover-gpu steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: @@ -127,7 +128,6 @@ jobs: if: matrix.components == 'proof-fri-gpu-compressor' run: | run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^24.key - # We need to run this only when ERA_BELLMAN_CUDA_RELEASE is not available # In our case it happens only when PR is created from 
fork - name: Wait for runner IP to be not rate-limited against GH API @@ -153,17 +153,33 @@ jobs: uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: context: . - push: ${{ inputs.action == 'push' }} + load: true build-args: | CUDA_ARCH=${{ inputs.CUDA_ARCH }} SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com SCCACHE_GCS_RW_MODE=READ_WRITE RUSTC_WRAPPER=sccache + PROTOCOL_VERSION=${{ env.PROTOCOL_VERSION }} + ERA_BELLMAN_CUDA_RELEASE=${{ inputs.ERA_BELLMAN_CUDA_RELEASE }} file: docker/${{ matrix.components }}/Dockerfile tags: | us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} matterlabs/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest + matterlabs/${{ matrix.components }}:latest + + - name: Push docker image + if: ${{ inputs.action == 'push' }} + run: | + docker push us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} + docker push matterlabs/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} + docker push us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + docker push matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + docker push us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest + docker push matterlabs/${{ matrix.components }}:latest copy-images: name: Copy images between docker registries @@ -190,6 +206,10 @@ jobs: docker buildx imagetools create \ --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \ us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} + docker buildx imagetools create \ + --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ inputs.image_tag_suffix }} + - name: Login and push to Europe GAR run: | @@ -197,3 +217,6 @@ jobs: docker buildx imagetools create \ --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \ us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} + docker buildx imagetools create \ + --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ inputs.image_tag_suffix }} diff --git a/.github/workflows/new-build-witness-generator-template.yml b/.github/workflows/new-build-witness-generator-template.yml index 2f1fc0b2dd86..bbd6aee23ed1 100644 --- a/.github/workflows/new-build-witness-generator-template.yml +++ 
b/.github/workflows/new-build-witness-generator-template.yml @@ -9,6 +9,10 @@ on: description: "DOCKERHUB_TOKEN" required: true inputs: + ERA_BELLMAN_CUDA_RELEASE: + description: "ERA_BELLMAN_CUDA_RELEASE" + type: string + required: true image_tag_suffix: description: "Optional suffix to override tag name generation" type: string @@ -127,7 +131,14 @@ jobs: SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com SCCACHE_GCS_RW_MODE=READ_WRITE RUSTC_WRAPPER=sccache + PROTOCOL_VERSION=${{ env.PROTOCOL_VERSION }} + ERA_BELLMAN_CUDA_RELEASE=${{ inputs.ERA_BELLMAN_CUDA_RELEASE }} + RUST_FLAGS=${{ inputs.WITNESS_GENERATOR_RUST_FLAGS }} file: docker/${{ matrix.components }}/Dockerfile tags: | us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} matterlabs/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest + matterlabs/${{ matrix.components }}:latest diff --git a/.github/workflows/protobuf.yaml b/.github/workflows/protobuf.yaml index 9c2c34186701..c9541167b464 100644 --- a/.github/workflows/protobuf.yaml +++ b/.github/workflows/protobuf.yaml @@ -23,7 +23,7 @@ env: RUSTC_WRAPPER: "sccache" SCCACHE_GHA_ENABLED: "true" RUST_BACKTRACE: "1" - SQLX_OFFLINE: true, + SQLX_OFFLINE: true # github.base_ref -> github.head_ref for pull_request BASE: ${{ github.event.pull_request.base.sha || github.event.before }} # github.event.before -> github.event.after for push @@ -43,7 +43,7 @@ jobs: fetch-depth: 0 # fetches all branches and tags, which is needed to compute the LCA. 
- name: checkout LCA run: - git checkout $(git merge-base $BASE $HEAD) + git checkout $(git merge-base $BASE $HEAD) --recurse-submodules working-directory: ./before - name: compile before run: cargo check --all-targets @@ -59,6 +59,7 @@ jobs: with: ref: ${{ env.HEAD }} path: after + submodules: recursive - name: compile after run: cargo check --all-targets working-directory: ./after diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 18708420dab0..eb75ab179b8e 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -61,10 +61,11 @@ jobs: build-push-core-images: name: Build and push images needs: [setup, changed_files] - uses: ./.github/workflows/build-core-template.yml + uses: ./.github/workflows/new-build-core-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} + action: "push" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} @@ -84,10 +85,11 @@ jobs: build-push-contract-verifier: name: Build and push images needs: [setup, changed_files] - uses: ./.github/workflows/build-contract-verifier-template.yml + uses: ./.github/workflows/new-build-contract-verifier-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} + action: "push" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} @@ -95,12 +97,13 @@ jobs: build-push-prover-images: name: Build and push images needs: [setup, changed_files] - uses: ./.github/workflows/build-prover-template.yml + uses: ./.github/workflows/new-build-prover-template.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} CUDA_ARCH: "60;70;75;80;89" + action: "push" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} @@ -108,13 +111,14 @@ jobs: build-push-witness-generator-image-avx512: name: Build and push prover images with avx512 instructions needs: [setup, changed_files] - uses: ./.github/workflows/build-witness-generator-template.yml + uses: ./.github/workflows/new-build-witness-generator-template.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} CUDA_ARCH: "60;70;75;80;89" WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl " + action: "push" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/secrets_scanner.yaml b/.github/workflows/secrets_scanner.yaml index fa896bf10561..9bb1ad0a2722 100644 --- a/.github/workflows/secrets_scanner.yaml +++ b/.github/workflows/secrets_scanner.yaml @@ -11,7 +11,7 @@ jobs: with: fetch-depth: 0 - name: TruffleHog OSS - uses: trufflesecurity/trufflehog@0c66d30c1f4075cee1aada2e1ab46dabb1b0071a + uses: trufflesecurity/trufflehog@781157ae368b2218a0a56b889387dd26faa20f97 with: path: ./ base: ${{ github.event.repository.default_branch }} diff --git 
a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index 49830a30cc1e..3520419f1337 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -40,6 +40,8 @@ jobs: echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env + # Set the minimum reported instruction count difference to reduce noise + echo "BENCHMARK_DIFF_THRESHOLD_PERCENT=2" >> .env - name: init run: | @@ -51,8 +53,8 @@ jobs: run: | ci_run zkstackup -g --local ci_run zkstack dev contracts --system-contracts - ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai - ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes + ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose || echo "Instructions benchmark is missing" + ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes - name: checkout PR run: | @@ -60,24 +62,39 @@ jobs: - name: run benchmarks on PR shell: bash + id: comparison run: | ci_run zkstackup -g --local ci_run zkstack dev contracts --system-contracts - ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai - ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee pr-opcodes || touch pr-opcodes + ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose + ci_run cargo bench --package vm-benchmark --bench instructions -- --print > instructions.log 2>/dev/null + # Output all lines from the benchmark result starting from the "## ..." comparison header. + # Since the output spans multiple lines, we use a heredoc declaration. 
EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) echo "speedup<<$EOF" >> $GITHUB_OUTPUT - ci_run cargo run --package vm-benchmark --release --bin compare_iai_results base-iai pr-iai base-opcodes pr-opcodes >> $GITHUB_OUTPUT + sed -n '/^## /,$p' instructions.log >> $GITHUB_OUTPUT + echo "$EOF" >> $GITHUB_OUTPUT + + ci_run cargo run --package vm-benchmark --release --bin instruction_counts -- --diff base-opcodes > opcodes.log + echo "opcodes<<$EOF" >> $GITHUB_OUTPUT + sed -n '/^## /,$p' opcodes.log >> $GITHUB_OUTPUT echo "$EOF" >> $GITHUB_OUTPUT - id: comparison - name: Comment on PR uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 + if: steps.comparison.outputs.speedup != '' || steps.comparison.outputs.opcodes != '' with: message: | - ${{ steps.comparison.outputs.speedup == '' && '## No performance difference detected (anymore)' || '## Detected VM performance changes' }} ${{ steps.comparison.outputs.speedup }} + ${{ steps.comparison.outputs.opcodes }} comment_tag: vm-performance-changes mode: recreate - create_if_not_exists: ${{ steps.comparison.outputs.speedup != '' }} + create_if_not_exists: true + - name: Remove PR comment + uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 + if: steps.comparison.outputs.speedup == '' && steps.comparison.outputs.opcodes == '' + with: + comment_tag: vm-performance-changes + message: 'No performance difference detected (anymore)' + mode: delete diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index d336a1472e4a..93d33116794f 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -48,5 +48,5 @@ jobs: ci_run cargo bench --package vm-benchmark --bench oneshot # Run only benches with 1,000 transactions per batch to not spend too much time ci_run cargo bench --package vm-benchmark --bench batch '/1000$' - ci_run cargo bench --package vm-benchmark --bench iai | tee iai-result - ci_run cargo run --package vm-benchmark --bin iai_results_to_prometheus --release < iai-result + ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose + ci_run cargo bench --package vm-benchmark --bench instructions -- --print diff --git a/.gitignore b/.gitignore index 86ed40c70417..adf3b7799618 100644 --- a/.gitignore +++ b/.gitignore @@ -115,6 +115,7 @@ prover/data/keys/setup_* # ZK Stack CLI chains/era/configs/* chains/gateway/* +chains/avail/* configs/* era-observability/ core/tests/ts-integration/deployments-zk diff --git a/Cargo.lock b/Cargo.lock index 774471d3d6c3..79c986c7893a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15,9 +15,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] @@ -101,6 +101,67 @@ version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +[[package]] +name = "alloy-json-abi" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded610181f3dad5810f6ff12d1a99994cf9b42d2fcb7709029352398a5da5ae6" +dependencies = [ + "alloy-primitives", + "alloy-sol-type-parser", + "serde", + "serde_json", +] + 
+[[package]] +name = "alloy-primitives" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd58d377699e6cfeab52c4a9d28bdc4ef37e2bd235ff2db525071fe37a2e9af5" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more 1.0.0", + "foldhash", + "getrandom", + "hashbrown 0.15.0", + "hex-literal", + "indexmap 2.6.0", + "itoa", + "k256 0.13.4", + "keccak-asm", + "paste", + "proptest", + "rand 0.8.5", + "ruint", + "rustc-hash 2.0.0", + "serde", + "sha3 0.10.8", + "tiny-keccak 2.0.2", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da0822426598f95e45dd1ea32a738dac057529a709ee645fcc516ffa4cbde08f" +dependencies = [ + "arrayvec 0.7.6", + "bytes", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12c71028bfbfec210e24106a542aad3def7caf1a70e2c05710e92a98481980d3" +dependencies = [ + "serde", + "winnow 0.6.20", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -133,9 +194,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.15" +version = "0.6.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +checksum = "23a1e53f0f5d86382dafe1cf314783b2044280f406e7e1506368220ad11b1338" dependencies = [ "anstyle", "anstyle-parse", @@ -148,43 +209,176 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "8365de52b16c035ff4fcafe0092ba9390540e3e352870ac09933bebcaa2c8c56" [[package]] name = "anstyle-parse" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.4" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.89" +version = "1.0.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8" + +[[package]] +name = "arbitrary" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +dependencies = [ + "derive_arbitrary", +] + +[[package]] +name = "ark-ff" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" 
+checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rustc_version 0.4.1", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote 1.0.37", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote 1.0.37", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "quote 1.0.37", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 1.0.109", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint 0.4.6", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] [[package]] name = "arr_macro" @@ -292,6 +486,20 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "async-compression" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cb8f1d480b0ea3783ab015936d2a55c87e219676f0c0b7dec61494043f21857" +dependencies = [ + "futures-core", + "memchr", + "pin-project-lite", + "tokio", + "zstd", + "zstd-safe", +] + [[package]] name = "async-executor" version = "1.13.1" @@ -406,9 +614,9 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -459,9 +667,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -470,13 +678,13 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -491,9 +699,9 @@ version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -528,17 +736,28 @@ dependencies = [ "winapi", ] +[[package]] +name = "auto_impl" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", +] + [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-lc-rs" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f95446d919226d587817a7d21379e6eb099b97b45110a7f272a444ca5c54070" +checksum = "cdd82dba44d209fddb11c190e0a94b78651f95299598e472215667417a03ff1d" dependencies = [ "aws-lc-sys", "mirai-annotations", @@ -548,11 +767,11 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.21.2" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3ddc4a5b231dd6958b140ff3151b6412b3f4321fab354f399eec8f14b06df62" +checksum = "df7a4168111d7eb622a31b214057b8509c0a7e1794f44c546d742330dc793972" dependencies = [ - "bindgen 0.69.4", + "bindgen 0.69.5", "cc", "cmake", "dunce", @@ -563,18 +782,46 @@ dependencies = [ [[package]] name = "axum" -version = "0.7.6" +version = "0.6.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +dependencies = [ + "async-trait", + "axum-core 0.3.4", + "bitflags 1.3.2", + "bytes", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.31", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper 0.1.2", + "tower 0.4.13", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f43644eed690f5374f1af436ecd6aea01cd201f6fbdf0178adaf6907afb2cec" +checksum = 
"504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" dependencies = [ "async-trait", - "axum-core", + "axum-core 0.4.5", "bytes", "futures-util", "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "itoa", "matchit", @@ -598,9 +845,26 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.4" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6b8ba012a258d63c9adfa28b9ddcf66149da6f986c5b5452e629d5ee64bf00" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", @@ -644,6 +908,12 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "base-x" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" + [[package]] name = "base16ct" version = "0.1.1" @@ -697,6 +967,12 @@ dependencies = [ "regex", ] +[[package]] +name = "bech32" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" + [[package]] name = "beef" version = "0.5.2" @@ -708,9 +984,9 @@ dependencies = [ [[package]] name = "bigdecimal" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d712318a27c7150326677b321a5fa91b55f6d9034ffd67f20319e147d40cee" +checksum = "8f850665a0385e070b64c38d2354e6c104c8479c59868d1e48a0c13ee2c7a1c1" dependencies = [ "autocfg", "libm", @@ -741,19 +1017,19 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "regex", - "rustc-hash", + "rustc-hash 1.1.0", "shlex", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] name = "bindgen" -version = "0.69.4" +version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ "bitflags 2.6.0", "cexpr", @@ -763,20 +1039,20 @@ dependencies = [ "lazycell", "log", "prettyplease", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "regex", - "rustc-hash", + "rustc-hash 1.1.0", "shlex", - "syn 2.0.77", + "syn 2.0.85", "which", ] [[package]] name = "bip39" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" +checksum = "33415e24172c1b7d6066f6d999545375ab8e1d95421d6784bdfff9496f292387" dependencies = [ "bitcoin_hashes", "serde", @@ -801,11 +1077,21 @@ dependencies = [ "serde", ] +[[package]] +name = "bitcoin-internals" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" + [[package]] name = "bitcoin_hashes" -version = "0.11.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" +checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" +dependencies = [ + "bitcoin-internals", + "hex-conservative", +] [[package]] name = "bitflags" @@ -945,7 +1231,7 @@ name = "block_reverter" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "serde_json", "tokio", "zksync_block_reverter", @@ -972,6 +1258,18 @@ dependencies = [ "piper", ] +[[package]] +name = "blockstore" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7679095248a6dc7555fae81154ed1baef264383c16621ef881a219576c72a9be" +dependencies = [ + "cid", + "dashmap 6.1.0", + "multihash", + "thiserror", +] + [[package]] name = "blst" version = "0.3.13" @@ -1033,9 +1331,9 @@ checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" dependencies = [ "once_cell", "proc-macro-crate 3.2.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", "syn_derive", ] @@ -1048,6 +1346,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "build_const" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ae4235e6dac0694637c763029ecea1a2ec9e4e06ec2729bd21ba4d9c863eb7" + [[package]] name = "build_html" version = "2.5.0" @@ -1083,7 +1387,7 @@ version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -1102,9 +1406,12 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" +dependencies = [ + "serde", +] [[package]] name = "bytesize" @@ -1142,33 +1449,123 @@ dependencies = [ ] [[package]] -name = "cargo_metadata" -version = "0.14.2" +name = "cargo_metadata" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.23", + "serde", + "serde_json", +] + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cc" +version = "1.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" +dependencies = [ + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "celestia-proto" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6eb26c852e42015f85f3aed5c3d1472c751b143e2199d0401ebac2f4500b20d" +dependencies = [ + "celestia-tendermint-proto", + "prost 0.12.6", + "prost-build", + "prost-types", + "protox 0.6.1", + "serde", +] + +[[package]] +name = "celestia-tendermint" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" +checksum = "ce8c92a01145f79a0f3ac7c44a43a9b5ee58e8a4c716b56d98833a3848db1afd" 
dependencies = [ - "camino", - "cargo-platform", - "semver", + "bytes", + "celestia-tendermint-proto", + "digest 0.10.7", + "ed25519", + "ed25519-consensus", + "flex-error", + "futures 0.3.31", + "num-traits", + "once_cell", + "prost 0.12.6", + "prost-types", "serde", + "serde_bytes", "serde_json", + "serde_repr", + "sha2 0.10.8", + "signature 2.2.0", + "subtle", + "subtle-encoding", + "time", + "zeroize", ] [[package]] -name = "cast" -version = "0.3.0" +name = "celestia-tendermint-proto" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +checksum = "9a95746c5221a74d7b913a415fdbb9e7c90e1b4d818dbbff59bddc034cfce2ec" +dependencies = [ + "bytes", + "flex-error", + "num-derive 0.3.3", + "num-traits", + "prost 0.12.6", + "prost-types", + "serde", + "serde_bytes", + "subtle-encoding", + "time", +] [[package]] -name = "cc" -version = "1.1.21" +name = "celestia-types" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" +checksum = "caf52cc4b4cdf73fc07d9eeaea6d27bb39eed81f4bf8c89f01df86ace4e6da10" dependencies = [ - "jobserver", - "libc", - "shlex", + "base64 0.22.1", + "bech32", + "blockstore", + "bytes", + "celestia-proto", + "celestia-tendermint", + "celestia-tendermint-proto", + "cid", + "const_format", + "enum_dispatch", + "leopard-codec", + "libp2p-identity", + "multiaddr", + "multihash", + "nmt-rs", + "ruint", + "serde", + "serde_repr", + "sha2 0.10.8", + "thiserror", + "time", ] [[package]] @@ -1264,6 +1661,18 @@ dependencies = [ "half", ] +[[package]] +name = "cid" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3147d8272e8fa0ccd29ce51194dd98f79ddfb8191ba9e3409884e751798acf3a" +dependencies = [ + "core2", + "multibase", + "multihash", + "unsigned-varint", +] + [[package]] name = "cipher" version = "0.4.4" @@ -1313,14 +1722,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67617688c66640c84f9b98ff26d48f7898dca4faeb45241a4f21ec333788e7b" +checksum = "2501cc688ef391013019495ae7035cfd54f86987e36d10f73976ce4c5d413c5a" dependencies = [ "derivative", "serde", - "zk_evm 0.150.5", - "zkevm_circuits 0.150.5", + "zk_evm 0.150.7", + "zkevm_circuits 0.150.7", ] [[package]] @@ -1380,11 +1789,11 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21017310971d4a051e4a52ad70eed11d1ae69defeca8314f73a3a4bad16705a9" +checksum = "917d27db531fdd98a51e42ea465bc097f48cc849e7fad68d7856087d15125be1" dependencies = [ - "circuit_encodings 0.150.5", + "circuit_encodings 0.150.7", "derivative", "rayon", "serde", @@ -1431,9 +1840,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -1441,14 +1850,15 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstream", "anstyle", "clap_lex 0.7.2", "strsim 0.11.1", + "terminal_size", ] [[package]] @@ -1458,9 +1868,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -1498,9 +1908,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "combine" @@ -1548,6 +1958,19 @@ dependencies = [ "compile-fmt", ] +[[package]] +name = "const-hex" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586" +dependencies = [ + "cfg-if", + "cpufeatures", + "hex", + "proptest", + "serde", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -1569,7 +1992,7 @@ version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eff1a44b93f47b1bac19a27932f5c591e43d1ba357ee4f61526c8a25603f0eb1" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "unicode-xid 0.2.6", ] @@ -1617,6 +2040,15 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" +dependencies = [ + "memchr", +] + [[package]] name = "cpufeatures" version = "0.2.14" @@ -1800,7 +2232,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" dependencies = [ "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -1833,7 +2265,7 @@ dependencies = [ "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "rustc_version", + "rustc_version 0.4.1", "subtle", "zeroize", ] @@ -1844,9 +2276,22 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", +] + +[[package]] +name = "curve25519-dalek-ng" +version = "4.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c359b7249347e46fb28804470d071c921156ad62b3eef5d34e2ba867533dec8" +dependencies = [ + "byteorder", + "digest 0.9.0", + "rand_core 0.6.4", + "subtle-ng", + "zeroize", ] [[package]] @@ -1887,7 +2332,7 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "strsim 0.10.0", "syn 1.0.109", @@ -1901,7 +2346,7 @@ checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "strsim 0.10.0", "syn 
1.0.109", @@ -1915,10 +2360,10 @@ checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "strsim 0.11.1", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -1951,7 +2396,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -1967,6 +2412,46 @@ dependencies = [ "parking_lot_core", ] +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "data-encoding" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" + +[[package]] +name = "data-encoding-macro" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" +dependencies = [ + "data-encoding", + "data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" +dependencies = [ + "data-encoding", + "syn 1.0.109", +] + [[package]] name = "debugid" version = "0.8.0" @@ -2014,11 +2499,22 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] +[[package]] +name = "derive_arbitrary" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", +] + [[package]] name = "derive_more" version = "0.99.18" @@ -2026,10 +2522,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "rustc_version", - "syn 2.0.77", + "rustc_version 0.4.1", + "syn 2.0.85", ] [[package]] @@ -2047,9 +2543,9 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", "unicode-xid 0.2.6", ] @@ -2080,6 +2576,15 @@ dependencies = [ "subtle", ] +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + [[package]] name = "dirs-next" version = "2.0.0" @@ -2090,6 +2595,18 @@ dependencies = [ "dirs-sys-next", ] +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + [[package]] name = "dirs-sys-next" version = "0.1.2" @@ -2101,6 +2618,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", +] + [[package]] name = "dotenvy" version = "0.15.7" @@ -2125,6 +2653,12 @@ version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" +[[package]] +name = "dyn-clone" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" + [[package]] name = "ecdsa" version = "0.14.8" @@ -2161,6 +2695,19 @@ dependencies = [ "signature 2.2.0", ] +[[package]] +name = "ed25519-consensus" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c8465edc8ee7436ffea81d21a019b16676ee3db267aa8d5a8d729581ecf998b" +dependencies = [ + "curve25519-dalek-ng", + "hex", + "rand_core 0.6.4", + "sha2 0.9.9", + "zeroize", +] + [[package]] name = "ed25519-dalek" version = "2.1.1" @@ -2266,9 +2813,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.34" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ "cfg-if", ] @@ -2280,9 +2827,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" dependencies = [ "once_cell", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -2432,12 +2979,33 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + [[package]] name = "fastrand" version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec 0.7.6", + "auto_impl", + "bytes", +] + [[package]] name = "ff" version = "0.12.1" @@ -2510,11 +3078,21 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "flex-error" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c606d892c9de11507fa0dcffc116434f94e105d0bbdc4e405b61519464c49d7b" +dependencies = [ + "eyre", + "paste", +] + [[package]] name = "flume" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +checksum = 
"da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" dependencies = [ "futures-core", "futures-sink", @@ -2527,6 +3105,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" + [[package]] name = "foreign-types" version = "0.3.2" @@ -2551,6 +3135,136 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "foundry-compilers" +version = "0.11.6" +source = "git+https://github.com/Moonsong-Labs/compilers.git?rev=7c69695e5c75451f158dd2456bf8c94a7492ea0b#7c69695e5c75451f158dd2456bf8c94a7492ea0b" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "auto_impl", + "derivative", + "dirs", + "dyn-clone", + "foundry-compilers-artifacts", + "foundry-compilers-core", + "fs4 0.8.4", + "fs_extra", + "futures-util", + "home", + "itertools 0.13.0", + "md-5", + "once_cell", + "path-slash", + "rand 0.8.5", + "rayon", + "reqwest 0.12.9", + "semver 1.0.23", + "serde", + "serde_json", + "sha2 0.10.8", + "solang-parser", + "svm-rs", + "svm-rs-builds", + "tempfile", + "thiserror", + "tokio", + "tracing", + "walkdir", + "winnow 0.6.20", + "yansi", +] + +[[package]] +name = "foundry-compilers-artifacts" +version = "0.11.6" +source = "git+https://github.com/Moonsong-Labs/compilers.git?rev=7c69695e5c75451f158dd2456bf8c94a7492ea0b#7c69695e5c75451f158dd2456bf8c94a7492ea0b" +dependencies = [ + "foundry-compilers-artifacts-solc", + "foundry-compilers-artifacts-vyper", + "foundry-compilers-artifacts-zksolc", +] + +[[package]] +name = "foundry-compilers-artifacts-solc" +version = "0.11.6" +source = "git+https://github.com/Moonsong-Labs/compilers.git?rev=7c69695e5c75451f158dd2456bf8c94a7492ea0b#7c69695e5c75451f158dd2456bf8c94a7492ea0b" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "foundry-compilers-core", + "futures-util", + "md-5", + "path-slash", + "rayon", + "semver 1.0.23", + "serde", + "serde_json", + "serde_repr", + "thiserror", + "tokio", + "tracing", + "walkdir", + "yansi", +] + +[[package]] +name = "foundry-compilers-artifacts-vyper" +version = "0.11.6" +source = "git+https://github.com/Moonsong-Labs/compilers.git?rev=7c69695e5c75451f158dd2456bf8c94a7492ea0b#7c69695e5c75451f158dd2456bf8c94a7492ea0b" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "foundry-compilers-artifacts-solc", + "foundry-compilers-core", + "path-slash", + "semver 1.0.23", + "serde", +] + +[[package]] +name = "foundry-compilers-artifacts-zksolc" +version = "0.11.6" +source = "git+https://github.com/Moonsong-Labs/compilers.git?rev=7c69695e5c75451f158dd2456bf8c94a7492ea0b#7c69695e5c75451f158dd2456bf8c94a7492ea0b" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "foundry-compilers-artifacts-solc", + "foundry-compilers-core", + "md-5", + "path-slash", + "rayon", + "semver 1.0.23", + "serde", + "serde_json", + "thiserror", + "tracing", + "walkdir", + "yansi", +] + +[[package]] +name = "foundry-compilers-core" +version = "0.11.6" +source = "git+https://github.com/Moonsong-Labs/compilers.git?rev=7c69695e5c75451f158dd2456bf8c94a7492ea0b#7c69695e5c75451f158dd2456bf8c94a7492ea0b" +dependencies = [ + "alloy-primitives", + "cfg-if", + "dunce", + "fs_extra", + "once_cell", + "path-slash", + "regex", + "semver 1.0.23", + "serde", + "serde_json", + "svm-rs", + "tempfile", + 
"thiserror", + "tokio", + "walkdir", +] + [[package]] name = "fraction" version = "0.15.3" @@ -2604,7 +3318,7 @@ dependencies = [ "itertools 0.10.5", "lazy_static", "num-bigint 0.4.6", - "num-derive", + "num-derive 0.2.5", "num-integer", "num-traits", "rand 0.4.6", @@ -2617,6 +3331,26 @@ dependencies = [ "zksync_bellman", ] +[[package]] +name = "fs4" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7e180ac76c23b45e767bd7ae9579bc0bb458618c4bc71835926e098e61d15f8" +dependencies = [ + "rustix", + "windows-sys 0.52.0", +] + +[[package]] +name = "fs4" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8c6b3bd49c37d2aa3f3f2220233b29a7cd23f79d1fe70e5337d25fb390793de" +dependencies = [ + "rustix", + "windows-sys 0.52.0", +] + [[package]] name = "fs_extra" version = "1.3.0" @@ -2643,9 +3377,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -2658,9 +3392,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -2668,15 +3402,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -2697,9 +3431,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" @@ -2716,26 +3450,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" 
-version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -2749,9 +3483,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures 0.1.31", "futures-channel", @@ -2782,8 +3516,8 @@ name = "genesis_generator" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", - "futures 0.3.30", + "clap 4.5.20", + "futures 0.3.31", "serde", "serde_json", "serde_yaml", @@ -2798,7 +3532,6 @@ dependencies = [ "zksync_protobuf", "zksync_protobuf_config", "zksync_types", - "zksync_utils", ] [[package]] @@ -2836,9 +3569,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" @@ -2916,7 +3649,7 @@ dependencies = [ "google-cloud-token", "home", "jsonwebtoken", - "reqwest 0.12.7", + "reqwest 0.12.9", "serde", "serde_json", "thiserror", @@ -2932,7 +3665,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04f945a208886a13d07636f38fb978da371d0abc3e34bad338124b9f8c135a8f" dependencies = [ - "reqwest 0.12.7", + "reqwest 0.12.9", "thiserror", "tokio", ] @@ -2957,7 +3690,7 @@ dependencies = [ "percent-encoding", "pkcs8 0.10.2", "regex", - "reqwest 0.12.7", + "reqwest 0.12.9", "reqwest-middleware", "ring", "serde", @@ -2985,8 +3718,8 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19775995ee20209163239355bc3ad2f33f83da35d9ef72dea26e5af753552c87" dependencies = [ - "dashmap", - "futures 0.3.30", + "dashmap 5.5.3", + "futures 0.3.31", "futures-timer", "no-std-compat", "nonzero_ext", @@ -3030,7 +3763,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -3049,7 +3782,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -3075,7 +3808,7 @@ dependencies = [ "log", "pest", "pest_derive", - "quick-error", + "quick-error 2.0.1", "serde", "serde_json", ] @@ -3100,6 +3833,18 @@ dependencies = [ "serde", ] +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", + "serde", +] + [[package]] name = "hashlink" version = "0.9.1" @@ -3156,6 +3901,21 @@ name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] + +[[package]] +name = "hex-conservative" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" + 
+[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "hkdf" @@ -3274,9 +4034,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -3299,7 +4059,7 @@ dependencies = [ "crossbeam-utils", "form_urlencoded", "futures-util", - "hyper 0.14.30", + "hyper 0.14.31", "lazy_static", "levenshtein", "log", @@ -3320,9 +4080,9 @@ checksum = "f58b778a5761513caf593693f8951c97a5b610841e754788400f32102eefdff1" [[package]] name = "hyper" -version = "0.14.30" +version = "0.14.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" dependencies = [ "bytes", "futures-channel", @@ -3344,9 +4104,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", "futures-channel", @@ -3371,7 +4131,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.30", + "hyper 0.14.31", "log", "rustls 0.21.12", "rustls-native-certs 0.6.3", @@ -3387,14 +4147,28 @@ checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "log", - "rustls 0.23.13", + "rustls 0.23.16", + "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", "tower-service", + "webpki-roots", +] + +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper 0.14.31", + "pin-project-lite", + "tokio", + "tokio-io-timeout", ] [[package]] @@ -3403,7 +4177,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "pin-project-lite", "tokio", @@ -3417,7 +4191,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.30", + "hyper 0.14.31", "native-tls", "tokio", "tokio-native-tls", @@ -3431,7 +4205,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "native-tls", "tokio", @@ -3441,16 +4215,16 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = 
"df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", "http-body 1.0.1", - "hyper 1.4.1", + "hyper 1.5.0", "pin-project-lite", "socket2", "tokio", @@ -3458,12 +4232,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "iai" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71a816c97c42258aa5834d07590b718b4c9a598944cd39a52dc25b351185d678" - [[package]] name = "iana-time-zone" version = "0.1.61" @@ -3550,11 +4318,17 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + [[package]] name = "indexmap" version = "1.9.3" @@ -3567,12 +4341,13 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", + "serde", ] [[package]] @@ -3592,9 +4367,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.40.0" +version = "1.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6593a41c7a73841868772495db7dc1e8ecab43bb5c0b6da2059246c4b506ab60" +checksum = "a1f72d3e19488cf7d8ea52d2fc0f8754fc933398b337cd3cbdb28aaeb35159ef" dependencies = [ "console", "lazy_static", @@ -3614,9 +4389,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "ipnetwork" @@ -3706,9 +4481,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] @@ -3777,7 +4552,7 @@ dependencies = [ "http 1.1.0", "jsonrpsee-core 0.23.2", "pin-project", - "rustls 0.23.13", + "rustls 0.23.16", "rustls-pki-types", "rustls-platform-verifier", "soketto 0.8.0", @@ -3801,10 +4576,10 @@ dependencies = [ "beef", "futures-timer", "futures-util", - "hyper 0.14.30", + "hyper 0.14.31", "jsonrpsee-types 0.21.0", "pin-project", - "rustc-hash", + "rustc-hash 1.1.0", "serde", "serde_json", "thiserror", @@ -3832,7 +4607,7 @@ dependencies = [ "parking_lot", "pin-project", "rand 0.8.5", - "rustc-hash", + "rustc-hash 1.1.0", "serde", "serde_json", "thiserror", @@ -3849,7 +4624,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b7de9f3219d95985eb77fd03194d7c1b56c19bce1abfcc9d07462574b15572" dependencies = [ "async-trait", - "hyper 0.14.30", + "hyper 0.14.31", "hyper-rustls 0.24.2", "jsonrpsee-core 0.21.0", "jsonrpsee-types 0.21.0", @@ -3871,12 +4646,12 @@ dependencies = [ "async-trait", 
"base64 0.22.1", "http-body 1.0.1", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-rustls 0.27.3", "hyper-util", "jsonrpsee-core 0.23.2", "jsonrpsee-types 0.23.2", - "rustls 0.23.13", + "rustls 0.23.16", "rustls-platform-verifier", "serde", "serde_json", @@ -3895,9 +4670,9 @@ checksum = "7895f186d5921065d96e16bd795e5ca89ac8356ec423fafc6e3d7cf8ec11aee4" dependencies = [ "heck 0.5.0", "proc-macro-crate 3.2.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -3911,7 +4686,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "jsonrpsee-core 0.23.2", "jsonrpsee-types 0.23.2", @@ -4028,6 +4803,16 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + [[package]] name = "kv-log-macro" version = "1.0.7" @@ -4051,7 +4836,7 @@ dependencies = [ "petgraph", "pico-args", "regex", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", "string_cache", "term", "tiny-keccak 2.0.2", @@ -4065,7 +4850,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "507460a910eb7b32ee961886ff48539633b788a36b65692b95f225b844c82553" dependencies = [ - "regex-automata 0.4.7", + "regex-automata 0.4.8", ] [[package]] @@ -4089,6 +4874,17 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" +[[package]] +name = "leopard-codec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee58dbc414bd23885d7da915e0457618b36d1fc950a6169ef2cb29829d1b1a1d" +dependencies = [ + "bytes", + "lazy_static", + "thiserror", +] + [[package]] name = "levenshtein" version = "1.0.5" @@ -4097,9 +4893,9 @@ checksum = "db13adb97ab515a3691f56e4dbab09283d0b86cb45abd991d8634a9d6f501760" [[package]] name = "libc" -version = "0.2.159" +version = "0.2.161" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" [[package]] name = "libloading" @@ -4113,9 +4909,24 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.8" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" + +[[package]] +name = "libp2p-identity" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8" +dependencies = [ + "bs58", + "hkdf", + "multihash", + "quick-protobuf", + "sha2 0.10.8", + "thiserror", + "tracing", +] [[package]] name = "libredox" @@ -4232,13 +5043,13 @@ dependencies = [ "anyhow", "async-trait", "envy", - "futures 0.3.30", + "futures 0.3.31", "hex", "num", "once_cell", "rand 0.8.5", "regex", - "reqwest 0.12.7", + "reqwest 0.12.9", "serde", "serde_json", "static_assertions", @@ -4247,10 +5058,10 @@ dependencies = [ "tracing", "vise", "zksync_config", - "zksync_contracts", "zksync_eth_client", "zksync_eth_signer", "zksync_system_constants", + "zksync_test_contracts", 
"zksync_types", "zksync_utils", "zksync_vlog", @@ -4267,6 +5078,12 @@ dependencies = [ "scopeguard", ] +[[package]] +name = "lockfree-object-pool" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9374ef4228402d4b7e403e5838cb880d9ee663314b0a900d5a6aabf0c213552e" + [[package]] name = "log" version = "0.4.22" @@ -4282,7 +5099,16 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c000ca4d908ff18ac99b93a062cb8958d331c3220719c52e77cb19cc6ac5d2c1" dependencies = [ - "logos-derive", + "logos-derive 0.13.0", +] + +[[package]] +name = "logos" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c6b6e02facda28ca5fb8dbe4b152496ba3b1bd5a4b40bb2b1b2d8ad74e0f39b" +dependencies = [ + "logos-derive 0.14.2", ] [[package]] @@ -4293,10 +5119,25 @@ checksum = "dc487311295e0002e452025d6b580b77bb17286de87b57138f3b5db711cded68" dependencies = [ "beef", "fnv", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "regex-syntax 0.6.29", - "syn 2.0.77", + "syn 2.0.85", +] + +[[package]] +name = "logos-codegen" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b32eb6b5f26efacd015b000bfc562186472cd9b34bdba3f6b264e2a052676d10" +dependencies = [ + "beef", + "fnv", + "lazy_static", + "proc-macro2 1.0.89", + "quote 1.0.37", + "regex-syntax 0.8.5", + "syn 2.0.85", ] [[package]] @@ -4305,16 +5146,25 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbfc0d229f1f42d790440136d941afd806bc9e949e2bcb8faa813b0f00d1267e" dependencies = [ - "logos-codegen", + "logos-codegen 0.13.0", +] + +[[package]] +name = "logos-derive" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e5d0c5463c911ef55624739fc353238b4e310f0144be1f875dc42fec6bfd5ec" +dependencies = [ + "logos-codegen 0.14.2", ] [[package]] name = "lru" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.0", ] [[package]] @@ -4378,7 +5228,7 @@ name = "merkle_tree_consistency_checker" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "tracing", "zksync_config", "zksync_env_config", @@ -4406,21 +5256,44 @@ version = "5.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59bb584eaeeab6bd0226ccf3509a69d7936d148cf3d036ad350abe35e8c6856e" dependencies = [ - "miette-derive", + "miette-derive 5.10.0", "once_cell", "thiserror", "unicode-width", ] +[[package]] +name = "miette" +version = "7.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4edc8853320c2a0dab800fbda86253c8938f6ea88510dc92c5f1ed20e794afc1" +dependencies = [ + "cfg-if", + "miette-derive 7.2.0", + "thiserror", + "unicode-width", +] + [[package]] name = "miette-derive" version = "5.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49e7bc1560b95a3c4a25d03de42fe76ca718ab92d1a22a55b9b4cf67b3ae635c" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", +] + +[[package]] +name = "miette-derive" +version = "7.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dcf09caffaac8068c346b6df2a7fc27a177fd20b39421a39ce0a211bde679a6c" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", ] [[package]] @@ -4447,7 +5320,7 @@ checksum = "c325dfab65f261f386debee8b0969da215b3fa0037e74c8a1234db7ba986d803" dependencies = [ "crossbeam-channel", "crossbeam-utils", - "dashmap", + "dashmap 5.5.3", "skeptic", "smallvec", "tagptr", @@ -4504,6 +5377,46 @@ dependencies = [ "version_check", ] +[[package]] +name = "multiaddr" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961" +dependencies = [ + "arrayref", + "byteorder", + "data-encoding", + "libp2p-identity", + "multibase", + "multihash", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint", + "url", +] + +[[package]] +name = "multibase" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +dependencies = [ + "base-x", + "data-encoding", + "data-encoding-macro", +] + +[[package]] +name = "multihash" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" +dependencies = [ + "core2", + "unsigned-varint", +] + [[package]] name = "multimap" version = "0.10.0" @@ -4545,6 +5458,18 @@ dependencies = [ "libc", ] +[[package]] +name = "nmt-rs" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e408e823bdc9b4bb525a61b44e846239833a8f9bd86c03a43e4ca314a5497582" +dependencies = [ + "borsh", + "bytes", + "serde", + "sha2 0.10.8", +] + [[package]] name = "no-std-compat" version = "0.4.1" @@ -4669,6 +5594,17 @@ dependencies = [ "syn 0.15.44", ] +[[package]] +name = "num-derive" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 1.0.109", +] + [[package]] name = "num-integer" version = "0.1.46" @@ -4756,9 +5692,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -4768,25 +5704,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ "proc-macro-crate 3.2.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] name = "object" -version = "0.36.4" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "oorandom" @@ -4802,9 +5738,9 @@ checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" 
[[package]] name = "openssl" -version = "0.10.66" +version = "0.10.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" dependencies = [ "bitflags 2.6.0", "cfg-if", @@ -4821,9 +5757,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -4834,9 +5770,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.103" +version = "0.9.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" dependencies = [ "cc", "libc", @@ -4880,7 +5816,7 @@ dependencies = [ "bytes", "http 1.1.0", "opentelemetry", - "reqwest 0.12.7", + "reqwest 0.12.9", ] [[package]] @@ -4897,10 +5833,10 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost 0.13.3", - "reqwest 0.12.7", + "reqwest 0.12.9", "thiserror", "tokio", - "tonic", + "tonic 0.12.3", ] [[package]] @@ -4912,7 +5848,7 @@ dependencies = [ "opentelemetry", "opentelemetry_sdk", "prost 0.13.3", - "tonic", + "tonic 0.12.3", ] [[package]] @@ -4942,6 +5878,12 @@ dependencies = [ "tokio-stream", ] +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + [[package]] name = "ordered-float" version = "2.10.1" @@ -5007,7 +5949,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ "proc-macro-crate 3.2.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -5047,6 +5989,49 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +[[package]] +name = "path-slash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e91099d4268b0e11973f036e885d652fb0b21fedcf69738c627f94db6a44f42" + +[[package]] +name = "pbjson" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1030c719b0ec2a2d25a5df729d6cff1acf3cc230bf766f4f97833591f7577b90" +dependencies = [ + "base64 0.21.7", + "serde", +] + +[[package]] +name = "pbjson-build" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2580e33f2292d34be285c5bc3dba5259542b083cfad6037b6d70345f24dcb735" +dependencies = [ + "heck 0.4.1", + "itertools 0.11.0", + "prost 0.12.6", + "prost-types", +] + +[[package]] +name = "pbjson-types" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18f596653ba4ac51bdecbb4ef6773bc7f56042dc13927910de1684ad3d32aa12" +dependencies = [ + "bytes", + "chrono", + "pbjson", + "pbjson-build", + "prost 0.12.6", + "prost-build", + "serde", +] + [[package]] name = "pbkdf2" version = "0.12.2" @@ -5089,9 +6074,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" 
[[package]] name = "pest" -version = "2.7.13" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" +checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", "thiserror", @@ -5100,9 +6085,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.13" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d3a6e3394ec80feb3b6393c725571754c6188490265c61aaf260810d6b95aa0" +checksum = "d214365f632b123a47fd913301e14c946c61d1c183ee245fa76eb752e59a02dd" dependencies = [ "pest", "pest_generator", @@ -5110,43 +6095,85 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.13" +version = "2.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94429506bde1ca69d1b5601962c73f4172ab4726571a59ea95931218cb0e930e" +checksum = "eb55586734301717aea2ac313f50b2eb8f60d2fc3dc01d190eefa2e625f60c4e" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", +] + +[[package]] +name = "pest_meta" +version = "2.7.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b75da2a70cf4d9cb76833c990ac9cd3923c9a8905a8929789ce347c84564d03d" +dependencies = [ + "once_cell", + "pest", + "sha2 0.10.8", +] + +[[package]] +name = "petgraph" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +dependencies = [ + "fixedbitset", + "indexmap 2.6.0", +] + +[[package]] +name = "phf" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +dependencies = [ + "phf_macros", + "phf_shared 0.11.2", +] + +[[package]] +name = "phf_generator" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +dependencies = [ + "phf_shared 0.11.2", + "rand 0.8.5", ] [[package]] -name = "pest_meta" -version = "2.7.13" +name = "phf_macros" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac8a071862e93690b6e34e9a5fb8e33ff3734473ac0245b27232222c4906a33f" +checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" dependencies = [ - "once_cell", - "pest", - "sha2 0.10.8", + "phf_generator", + "phf_shared 0.11.2", + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", ] [[package]] -name = "petgraph" -version = "0.6.5" +name = "phf_shared" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" dependencies = [ - "fixedbitset", - "indexmap 2.5.0", + "siphasher 0.3.11", ] [[package]] name = "phf_shared" -version = "0.10.0" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" dependencies = [ "siphasher 0.3.11", ] @@ -5159,29 +6186,29 @@ checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315" [[package]] name = 
"pin-project" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -5336,12 +6363,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.22" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" +checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ - "proc-macro2 1.0.86", - "syn 2.0.77", + "proc-macro2 1.0.89", + "syn 2.0.85", ] [[package]] @@ -5393,7 +6420,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", "version_check", @@ -5405,7 +6432,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "version_check", ] @@ -5427,9 +6454,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" dependencies = [ "unicode-ident", ] @@ -5452,9 +6479,29 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", +] + +[[package]] +name = "proptest" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.6.0", + "lazy_static", + "num-traits", + "rand 0.8.5", + "rand_chacha", + "rand_xorshift", + "regex-syntax 0.8.5", + "rusty-fork", + "tempfile", + "unarray", ] [[package]] @@ -5494,7 +6541,7 @@ dependencies = [ "prost 0.12.6", "prost-types", "regex", - "syn 2.0.77", + "syn 2.0.85", "tempfile", ] @@ -5506,9 +6553,9 @@ checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", "itertools 0.12.1", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", 
+ "syn 2.0.85", ] [[package]] @@ -5519,9 +6566,9 @@ checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" dependencies = [ "anyhow", "itertools 0.13.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -5531,8 +6578,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "057237efdb71cf4b3f9396302a3d6599a92fa94063ba537b66130980ea9909f3" dependencies = [ "base64 0.21.7", - "logos", - "miette", + "logos 0.13.0", + "miette 5.10.0", "once_cell", "prost 0.12.6", "prost-types", @@ -5540,6 +6587,19 @@ dependencies = [ "serde-value", ] +[[package]] +name = "prost-reflect" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f5eec97d5d34bdd17ad2db2219aabf46b054c6c41bd5529767c9ce55be5898f" +dependencies = [ + "logos 0.14.2", + "miette 7.2.0", + "once_cell", + "prost 0.12.6", + "prost-types", +] + [[package]] name = "prost-types" version = "0.12.6" @@ -5556,11 +6616,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00bb76c5f6221de491fe2c8f39b106330bbd9762c6511119c07940e10eb9ff11" dependencies = [ "bytes", - "miette", + "miette 5.10.0", + "prost 0.12.6", + "prost-reflect 0.12.0", + "prost-types", + "protox-parse 0.5.0", + "thiserror", +] + +[[package]] +name = "protox" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac532509cee918d40f38c3e12f8ef9230f215f017d54de7dd975015538a42ce7" +dependencies = [ + "bytes", + "miette 7.2.0", "prost 0.12.6", - "prost-reflect", + "prost-reflect 0.13.1", "prost-types", - "protox-parse", + "protox-parse 0.6.1", "thiserror", ] @@ -5570,8 +6645,20 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b4581f441c58863525a3e6bec7b8de98188cf75239a56c725a3e7288450a33f" dependencies = [ - "logos", - "miette", + "logos 0.13.0", + "miette 5.10.0", + "prost-types", + "thiserror", +] + +[[package]] +name = "protox-parse" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6c33f43516fe397e2f930779d720ca12cd057f7da4cd6326a0ef78d69dee96" +dependencies = [ + "logos 0.14.2", + "miette 7.2.0", "prost-types", "thiserror", ] @@ -5591,7 +6678,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -5623,6 +6710,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quick-error" version = "2.0.1" @@ -5638,6 +6731,54 @@ dependencies = [ "byteorder", ] +[[package]] +name = "quinn" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" +dependencies = [ + "bytes", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash 2.0.0", + "rustls 0.23.16", + "socket2", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "quinn-proto" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" +dependencies = [ + "bytes", + "rand 
0.8.5", + "ring", + "rustc-hash 2.0.0", + "rustls 0.23.16", + "slab", + "thiserror", + "tinyvec", + "tracing", +] + +[[package]] +name = "quinn-udp" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" +dependencies = [ + "libc", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.59.0", +] + [[package]] name = "quote" version = "0.6.13" @@ -5653,7 +6794,7 @@ version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", ] [[package]] @@ -5684,6 +6825,7 @@ dependencies = [ "libc", "rand_chacha", "rand_core 0.6.4", + "serde", ] [[package]] @@ -5720,6 +6862,15 @@ dependencies = [ "getrandom", ] +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "rand_xoshiro" version = "0.6.0" @@ -5769,9 +6920,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "355ae415ccd3a04315d3f8246e86d67689ea74d88d915576e1589a351062a13b" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] @@ -5789,14 +6940,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -5810,13 +6961,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -5827,9 +6978,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "rend" @@ -5854,7 +7005,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.30", + "hyper 0.14.31", "hyper-tls 0.5.0", "ipnet", "js-sys", @@ -5882,10 +7033,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.7" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ + "async-compression", "base64 0.22.1", "bytes", "encoding_rs", @@ -5896,7 +7048,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", 
"http-body-util", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-rustls 0.27.3", "hyper-tls 0.6.0", "hyper-util", @@ -5909,7 +7061,11 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile 2.1.3", + "quinn", + "rustls 0.23.16", + "rustls-native-certs 0.8.0", + "rustls-pemfile 2.2.0", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", @@ -5917,6 +7073,8 @@ dependencies = [ "system-configuration 0.6.1", "tokio", "tokio-native-tls", + "tokio-rustls 0.26.0", + "tokio-socks", "tokio-util", "tower-service", "url", @@ -5924,6 +7082,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", + "webpki-roots", "windows-registry", ] @@ -5936,7 +7095,7 @@ dependencies = [ "anyhow", "async-trait", "http 1.1.0", - "reqwest 0.12.7", + "reqwest 0.12.9", "serde", "thiserror", "tower-service", @@ -6003,6 +7162,15 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "rkyv" version = "0.7.45" @@ -6027,7 +7195,7 @@ version = "0.7.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -6078,6 +7246,36 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ruint" +version = "1.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp", + "num-bigint 0.4.6", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand 0.8.5", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + [[package]] name = "rust_decimal" version = "1.36.0" @@ -6106,26 +7304,44 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hash" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +dependencies = [ + "rand 0.8.5", +] + [[package]] name = "rustc-hex" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + [[package]] name = "rustc_version" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver", + "semver 1.0.23", ] [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" dependencies = [ "bitflags 2.6.0", "errno", @@ -6162,9 +7378,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.13" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "aws-lc-rs", "log", @@ -6195,7 +7411,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", - "rustls-pemfile 2.1.3", + "rustls-pemfile 2.2.0", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.2.0", "rustls-pki-types", "schannel", "security-framework", @@ -6212,19 +7441,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" [[package]] name = "rustls-platform-verifier" @@ -6237,7 +7465,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.13", + "rustls 0.23.16", "rustls-native-certs 0.7.3", "rustls-platform-verifier-android", "rustls-webpki 0.102.8", @@ -6277,9 +7505,21 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" + +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error 1.2.3", + "tempfile", + "wait-timeout", +] [[package]] name = "ruzstd" @@ -6341,7 +7581,7 @@ checksum = "d3475108a1b62c7efd1b5c65974f30109a598b2f45f23c9ae030acb9686966db" dependencies = [ "darling 0.14.4", "proc-macro-crate 1.3.1", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -6369,20 +7609,20 @@ checksum = "995491f110efdc6bea96d6a746140e32bfceb4ea47510750a5467295a4707a25" dependencies = [ "darling 0.14.4", "proc-macro-crate 1.3.1", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] [[package]] name = "scale-info" -version = "2.11.3" +version = "2.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" +checksum = 
"1aa7ffc1c0ef49b0452c6e2986abf2b07743320641ffd5fc63d552458e3b779b" dependencies = [ "bitvec", "cfg-if", - "derive_more 0.99.18", + "derive_more 1.0.0", "parity-scale-codec", "scale-info-derive", "serde", @@ -6390,14 +7630,14 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.3" +version = "2.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" +checksum = "46385cc24172cf615450267463f937c10072516359b3ff1cb24228a4a08bf951" dependencies = [ "proc-macro-crate 3.2.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 1.0.109", + "syn 2.0.85", ] [[package]] @@ -6406,10 +7646,10 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00860983481ac590ac87972062909bef0d6a658013b592ccc0f2feb272feab11" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "scale-info", - "syn 2.0.77", + "syn 2.0.85", "thiserror", ] @@ -6435,9 +7675,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" +checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ "windows-sys 0.59.0", ] @@ -6568,7 +7808,7 @@ name = "selector_generator" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", + "clap 4.5.20", "ethabi", "glob", "hex", @@ -6577,6 +7817,15 @@ dependencies = [ "tokio", ] +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + [[package]] name = "semver" version = "1.0.23" @@ -6586,6 +7835,15 @@ dependencies = [ "serde", ] +[[package]] +name = "semver-parser" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] + [[package]] name = "send_wrapper" version = "0.4.0" @@ -6632,7 +7890,7 @@ dependencies = [ "hostname", "libc", "os_info", - "rustc_version", + "rustc_version 0.4.1", "sentry-core", "uname", ] @@ -6708,9 +7966,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.210" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" dependencies = [ "serde_derive", ] @@ -6736,20 +7994,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" 
dependencies = [ "itoa", "memchr", @@ -6777,6 +8035,17 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_repr" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.85", +] + [[package]] name = "serde_spanned" version = "0.6.8" @@ -6817,7 +8086,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -6828,7 +8097,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -6916,6 +8185,16 @@ dependencies = [ "keccak", ] +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + [[package]] name = "sha3_ce" version = "0.10.6" @@ -6970,6 +8249,12 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + [[package]] name = "simdutf8" version = "0.1.5" @@ -7162,7 +8447,7 @@ name = "snapshots_creator" version = "0.1.0" dependencies = [ "anyhow", - "futures 0.3.30", + "futures 0.3.31", "rand 0.8.5", "structopt", "test-casing", @@ -7189,7 +8474,7 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek", "rand_core 0.6.4", - "rustc_version", + "rustc_version 0.4.1", "sha2 0.10.8", "subtle", ] @@ -7212,7 +8497,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "futures 0.3.30", + "futures 0.3.31", "httparse", "log", "rand 0.8.5", @@ -7227,7 +8512,7 @@ checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" dependencies = [ "base64 0.22.1", "bytes", - "futures 0.3.30", + "futures 0.3.31", "http 1.1.0", "httparse", "log", @@ -7235,6 +8520,20 @@ dependencies = [ "sha1", ] +[[package]] +name = "solang-parser" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c425ce1c59f4b154717592f0bdf4715c3a1d55058883622d3157e1f0908a5b26" +dependencies = [ + "itertools 0.11.0", + "lalrpop", + "lalrpop-util", + "phf", + "thiserror", + "unicode-xid 0.2.6", +] + [[package]] name = "sp-core-hashing" version = "15.0.0" @@ -7330,7 +8629,7 @@ dependencies = [ "hashbrown 0.14.5", "hashlink", "hex", - "indexmap 2.5.0", + "indexmap 2.6.0", "ipnetwork", "log", "memchr", @@ -7357,11 +8656,11 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cac0692bcc9de3b073e8d747391827297e075c7710ff6276d9f7a1f3d58c6657" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "sqlx-core", "sqlx-macros-core", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -7375,7 +8674,7 @@ dependencies = [ "heck 0.5.0", "hex", "once_cell", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "serde", "serde_json", @@ -7384,7 +8683,7 @@ dependencies = [ "sqlx-mysql", 
"sqlx-postgres", "sqlx-sqlite", - "syn 2.0.77", + "syn 2.0.85", "tempfile", "tokio", "url", @@ -7523,7 +8822,7 @@ dependencies = [ "new_debug_unreachable", "once_cell", "parking_lot", - "phf_shared", + "phf_shared 0.10.0", "precomputed-hash", ] @@ -7575,7 +8874,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -7596,10 +8895,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "rustversion", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -7608,6 +8907,21 @@ version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +[[package]] +name = "subtle-encoding" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dcb1ed7b8330c5eed5441052651dd7a12c75e2ed88f2ec024ae1fa3a5e59945" +dependencies = [ + "zeroize", +] + +[[package]] +name = "subtle-ng" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" + [[package]] name = "subxt" version = "0.34.0" @@ -7620,7 +8934,7 @@ dependencies = [ "derivative", "either", "frame-metadata 16.0.0", - "futures 0.3.30", + "futures 0.3.31", "hex", "impl-serde", "instant", @@ -7655,12 +8969,12 @@ dependencies = [ "hex", "jsonrpsee 0.21.0", "parity-scale-codec", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "scale-info", "scale-typegen", "subxt-metadata", - "syn 2.0.77", + "syn 2.0.85", "thiserror", "tokio", ] @@ -7671,7 +8985,7 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecec7066ba7bc0c3608fcd1d0c7d9584390990cd06095b6ae4f114f74c4b8550" dependencies = [ - "futures 0.3.30", + "futures 0.3.31", "futures-util", "serde", "serde_json", @@ -7694,7 +9008,7 @@ dependencies = [ "quote 1.0.37", "scale-typegen", "subxt-codegen", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -7725,10 +9039,43 @@ dependencies = [ "schnorrkel", "secrecy", "sha2 0.10.8", - "sp-core-hashing", - "subxt", + "sp-core-hashing", + "subxt", + "thiserror", + "zeroize", +] + +[[package]] +name = "svm-rs" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4aebac1b1ef2b46e2e2bdf3c09db304800f2a77c1fa902bd5231490203042be8" +dependencies = [ + "const-hex", + "dirs", + "fs4 0.9.1", + "reqwest 0.12.9", + "semver 1.0.23", + "serde", + "serde_json", + "sha2 0.10.8", + "tempfile", "thiserror", - "zeroize", + "url", + "zip", +] + +[[package]] +name = "svm-rs-builds" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2fa0f145894cb4d1c14446f08098ee5f21fc37ccbd1a7dd9dd355bbc806de3b" +dependencies = [ + "build_const", + "const-hex", + "semver 1.0.23", + "serde_json", + "svm-rs", ] [[package]] @@ -7748,18 +9095,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.77" +version = "2.0.85" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" +checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "unicode-ident", ] @@ -7771,9 +9118,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -7859,11 +9206,17 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +[[package]] +name = "target-triple" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42a4d50cdb458045afc8131fd91b64904da29548bcb63c7236e0844936c13078" + [[package]] name = "tempfile" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", "fastrand", @@ -7892,6 +9245,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" +dependencies = [ + "rustix", + "windows-sys 0.59.0", +] + [[package]] name = "test-casing" version = "0.1.3" @@ -7907,9 +9270,9 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9b53c7124dd88026d5d98a1eb1fd062a578b7d783017c9298825526c7fb6427" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -7929,9 +9292,9 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -7951,22 +9314,22 @@ checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" [[package]] name = "thiserror" -version = "1.0.64" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" +checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.64" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -8097,9 +9460,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.40.0" +version = "1.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" +checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" dependencies = [ "backtrace", 
"bytes", @@ -8113,15 +9476,25 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-macros" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -8161,11 +9534,23 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.13", + "rustls 0.23.16", "rustls-pki-types", "tokio", ] +[[package]] +name = "tokio-socks" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d4770b8024672c1101b3f6733eab95b18007dbe0847a8afe341fcf79e06043f" +dependencies = [ + "either", + "futures-util", + "thiserror", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.16" @@ -8219,7 +9604,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "toml_datetime", "winnow 0.5.40", ] @@ -8230,13 +9615,44 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", "winnow 0.6.20", ] +[[package]] +name = "tonic" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" +dependencies = [ + "async-stream", + "async-trait", + "axum 0.6.20", + "base64 0.21.7", + "bytes", + "h2 0.3.26", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.31", + "hyper-timeout 0.4.1", + "percent-encoding", + "pin-project", + "prost 0.12.6", + "rustls-native-certs 0.7.3", + "rustls-pemfile 2.2.0", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.25.0", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tonic" version = "0.12.3" @@ -8245,15 +9661,15 @@ checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ "async-stream", "async-trait", - "axum", + "axum 0.7.7", "base64 0.22.1", "bytes", "h2 0.4.6", "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", - "hyper-timeout", + "hyper 1.5.0", + "hyper-timeout 0.5.1", "hyper-util", "percent-encoding", "pin-project", @@ -8309,13 +9725,16 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ + "async-compression", "bitflags 2.6.0", "bytes", + "futures-core", "http 1.1.0", "http-body 1.0.1", "http-body-util", "pin-project-lite", "tokio", + "tokio-util", "tower-layer", "tower-service", ] @@ -8350,9 +9769,9 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 
1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -8428,9 +9847,9 @@ dependencies = [ [[package]] name = "triomphe" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6631e42e10b40c0690bf92f404ebcfe6e1fdb480391d15f17cc8e96eeed5369" +checksum = "ef8f7726da4807b58ea5c96fdc122f80702030edc33b35aff9190a51148ccc85" [[package]] name = "try-lock" @@ -8440,14 +9859,15 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.99" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "207aa50d36c4be8d8c6ea829478be44a372c6a77669937bb39c698e52f1491e8" +checksum = "8dcd332a5496c026f1e14b7f3d2b7bd98e509660c04239c58b0ba38a12daded4" dependencies = [ "glob", "serde", "serde_derive", "serde_json", + "target-triple", "termcolor", "toml", ] @@ -8477,9 +9897,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ucd-trie" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "uint" @@ -8502,20 +9922,23 @@ dependencies = [ "libc", ] +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicase" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" -dependencies = [ - "version_check", -] +checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" @@ -8534,9 +9957,9 @@ dependencies = [ [[package]] name = "unicode-properties" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" [[package]] name = "unicode-segmentation" @@ -8594,6 +10017,12 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" + [[package]] name = "untrusted" version = "0.9.0" @@ -8639,9 +10068,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" dependencies = [ "serde", ] @@ -8654,9 +10083,9 @@ 
checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "value-bag" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" +checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2" [[package]] name = "vcpkg" @@ -8709,7 +10138,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "671d3b894d5d0849f0a597f56bf071f42d4f2a1cbcf2f78ca21f870ab7c0cc2b" dependencies = [ - "hyper 0.14.30", + "hyper 0.14.31", "once_cell", "tokio", "tracing", @@ -8722,9 +10151,9 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a511871dc5de990a3b2a0e715facfbc5da848c0c0395597a1415029fb7c250a" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -8733,19 +10162,28 @@ version = "0.1.0" dependencies = [ "assert_matches", "criterion", - "iai", "once_cell", "rand 0.8.5", "tokio", "vise", + "yab", "zksync_contracts", "zksync_multivm", + "zksync_test_contracts", "zksync_types", - "zksync_utils", "zksync_vlog", "zksync_vm2", ] +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + [[package]] name = "walkdir" version = "2.5.0" @@ -8785,9 +10223,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if", "once_cell", @@ -8796,24 +10234,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.43" +version = "0.4.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" dependencies = [ "cfg-if", "js-sys", @@ -8823,9 +10261,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote 1.0.37", "wasm-bindgen-macro-support", @@ -8833,28 +10271,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = 
"26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "wasm-streams" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" dependencies = [ "futures-util", "js-sys", @@ -8905,9 +10343,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.70" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" dependencies = [ "js-sys", "wasm-bindgen", @@ -9221,6 +10659,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "yab" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b06cc62d4cec617d3c259537be0fcaa8a5bcf72ddf2983823d9528605f36ed3" +dependencies = [ + "anes", + "clap 4.5.20", + "num_cpus", + "thiserror", +] + [[package]] name = "yansi" version = "1.0.1" @@ -9249,9 +10699,9 @@ version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -9269,9 +10719,26 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", +] + +[[package]] +name = "zip" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc5e4288ea4057ae23afc69a4472434a87a2495cafce6632fd1c4ec9f5cf3494" +dependencies = [ + "arbitrary", + "crc32fast", + "crossbeam-utils", + "displaydoc", + "flate2", + "indexmap 2.6.0", + "memchr", + "thiserror", + "zopfli", ] [[package]] @@ -9342,9 +10809,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6e69931f24db5cf333b714721e8d80ff88bfdb7da8c3dc7882612ffddb8d27" +checksum = "3cc74fbe2b45fd19e95c59ea792c795feebdb616ebaa463f0ac567f495f47387" dependencies = [ "anyhow", "lazy_static", @@ -9352,7 +10819,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.5", + "zk_evm_abstractions 0.150.7", ] [[package]] @@ -9383,15 +10850,15 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6b0720261ab55490fe3a96e96de30d5d7b277940b52ea7f52dbf564eb1748" +checksum = "37f333a3b059899df09e40deb041af881bc03e496fda5eec618ffb5e854ee7df" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - 
"zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.7", ] [[package]] @@ -9440,9 +10907,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784fa7cfb51e17c5ced112bca43da30b3468b2347b7af0427ad9638759fb140e" +checksum = "d06fb35b00699d25175a2ad447f86a9088af8b0bc698bb57086fb04c13e52eab" dependencies = [ "arrayvec 0.7.6", "boojum", @@ -9454,7 +10921,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.7", "zksync_cs_derive", ] @@ -9502,9 +10969,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79055eae1b6c1ab80793ed9d77d2964c9c896afa4b5dfed278cf58cd10acfe8f" +checksum = "b83f3b279248af4ca86dec20a54127f02110b45570f3f6c1d13df49ba75c28a5" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -9537,7 +11004,6 @@ dependencies = [ "zksync_external_price_api", "zksync_node_fee_model", "zksync_types", - "zksync_utils", ] [[package]] @@ -9547,6 +11013,7 @@ dependencies = [ "anyhow", "bincode", "chrono", + "const-decoder", "ethabi", "hex", "num_enum 0.7.3", @@ -9554,6 +11021,7 @@ dependencies = [ "serde", "serde_json", "serde_with", + "sha2 0.10.8", "strum", "thiserror", "tiny-keccak 2.0.2", @@ -9572,7 +11040,7 @@ dependencies = [ "byteorder", "cfg-if", "crossbeam", - "futures 0.3.30", + "futures 0.3.31", "hex", "lazy_static", "num_cpus", @@ -9590,7 +11058,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.30", + "futures 0.3.31", "serde", "tempfile", "test-casing", @@ -9628,8 +11096,8 @@ dependencies = [ "anyhow", "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", - "circuit_sequencer_api 0.150.5", - "futures 0.3.30", + "circuit_sequencer_api 0.150.7", + "futures 0.3.31", "itertools 0.10.5", "num_cpus", "rand 0.8.5", @@ -9640,7 +11108,7 @@ dependencies = [ "vise", "zk_evm 0.133.0", "zk_evm 0.141.0", - "zk_evm 0.150.5", + "zk_evm 0.150.7", "zksync_contracts", "zksync_dal", "zksync_eth_client", @@ -9649,8 +11117,8 @@ dependencies = [ "zksync_multivm", "zksync_node_genesis", "zksync_node_test_utils", + "zksync_system_constants", "zksync_types", - "zksync_utils", "zksync_web3_decl", ] @@ -9682,12 +11150,7 @@ dependencies = [ "secrecy", "serde", "serde_json", - "strum", - "strum_macros", - "time", "tracing", - "url", - "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -9747,7 +11210,7 @@ dependencies = [ "anyhow", "async-trait", "rand 0.8.5", - "semver", + "semver 1.0.23", "tracing", "vise", "zksync_concurrency", @@ -9773,14 +11236,14 @@ dependencies = [ "bytesize", "http-body-util", "human-repr", - "hyper 1.4.1", + "hyper 1.5.0", "hyper-util", "im", "once_cell", "pin-project", "prost 0.12.6", "rand 0.8.5", - "semver", + "semver 1.0.23", "snow", "thiserror", "tls-listener", @@ -9881,15 +11344,17 @@ name = "zksync_contract_verification_server" version = "0.1.0" dependencies = [ "anyhow", - "axum", - "serde", + "axum 0.7.7", + "http-body-util", "serde_json", + "test-casing", "tokio", + "tower 0.4.13", "tower-http", "tracing", "vise", - "zksync_config", "zksync_dal", + "zksync_node_test_utils", "zksync_types", ] @@ -9898,16 +11363,13 @@ name = "zksync_contract_verifier" version = "0.1.0" dependencies = [ "anyhow", - "ctrlc", - "futures 0.3.30", - "structopt", + "clap 4.5.20", "tokio", "tracing", "zksync_config", 
"zksync_contract_verifier_lib", "zksync_core_leftovers", "zksync_dal", - "zksync_env_config", "zksync_queued_job_processor", "zksync_utils", "zksync_vlog", @@ -9918,37 +11380,40 @@ name = "zksync_contract_verifier_lib" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "chrono", "ethabi", "hex", - "lazy_static", "regex", - "semver", + "semver 1.0.23", "serde", "serde_json", "tempfile", + "test-casing", "thiserror", "tokio", "tracing", "vise", - "zksync_config", "zksync_contracts", "zksync_dal", + "zksync_node_test_utils", "zksync_queued_job_processor", "zksync_types", "zksync_utils", + "zksync_vm_interface", ] [[package]] name = "zksync_contracts" version = "0.1.0" dependencies = [ + "bincode", "envy", - "ethabi", "hex", "once_cell", "serde", "serde_json", + "zksync_basic_types", "zksync_utils", ] @@ -9980,7 +11445,6 @@ dependencies = [ "sha2 0.10.8", "thiserror", "zksync_basic_types", - "zksync_utils", ] [[package]] @@ -9990,7 +11454,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5939e2df4288c263c706ff18ac718e984149223ad4289d6d957d767dcfc04c81" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", ] @@ -10010,21 +11474,36 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "backon", "base58", + "bech32", + "bincode", "blake2 0.10.6", "blake2b_simd", + "bytes", + "celestia-types", "flate2", - "futures 0.3.30", + "futures 0.3.31", "hex", + "http 1.1.0", "jsonrpsee 0.23.2", "parity-scale-codec", + "pbjson-types", + "prost 0.12.6", + "reqwest 0.12.9", + "ripemd", "scale-encode", + "secp256k1", "serde", "serde_json", + "sha2 0.10.8", "subxt-metadata", "subxt-signer", "tokio", + "tokio-stream", + "tonic 0.11.0", "tracing", + "zksync_basic_types", "zksync_config", "zksync_da_client", "zksync_env_config", @@ -10038,7 +11517,7 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", - "futures 0.3.30", + "futures 0.3.31", "rand 0.8.5", "tokio", "tracing", @@ -10047,7 +11526,6 @@ dependencies = [ "zksync_da_client", "zksync_dal", "zksync_types", - "zksync_utils", ] [[package]] @@ -10081,9 +11559,8 @@ dependencies = [ "zksync_protobuf", "zksync_protobuf_build", "zksync_system_constants", - "zksync_test_account", + "zksync_test_contracts", "zksync_types", - "zksync_utils", "zksync_vm_interface", ] @@ -10165,7 +11642,6 @@ dependencies = [ "zksync_prover_interface", "zksync_shared_metrics", "zksync_types", - "zksync_utils", ] [[package]] @@ -10202,15 +11678,15 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.29.0" +version = "25.1.0" dependencies = [ "anyhow", "assert_matches", "async-trait", - "clap 4.5.18", + "clap 4.5.20", "envy", - "futures 0.3.30", - "rustc_version", + "futures 0.3.31", + "rustc_version 0.4.1", "serde", "serde_json", "tempfile", @@ -10251,7 +11727,6 @@ dependencies = [ "zksync_state_keeper", "zksync_storage", "zksync_types", - "zksync_utils", "zksync_vlog", "zksync_web3_decl", ] @@ -10267,7 +11742,7 @@ dependencies = [ "fraction", "httpmock", "rand 0.8.5", - "reqwest 0.12.7", + "reqwest 0.12.9", "serde", "serde_json", "tokio", @@ -10283,7 +11758,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "axum", + "axum 0.7.7", "bincode", "thiserror", "tokio", @@ -10317,7 +11792,7 @@ dependencies = [ "num-bigint 0.4.6", "num-integer", "num-traits", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", "serde", "syn 1.0.109", @@ -10329,7 +11804,7 @@ version = "0.1.0" dependencies = [ "assert_matches", 
"async-trait", - "futures 0.3.30", + "futures 0.3.31", "serde", "serde_json", "thiserror", @@ -10355,9 +11830,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb8a9c76c172a6d639855ee342b9a670e3ba472f5ae302f771b1c3ee777dc88" +checksum = "dc58af8e4e4ad1a851ffd2275e6a44ead0f15a7eaac9dc9d60a56b3b9c9b08e8" dependencies = [ "boojum", "derivative", @@ -10367,7 +11842,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.5", + "zkevm_circuits 0.150.7", ] [[package]] @@ -10415,7 +11890,7 @@ version = "0.1.0" dependencies = [ "anyhow", "assert_matches", - "clap 4.5.18", + "clap 4.5.20", "insta", "leb128", "once_cell", @@ -10436,7 +11911,6 @@ dependencies = [ "zksync_storage", "zksync_system_constants", "zksync_types", - "zksync_utils", ] [[package]] @@ -10446,11 +11920,11 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "axum", - "futures 0.3.30", + "axum 0.7.7", + "futures 0.3.31", "itertools 0.10.5", "once_cell", - "reqwest 0.12.7", + "reqwest 0.12.9", "serde", "serde_json", "tempfile", @@ -10471,7 +11945,6 @@ dependencies = [ "zksync_shared_metrics", "zksync_storage", "zksync_types", - "zksync_utils", ] [[package]] @@ -10494,12 +11967,13 @@ dependencies = [ "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.7", "ethabi", "hex", "itertools 0.10.5", "once_cell", "pretty_assertions", + "rand 0.8.5", "test-casing", "thiserror", "tracing", @@ -10508,13 +11982,13 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.5", + "zk_evm 0.150.7", "zksync_contracts", "zksync_eth_signer", + "zksync_mini_merkle_tree", "zksync_system_constants", - "zksync_test_account", + "zksync_test_contracts", "zksync_types", - "zksync_utils", "zksync_vm2", "zksync_vm_interface", ] @@ -10526,10 +12000,9 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "axum", + "axum 0.7.7", "chrono", - "const-decoder", - "futures 0.3.30", + "futures 0.3.31", "governor", "hex", "http 1.1.0", @@ -10549,7 +12022,7 @@ dependencies = [ "tower-http", "tracing", "vise", - "zk_evm 0.150.5", + "zk_evm 0.150.7", "zksync_config", "zksync_consensus_roles", "zksync_contracts", @@ -10567,8 +12040,8 @@ dependencies = [ "zksync_state", "zksync_state_keeper", "zksync_system_constants", + "zksync_test_contracts", "zksync_types", - "zksync_utils", "zksync_vm_executor", "zksync_web3_decl", ] @@ -10581,12 +12054,13 @@ dependencies = [ "async-trait", "rand 0.8.5", "secrecy", - "semver", + "semver 1.0.23", "tempfile", "test-casing", "thiserror", "tokio", "tracing", + "vise", "zksync_concurrency", "zksync_config", "zksync_consensus_bft", @@ -10609,9 +12083,8 @@ dependencies = [ "zksync_state", "zksync_state_keeper", "zksync_system_constants", - "zksync_test_account", + "zksync_test_contracts", "zksync_types", - "zksync_utils", "zksync_vm_executor", "zksync_vm_interface", "zksync_web3_decl", @@ -10645,7 +12118,6 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "bigdecimal", "test-casing", "tokio", "tracing", @@ -10654,7 +12126,6 @@ dependencies = [ "zksync_dal", "zksync_eth_client", "zksync_types", - "zksync_utils", "zksync_web3_decl", ] @@ -10666,9 +12137,9 @@ dependencies = [ "assert_matches", "async-trait", "ctrlc", - "futures 0.3.30", + "futures 0.3.31", "pin-project-lite", - "semver", + "semver 1.0.23", "thiserror", 
"tokio", "tracing", @@ -10710,7 +12181,6 @@ dependencies = [ "zksync_state", "zksync_state_keeper", "zksync_storage", - "zksync_tee_verifier_input_producer", "zksync_types", "zksync_utils", "zksync_vlog", @@ -10723,9 +12193,9 @@ dependencies = [ name = "zksync_node_framework_derive" version = "0.1.0" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -10746,7 +12216,6 @@ dependencies = [ "zksync_multivm", "zksync_system_constants", "zksync_types", - "zksync_utils", ] [[package]] @@ -10778,8 +12247,9 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", + "backon", "chrono", - "futures 0.3.30", + "futures 0.3.31", "once_cell", "serde", "serde_json", @@ -10790,6 +12260,7 @@ dependencies = [ "vise", "zksync_concurrency", "zksync_config", + "zksync_consensus_roles", "zksync_contracts", "zksync_dal", "zksync_eth_client", @@ -10800,7 +12271,6 @@ dependencies = [ "zksync_state_keeper", "zksync_system_constants", "zksync_types", - "zksync_utils", "zksync_vm_executor", "zksync_web3_decl", ] @@ -10812,11 +12282,9 @@ dependencies = [ "zksync_contracts", "zksync_dal", "zksync_merkle_tree", - "zksync_multivm", - "zksync_node_genesis", "zksync_system_constants", "zksync_types", - "zksync_utils", + "zksync_vm_interface", ] [[package]] @@ -10833,7 +12301,7 @@ dependencies = [ "http 1.1.0", "prost 0.12.6", "rand 0.8.5", - "reqwest 0.12.7", + "reqwest 0.12.9", "serde_json", "tempfile", "tokio", @@ -10862,12 +12330,13 @@ name = "zksync_proof_data_handler" version = "0.1.0" dependencies = [ "anyhow", - "axum", + "axum 0.7.7", "chrono", - "hyper 1.4.1", + "hyper 1.5.0", "serde_json", "tokio", "tower 0.4.13", + "tower-http", "tracing", "vise", "zksync_basic_types", @@ -10878,6 +12347,7 @@ dependencies = [ "zksync_object_store", "zksync_prover_interface", "zksync_types", + "zksync_vm_executor", ] [[package]] @@ -10890,7 +12360,7 @@ dependencies = [ "bit-vec", "once_cell", "prost 0.12.6", - "prost-reflect", + "prost-reflect 0.12.0", "quick-protobuf", "rand 0.8.5", "serde", @@ -10910,12 +12380,12 @@ dependencies = [ "anyhow", "heck 0.5.0", "prettyplease", - "proc-macro2 1.0.86", + "proc-macro2 1.0.89", "prost-build", - "prost-reflect", - "protox", + "prost-reflect 0.12.0", + "protox 0.5.1", "quote 1.0.37", - "syn 2.0.77", + "syn 2.0.85", ] [[package]] @@ -10929,7 +12399,6 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", - "time", "tracing", "zksync_basic_types", "zksync_config", @@ -10944,15 +12413,15 @@ version = "0.1.0" dependencies = [ "bincode", "chrono", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.7", "serde", "serde_json", "serde_with", "strum", "tokio", - "zksync_multivm", "zksync_object_store", "zksync_types", + "zksync_vm_interface", ] [[package]] @@ -10993,8 +12462,8 @@ name = "zksync_server" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.18", - "futures 0.3.30", + "clap 4.5.20", + "futures 0.3.31", "serde_json", "tikv-jemallocator", "tokio", @@ -11015,7 +12484,6 @@ dependencies = [ "zksync_protobuf_config", "zksync_storage", "zksync_types", - "zksync_utils", "zksync_vlog", ] @@ -11023,7 +12491,7 @@ dependencies = [ name = "zksync_shared_metrics" version = "0.1.0" dependencies = [ - "rustc_version", + "rustc_version 0.4.1", "tracing", "vise", "zksync_dal", @@ -11037,7 +12505,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.30", + "futures 0.3.31", "serde", "test-casing", "thiserror", @@ -11049,7 +12517,6 @@ dependencies = [ "zksync_health_check", 
"zksync_object_store", "zksync_types", - "zksync_utils", "zksync_web3_decl", ] @@ -11092,7 +12559,6 @@ dependencies = [ "zksync_shared_metrics", "zksync_storage", "zksync_types", - "zksync_utils", "zksync_vm_interface", ] @@ -11103,7 +12569,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.30", + "futures 0.3.31", "hex", "itertools 0.10.5", "once_cell", @@ -11129,9 +12595,8 @@ dependencies = [ "zksync_state", "zksync_storage", "zksync_system_constants", - "zksync_test_account", + "zksync_test_contracts", "zksync_types", - "zksync_utils", "zksync_vm_executor", ] @@ -11154,7 +12619,6 @@ version = "0.1.0" dependencies = [ "once_cell", "zksync_basic_types", - "zksync_utils", ] [[package]] @@ -11164,7 +12628,7 @@ dependencies = [ "anyhow", "async-trait", "envy", - "reqwest 0.12.7", + "reqwest 0.12.9", "secp256k1", "serde", "thiserror", @@ -11187,6 +12651,8 @@ name = "zksync_tee_verifier" version = "0.1.0" dependencies = [ "anyhow", + "bincode", + "once_cell", "serde", "tracing", "zksync_config", @@ -11194,43 +12660,24 @@ dependencies = [ "zksync_crypto_primitives", "zksync_merkle_tree", "zksync_multivm", - "zksync_object_store", - "zksync_prover_interface", - "zksync_types", - "zksync_utils", -] - -[[package]] -name = "zksync_tee_verifier_input_producer" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "tokio", - "tracing", - "vise", - "zksync_dal", - "zksync_object_store", "zksync_prover_interface", - "zksync_queued_job_processor", - "zksync_tee_verifier", "zksync_types", - "zksync_utils", - "zksync_vm_executor", ] [[package]] -name = "zksync_test_account" +name = "zksync_test_contracts" version = "0.1.0" dependencies = [ "ethabi", + "foundry-compilers", "hex", + "once_cell", "rand 0.8.5", - "zksync_contracts", + "serde", + "serde_json", "zksync_eth_signer", "zksync_system_constants", "zksync_types", - "zksync_utils", ] [[package]] @@ -11251,7 +12698,6 @@ dependencies = [ "once_cell", "prost 0.12.6", "rlp", - "secp256k1", "serde", "serde_json", "serde_with", @@ -11260,14 +12706,12 @@ dependencies = [ "tokio", "tracing", "zksync_basic_types", - "zksync_config", "zksync_contracts", "zksync_crypto_primitives", "zksync_mini_merkle_tree", "zksync_protobuf", "zksync_protobuf_build", "zksync_system_constants", - "zksync_utils", ] [[package]] @@ -11276,21 +12720,12 @@ version = "0.1.0" dependencies = [ "anyhow", "assert_matches", - "bigdecimal", - "bincode", - "futures 0.3.30", - "hex", - "num", + "futures 0.3.31", "once_cell", - "rand 0.8.5", - "reqwest 0.12.7", - "serde", + "reqwest 0.12.9", "serde_json", - "thiserror", "tokio", "tracing", - "zk_evm 0.133.0", - "zksync_basic_types", "zksync_vlog", ] @@ -11322,19 +12757,19 @@ dependencies = [ [[package]] name = "zksync_vm2" version = "0.2.1" -source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" +source = "git+https://github.com/matter-labs/vm2.git?rev=457d8a7eea9093af9440662e33e598c13ba41633#457d8a7eea9093af9440662e33e598c13ba41633" dependencies = [ "enum_dispatch", "primitive-types", - "zk_evm_abstractions 0.150.5", - "zkevm_opcode_defs 0.150.5", + "zk_evm_abstractions 0.150.7", + "zkevm_opcode_defs 0.150.7", "zksync_vm2_interface", ] [[package]] name = "zksync_vm2_interface" version = "0.2.1" -source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" +source = 
"git+https://github.com/matter-labs/vm2.git?rev=457d8a7eea9093af9440662e33e598c13ba41633#457d8a7eea9093af9440662e33e598c13ba41633" dependencies = [ "primitive-types", ] @@ -11347,6 +12782,7 @@ dependencies = [ "assert_matches", "async-trait", "once_cell", + "test-casing", "tokio", "tracing", "vise", @@ -11354,7 +12790,6 @@ dependencies = [ "zksync_dal", "zksync_multivm", "zksync_types", - "zksync_utils", ] [[package]] @@ -11383,8 +12818,8 @@ dependencies = [ "assert_matches", "async-trait", "backon", - "dashmap", - "futures 0.3.30", + "dashmap 5.5.3", + "futures 0.3.31", "once_cell", "rand 0.8.5", "serde", @@ -11403,9 +12838,8 @@ dependencies = [ "zksync_prover_interface", "zksync_state", "zksync_storage", - "zksync_test_account", + "zksync_test_contracts", "zksync_types", - "zksync_utils", "zksync_vm_executor", "zksync_vm_interface", ] @@ -11417,12 +12851,12 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.30", + "futures 0.3.31", "jsonrpsee 0.23.2", "pin-project-lite", "rand 0.8.5", "rlp", - "rustls 0.23.13", + "rustls 0.23.16", "serde", "serde_json", "test-casing", @@ -11434,6 +12868,38 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zopfli" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5019f391bac5cf252e93bbcc53d039ffd62c7bfb7c150414d61369afe57e946" +dependencies = [ + "bumpalo", + "crc32fast", + "lockfree-object-pool", + "log", + "once_cell", + "simd-adler32", +] + +[[package]] +name = "zstd" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" +dependencies = [ + "zstd-sys", +] + [[package]] name = "zstd-sys" version = "2.0.13+zstd.1.5.6" diff --git a/Cargo.toml b/Cargo.toml index 60b5628f4191..af7620a5216f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,7 +35,6 @@ members = [ "core/node/consensus", "core/node/contract_verification_server", "core/node/api_server", - "core/node/tee_verifier_input_producer", "core/node/base_token_adjuster", "core/node/external_proof_integration_api", "core/node/logs_bloom_backfill", @@ -76,8 +75,8 @@ members = [ "core/lib/snapshots_applier", "core/lib/crypto_primitives", "core/lib/external_price_api", + "core/lib/test_contracts", # Test infrastructure - "core/tests/test_account", "core/tests/loadnext", "core/tests/vm-benchmark", ] @@ -111,6 +110,7 @@ backon = "0.4.4" bigdecimal = "0.4.5" bincode = "1" blake2 = "0.10" +bytes = "1" chrono = "0.4" clap = "4.2.2" codegen = "0.2.0" @@ -122,6 +122,7 @@ derive_more = "1.0.0" envy = "0.4" ethabi = "18.0.0" flate2 = "1.0.28" +fraction = "0.15.3" futures = "0.3" glob = "0.3" google-cloud-auth = "0.16.0" @@ -129,13 +130,12 @@ google-cloud-storage = "0.20.0" governor = "0.4.2" hex = "0.4" http = "1.1" +http-body-util = "0.1.2" httpmock = "0.7.0" hyper = "1.3" -iai = "0.1" insta = "1.29.0" itertools = "0.10" jsonrpsee = { version = "0.23", default-features = false } -lazy_static = "1.4" leb128 = "0.2.5" lru = { version = "0.12.1", default-features = false } mini-moka = "0.10.0" @@ -150,13 +150,13 @@ opentelemetry-semantic-conventions = "0.16.0" opentelemetry-appender-tracing = "0.5" pin-project-lite = "0.2.13" pretty_assertions = "1" -prost = "0.12.1" +prost = "0.12.6" rand = "0.8" 
rayon = "1.3.1" regex = "1" reqwest = "0.12" rlp = "0.5" -rocksdb = "0.21.0" +rocksdb = "0.21" rustc_version = "0.4.0" rustls = "0.23" secp256k1 = { version = "0.27.0", features = ["recovery", "global-context"] } @@ -173,7 +173,6 @@ sqlx = "0.8.1" static_assertions = "1.1" structopt = "0.3.20" strum = "0.26" -strum_macros = "0.26.4" tempfile = "3.0.2" test-casing = "0.1.2" test-log = "0.2.15" @@ -190,7 +189,7 @@ tracing-opentelemetry = "0.25.0" time = "0.3.36" # Has to be same as used by `tracing-subscriber` url = "2" web3 = "0.19.0" -fraction = "0.15.3" +yab = "0.1.0" # Proc-macro syn = "2.0" @@ -201,6 +200,7 @@ trybuild = "1.0" # "Internal" dependencies vise = "0.2.0" vise-exporter = "0.2.0" +foundry-compilers = { version = "0.11.6", git = "https://github.com/Moonsong-Labs/compilers.git", rev = "7c69695e5c75451f158dd2456bf8c94a7492ea0b" } # DA clients' dependencies # Avail @@ -211,6 +211,16 @@ subxt-metadata = "0.34.0" parity-scale-codec = { version = "3.6.9", default-features = false } subxt-signer = { version = "0.34", default-features = false } +# Celestia +celestia-types = "0.6.1" +bech32 = "0.11.0" +ripemd = "0.1.3" +tonic = { version = "0.11.0", default-features = false } +pbjson-types = "0.6.0" + +# Eigen +tokio-stream = "0.1.16" + # Here and below: # We *always* pin the latest version of protocol to disallow accidental changes in the execution logic. # However, for the historical version of protocol crates, we have lax requirements. Otherwise, @@ -219,18 +229,18 @@ circuit_sequencer_api_1_3_3 = { package = "circuit_sequencer_api", version = "0. circuit_sequencer_api_1_4_0 = { package = "circuit_sequencer_api", version = "0.140" } circuit_sequencer_api_1_4_1 = { package = "circuit_sequencer_api", version = "0.141" } circuit_sequencer_api_1_4_2 = { package = "circuit_sequencer_api", version = "0.142" } -circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.5" } +circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.7" } crypto_codegen = { package = "zksync_solidity_vk_codegen", version = "=0.30.1" } -kzg = { package = "zksync_kzg", version = "=0.150.5" } +kzg = { package = "zksync_kzg", version = "=0.150.7" } zk_evm = { version = "=0.133.0" } zk_evm_1_3_1 = { package = "zk_evm", version = "0.131.0-rc.2" } zk_evm_1_3_3 = { package = "zk_evm", version = "0.133" } zk_evm_1_4_0 = { package = "zk_evm", version = "0.140" } zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } -zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.5" } +zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.7" } # New VM; pinned to a specific commit because of instability -zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "a233d44bbe61dc6a758a754c3b78fe4f83e56699" } +zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "457d8a7eea9093af9440662e33e598c13ba41633" } # Consensus dependencies. 
zksync_concurrency = "=0.5.0" @@ -275,7 +285,7 @@ zksync_state = { version = "0.1.0", path = "core/lib/state" } zksync_storage = { version = "0.1.0", path = "core/lib/storage" } zksync_system_constants = { version = "0.1.0", path = "core/lib/constants" } zksync_tee_verifier = { version = "0.1.0", path = "core/lib/tee_verifier" } -zksync_test_account = { version = "0.1.0", path = "core/tests/test_account" } +zksync_test_contracts = { version = "0.1.0", path = "core/lib/test_contracts" } zksync_types = { version = "0.1.0", path = "core/lib/types" } zksync_utils = { version = "0.1.0", path = "core/lib/utils" } zksync_web3_decl = { version = "0.1.0", path = "core/lib/web3_decl" } @@ -309,6 +319,5 @@ zksync_node_storage_init = { version = "0.1.0", path = "core/node/node_storage_i zksync_node_consensus = { version = "0.1.0", path = "core/node/consensus" } zksync_contract_verification_server = { version = "0.1.0", path = "core/node/contract_verification_server" } zksync_node_api_server = { version = "0.1.0", path = "core/node/api_server" } -zksync_tee_verifier_input_producer = { version = "0.1.0", path = "core/node/tee_verifier_input_producer" } zksync_base_token_adjuster = { version = "0.1.0", path = "core/node/base_token_adjuster" } zksync_logs_bloom_backfill = { version = "0.1.0", path = "core/node/logs_bloom_backfill" } diff --git a/README.md b/README.md index ce73242f11e7..f12ec08f3773 100644 --- a/README.md +++ b/README.md @@ -7,19 +7,12 @@ decentralization. Since it's EVM compatible (Solidity/Vyper), 99% of Ethereum pr or re-auditing a single line of code. ZKsync Era also uses an LLVM-based compiler that will eventually let developers write smart contracts in C++, Rust and other popular languages. -## Knowledge Index - -The following questions will be answered by the following resources: - -| Question | Resource | -| ------------------------------------------------------- | ---------------------------------------------- | -| What do I need to develop the project locally? | [development.md](docs/guides/development.md) | -| How can I set up my dev environment? | [setup-dev.md](docs/guides/setup-dev.md) | -| How can I run the project? | [launch.md](docs/guides/launch.md) | -| How can I build Docker images? | [build-docker.md](docs/guides/build-docker.md) | -| What is the logical project structure and architecture? | [architecture.md](docs/guides/architecture.md) | -| Where can I find protocol specs? | [specs.md](docs/specs/README.md) | -| Where can I find developer docs? 
| [docs](https://docs.zksync.io) | +## Documentation + +The most recent documentation can be found here: + +- [Core documentation](https://matter-labs.github.io/zksync-era/core/latest/) +- [Prover documentation](https://matter-labs.github.io/zksync-era/prover/latest/) ## Policies diff --git a/bin/ci_localnet_up b/bin/ci_localnet_up index 8673a909af77..c399de410d74 100755 --- a/bin/ci_localnet_up +++ b/bin/ci_localnet_up @@ -4,6 +4,5 @@ set -e cd $ZKSYNC_HOME -mkdir -p ./volumes/postgres ./volumes/reth/data run_retried docker-compose pull docker-compose --profile runner up -d --wait diff --git a/bin/run_loadtest_from_github_actions b/bin/run_loadtest_from_github_actions index f784ddd3180d..149988d63d8f 100755 --- a/bin/run_loadtest_from_github_actions +++ b/bin/run_loadtest_from_github_actions @@ -11,11 +11,12 @@ export TRANSACTION_WEIGHTS_WITHDRAWAL=${weights[3]} read -ra execution_params <<<"$CONTRACT_EXECUTION_PARAMS" #reading $CONTRACT_EXECUTION_PARAMS as an array as tokens separated by IFS export CONTRACT_EXECUTION_PARAMS_READS=${execution_params[0]} -export CONTRACT_EXECUTION_PARAMS_WRITES=${execution_params[1]} -export CONTRACT_EXECUTION_PARAMS_EVENTS=${execution_params[2]} -export CONTRACT_EXECUTION_PARAMS_HASHES=${execution_params[3]} -export CONTRACT_EXECUTION_PARAMS_RECURSIVE_CALLS=${execution_params[4]} -export CONTRACT_EXECUTION_PARAMS_DEPLOYS=${execution_params[5]} +export CONTRACT_EXECUTION_PARAMS_INITIAL_WRITES=${execution_params[1]} +export CONTRACT_EXECUTION_PARAMS_REPEATED_WRITES=${execution_params[2]} +export CONTRACT_EXECUTION_PARAMS_EVENTS=${execution_params[3]} +export CONTRACT_EXECUTION_PARAMS_HASHES=${execution_params[4]} +export CONTRACT_EXECUTION_PARAMS_RECURSIVE_CALLS=${execution_params[5]} +export CONTRACT_EXECUTION_PARAMS_DEPLOYS=${execution_params[6]} # Run the test cargo run --bin loadnext diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 59b49af15540..3ccd261273b1 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,76 @@ # Changelog +## [25.1.0](https://github.com/matter-labs/zksync-era/compare/core-v25.0.0...core-v25.1.0) (2024-11-04) + + +### Features + +* add `block.timestamp` asserter for AA ([#3031](https://github.com/matter-labs/zksync-era/issues/3031)) ([069d38d](https://github.com/matter-labs/zksync-era/commit/069d38d6c9ddd8b6c404596c479f94b9fc86db40)) +* allow vm2 tracers to stop execution ([#3183](https://github.com/matter-labs/zksync-era/issues/3183)) ([9dae839](https://github.com/matter-labs/zksync-era/commit/9dae839935d82a1e73be220d17567f3382131039)) +* **api:** get rid of tx receipt root ([#3187](https://github.com/matter-labs/zksync-era/issues/3187)) ([6c034f6](https://github.com/matter-labs/zksync-era/commit/6c034f6e180cc92e99766f14c8840c90efa56cec)) +* **api:** Integrate new VM into API server (no tracers) ([#3033](https://github.com/matter-labs/zksync-era/issues/3033)) ([8e75d4b](https://github.com/matter-labs/zksync-era/commit/8e75d4b812b21bc26e2c38ceeb711a8a530d7bc2)) +* base token integration tests ([#2509](https://github.com/matter-labs/zksync-era/issues/2509)) ([8db7e93](https://github.com/matter-labs/zksync-era/commit/8db7e9306e5fa23f066be106363e6455531bbc09)) +* **consensus:** enabled syncing pregenesis blocks over p2p ([#3192](https://github.com/matter-labs/zksync-era/issues/3192)) ([6adb224](https://github.com/matter-labs/zksync-era/commit/6adb2249ff0946ec6d02f25437c9f71b1079ad79)) +* **da-clients:** add Celestia client ([#2983](https://github.com/matter-labs/zksync-era/issues/2983)) 
([d88b875](https://github.com/matter-labs/zksync-era/commit/d88b875464ec5ac7e54aba0cc7c0a68c01969782)) +* **da-clients:** add EigenDA client ([#3155](https://github.com/matter-labs/zksync-era/issues/3155)) ([5161eed](https://github.com/matter-labs/zksync-era/commit/5161eeda5905d33f4d038a2a04ced3e06f39d593)) +* gateway preparation ([#3006](https://github.com/matter-labs/zksync-era/issues/3006)) ([16f2757](https://github.com/matter-labs/zksync-era/commit/16f275756cd28024a6b11ac1ac327eb5b8b446e1)) +* Implement gas relay mode and inclusion data for data attestation ([#3070](https://github.com/matter-labs/zksync-era/issues/3070)) ([561fc1b](https://github.com/matter-labs/zksync-era/commit/561fc1bddfc79061dab9d8d150baa06acfa90692)) +* **metadata-calculator:** Add debug endpoints for tree API ([#3167](https://github.com/matter-labs/zksync-era/issues/3167)) ([3815252](https://github.com/matter-labs/zksync-era/commit/3815252790fd0e9094f308b58dfde3a8b1a82277)) +* **proof-data-handler:** add first processed batch option ([#3112](https://github.com/matter-labs/zksync-era/issues/3112)) ([1eb69d4](https://github.com/matter-labs/zksync-era/commit/1eb69d467802d07f3fc6502de97ff04a69f952fc)) +* **proof-data-handler:** add tee_proof_generation_timeout_in_secs param ([#3128](https://github.com/matter-labs/zksync-era/issues/3128)) ([f3724a7](https://github.com/matter-labs/zksync-era/commit/f3724a71c7466451d380981b05d68d8afd70cdca)) +* **prover:** Add queue metric to report autoscaler view of the queue. ([#3206](https://github.com/matter-labs/zksync-era/issues/3206)) ([2721396](https://github.com/matter-labs/zksync-era/commit/272139690e028d3bdebdb6bcb1824fec23cefd0f)) +* **prover:** Add sending scale requests for Scaler targets ([#3194](https://github.com/matter-labs/zksync-era/issues/3194)) ([767c5bc](https://github.com/matter-labs/zksync-era/commit/767c5bc6a62c402c099abe93b7dbecbb59e4acb7)) +* **prover:** Add support for scaling WGs and compressor ([#3179](https://github.com/matter-labs/zksync-era/issues/3179)) ([c41db9e](https://github.com/matter-labs/zksync-era/commit/c41db9ecec1c21b80969604f703ac6990f6f3434)) +* **vm:** Support EVM emulation in fast VM ([#3163](https://github.com/matter-labs/zksync-era/issues/3163)) ([9ad1f0d](https://github.com/matter-labs/zksync-era/commit/9ad1f0d77e5a5b411f7866ef6a1819373c07f91b)) + + +### Bug Fixes + +* **consensus:** better logging of errors ([#3170](https://github.com/matter-labs/zksync-era/issues/3170)) ([a5028da](https://github.com/matter-labs/zksync-era/commit/a5028da65608898ad41c6a4fd5c6ec4c28a45703)) +* **consensus:** made attestation controller non-critical ([#3180](https://github.com/matter-labs/zksync-era/issues/3180)) ([6ee9f1f](https://github.com/matter-labs/zksync-era/commit/6ee9f1f431f95514d58db87a4562e09df9d09f86)) +* **consensus:** payload encoding protected by protocol_version ([#3168](https://github.com/matter-labs/zksync-era/issues/3168)) ([8089b78](https://github.com/matter-labs/zksync-era/commit/8089b78b3f2cdbe8d0a23e9b8412a8022d78ada2)) +* **da-clients:** add padding to the data within EigenDA blob ([#3203](https://github.com/matter-labs/zksync-era/issues/3203)) ([8ae06b2](https://github.com/matter-labs/zksync-era/commit/8ae06b237647715937fb3656d881c0fd460f2a07)) +* **da-clients:** enable tls-roots feature for tonic ([#3201](https://github.com/matter-labs/zksync-era/issues/3201)) ([42f177a](https://github.com/matter-labs/zksync-era/commit/42f177ac43b86cd24321ad9222121fc8a91c49e0)) +* extend allowed storage slots for validation as per EIP-7562 
([#3166](https://github.com/matter-labs/zksync-era/issues/3166)) ([c76da16](https://github.com/matter-labs/zksync-era/commit/c76da16efc769243a02c6e859376182d95ab941d)) +* **merkle-tree:** Fix tree truncation ([#3178](https://github.com/matter-labs/zksync-era/issues/3178)) ([9654097](https://github.com/matter-labs/zksync-era/commit/96540975d917761d8e464ebbdf52704955bcd898)) +* **tee_prover:** add prometheus pull listener ([#3169](https://github.com/matter-labs/zksync-era/issues/3169)) ([1ffd22f](https://github.com/matter-labs/zksync-era/commit/1ffd22ffbe710469de0e7f27c6aae29453ec6d3e)) +* update logging in cbt l1 behaviour ([#3149](https://github.com/matter-labs/zksync-era/issues/3149)) ([d0f61b0](https://github.com/matter-labs/zksync-era/commit/d0f61b0552dcacc2e8e33fdbcae6f1e5fbb43820)) + +## [25.0.0](https://github.com/matter-labs/zksync-era/compare/core-v24.29.0...core-v25.0.0) (2024-10-23) + + +### ⚠ BREAKING CHANGES + +* **contracts:** integrate protocol defense changes ([#2737](https://github.com/matter-labs/zksync-era/issues/2737)) + +### Features + +* Add CoinMarketCap external API ([#2971](https://github.com/matter-labs/zksync-era/issues/2971)) ([c1cb30e](https://github.com/matter-labs/zksync-era/commit/c1cb30e59ca1d0b5fea5fe0980082aea0eb04aa2)) +* **api:** Implement eth_maxPriorityFeePerGas ([#3135](https://github.com/matter-labs/zksync-era/issues/3135)) ([35e84cc](https://github.com/matter-labs/zksync-era/commit/35e84cc03a7fdd315932fb3020fe41c95a6e4bca)) +* **api:** Make acceptable values cache lag configurable ([#3028](https://github.com/matter-labs/zksync-era/issues/3028)) ([6747529](https://github.com/matter-labs/zksync-era/commit/67475292ff770d2edd6884be27f976a4144778ae)) +* **contracts:** integrate protocol defense changes ([#2737](https://github.com/matter-labs/zksync-era/issues/2737)) ([c60a348](https://github.com/matter-labs/zksync-era/commit/c60a3482ee09b3e371163e62f49e83bc6d6f4548)) +* **external-node:** save protocol version before opening a batch ([#3136](https://github.com/matter-labs/zksync-era/issues/3136)) ([d6de4f4](https://github.com/matter-labs/zksync-era/commit/d6de4f40ddce339c760c95e2bf4b8aceb571af7f)) +* Prover e2e test ([#2975](https://github.com/matter-labs/zksync-era/issues/2975)) ([0edd796](https://github.com/matter-labs/zksync-era/commit/0edd7962429b3530ae751bd7cc947c97193dd0ca)) +* **prover:** Add min_provers and dry_run features. Improve metrics and test. 
([#3129](https://github.com/matter-labs/zksync-era/issues/3129)) ([7c28964](https://github.com/matter-labs/zksync-era/commit/7c289649b7b3c418c7193a35b51c264cf4970f3c)) +* **tee_verifier:** speedup SQL query for new jobs ([#3133](https://github.com/matter-labs/zksync-era/issues/3133)) ([30ceee8](https://github.com/matter-labs/zksync-era/commit/30ceee8a48046e349ff0234ebb24d468a0e0876c)) +* vm2 tracers can access storage ([#3114](https://github.com/matter-labs/zksync-era/issues/3114)) ([e466b52](https://github.com/matter-labs/zksync-era/commit/e466b52948e3c4ed1cb5af4fd999a52028e4d216)) +* **vm:** Return compressed bytecodes from `push_transaction()` ([#3126](https://github.com/matter-labs/zksync-era/issues/3126)) ([37f209f](https://github.com/matter-labs/zksync-era/commit/37f209fec8e7cb65c0e60003d46b9ea69c43caf1)) + + +### Bug Fixes + +* **call_tracer:** Flat call tracer fixes for blocks ([#3095](https://github.com/matter-labs/zksync-era/issues/3095)) ([30ddb29](https://github.com/matter-labs/zksync-era/commit/30ddb292977340beab37a81f75c35480cbdd59d3)) +* **consensus:** preventing config update reverts ([#3148](https://github.com/matter-labs/zksync-era/issues/3148)) ([caee55f](https://github.com/matter-labs/zksync-era/commit/caee55fef4eed0ec58cceaeba277bbdedf5c6f51)) +* **en:** Return `SyncState` health check ([#3142](https://github.com/matter-labs/zksync-era/issues/3142)) ([abeee81](https://github.com/matter-labs/zksync-era/commit/abeee8190d3c3a5e577d71024bdfb30ff516ad03)) +* **external-node:** delete empty unsealed batch on EN initialization ([#3125](https://github.com/matter-labs/zksync-era/issues/3125)) ([5d5214b](https://github.com/matter-labs/zksync-era/commit/5d5214ba983823b306495d34fdd1d46abacce07a)) +* Fix counter metric type to be Counter. ([#3153](https://github.com/matter-labs/zksync-era/issues/3153)) ([08a3fe7](https://github.com/matter-labs/zksync-era/commit/08a3fe7ffd0410c51334193068649905337d5e84)) +* **mempool:** minor mempool improvements ([#3113](https://github.com/matter-labs/zksync-era/issues/3113)) ([cd16083](https://github.com/matter-labs/zksync-era/commit/cd160830a0b7ebe5af4ecbd944da1cd51af3528a)) +* **prover:** Run for zero queue to allow scaling down to 0 ([#3115](https://github.com/matter-labs/zksync-era/issues/3115)) ([bbe1919](https://github.com/matter-labs/zksync-era/commit/bbe191937fa5c5711a7164fd4f0c2ae65cda0833)) +* restore instruction count functionality ([#3081](https://github.com/matter-labs/zksync-era/issues/3081)) ([6159f75](https://github.com/matter-labs/zksync-era/commit/6159f7531a0340a69c4926c4e0325811ed7cabb8)) +* **state-keeper:** save call trace for upgrade txs ([#3132](https://github.com/matter-labs/zksync-era/issues/3132)) ([e1c363f](https://github.com/matter-labs/zksync-era/commit/e1c363f8f5e03c8d62bba1523f17b87d6a0e25ad)) +* **tee_prover:** add zstd compression ([#3144](https://github.com/matter-labs/zksync-era/issues/3144)) ([7241ae1](https://github.com/matter-labs/zksync-era/commit/7241ae139b2b6bf9a9966eaa2f22203583a3786f)) +* **tee_verifier:** correctly initialize storage for re-execution ([#3017](https://github.com/matter-labs/zksync-era/issues/3017)) ([9d88373](https://github.com/matter-labs/zksync-era/commit/9d88373f1b745c489e98e5ef542644a70e815498)) + ## [24.29.0](https://github.com/matter-labs/zksync-era/compare/core-v24.28.0...core-v24.29.0) (2024-10-14) diff --git a/core/bin/contract-verifier/Cargo.toml b/core/bin/contract-verifier/Cargo.toml index f088c2337e71..5e9a9efc6e7e 100644 --- a/core/bin/contract-verifier/Cargo.toml +++ 
b/core/bin/contract-verifier/Cargo.toml @@ -12,7 +12,6 @@ publish = false [dependencies] zksync_dal.workspace = true -zksync_env_config.workspace = true zksync_config = { workspace = true, features = ["observability_ext"] } zksync_contract_verifier_lib.workspace = true zksync_queued_job_processor.workspace = true @@ -21,8 +20,6 @@ zksync_vlog.workspace = true zksync_core_leftovers.workspace = true anyhow.workspace = true +clap = { workspace = true, features = ["derive"] } tokio = { workspace = true, features = ["full"] } -futures.workspace = true -ctrlc.workspace = true -structopt.workspace = true tracing.workspace = true diff --git a/core/bin/contract-verifier/src/main.rs index a8162de13e9d..88f25256c40d 100644 --- a/core/bin/contract-verifier/src/main.rs +++ b/core/bin/contract-verifier/src/main.rs @@ -1,132 +1,41 @@ -use std::{cell::RefCell, time::Duration}; +use std::{path::PathBuf, time::Duration}; -use anyhow::Context; -use futures::{channel::mpsc, executor::block_on, SinkExt, StreamExt}; -use structopt::StructOpt; +use anyhow::Context as _; +use clap::Parser; use tokio::sync::watch; use zksync_config::configs::PrometheusConfig; use zksync_contract_verifier_lib::ContractVerifier; use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_general_config}; -use zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_dal::{ConnectionPool, Core}; use zksync_queued_job_processor::JobProcessor; -use zksync_utils::{env::Workspace, wait_for_tasks::ManagedTasks}; +use zksync_utils::wait_for_tasks::ManagedTasks; use zksync_vlog::prometheus::PrometheusExporterConfig; -async fn update_compiler_versions(connection_pool: &ConnectionPool<Core>) { - let mut storage = connection_pool.connection().await.unwrap(); - let mut transaction = storage.start_transaction().await.unwrap(); - - let zksync_home = Workspace::locate().core(); - - let zksolc_path = zksync_home.join("etc/zksolc-bin/"); - let zksolc_versions: Vec<String> = std::fs::read_dir(zksolc_path) - .unwrap() - .filter_map(|file| { - let file = file.unwrap(); - let Ok(file_type) = file.file_type() else { - return None; - }; - if file_type.is_dir() { - file.file_name().into_string().ok() - } else { - None - } - }) - .collect(); - transaction - .contract_verification_dal() - .set_zksolc_versions(zksolc_versions) - .await - .unwrap(); - - let solc_path = zksync_home.join("etc/solc-bin/"); - let solc_versions: Vec<String> = std::fs::read_dir(solc_path) - .unwrap() - .filter_map(|file| { - let file = file.unwrap(); - let Ok(file_type) = file.file_type() else { - return None; - }; - if file_type.is_dir() { - file.file_name().into_string().ok() - } else { - None - } - }) - .collect(); - transaction - .contract_verification_dal() - .set_solc_versions(solc_versions) - .await - .unwrap(); - - let zkvyper_path = zksync_home.join("etc/zkvyper-bin/"); - let zkvyper_versions: Vec<String> = std::fs::read_dir(zkvyper_path) - .unwrap() - .filter_map(|file| { - let file = file.unwrap(); - let Ok(file_type) = file.file_type() else { - return None; - }; - if file_type.is_dir() { - file.file_name().into_string().ok() - } else { - None - } - }) - .collect(); - transaction - .contract_verification_dal() - .set_zkvyper_versions(zkvyper_versions) - .await - .unwrap(); - - let vyper_path = zksync_home.join("etc/vyper-bin/"); - let vyper_versions: Vec<String> = std::fs::read_dir(vyper_path) - .unwrap() - .filter_map(|file| { - let file = file.unwrap(); - let Ok(file_type) = file.file_type() else { - return None; - }; - if file_type.is_dir() { -
file.file_name().into_string().ok() - } else { - None - } - }) - .collect(); - - transaction - .contract_verification_dal() - .set_vyper_versions(vyper_versions) - .await - .unwrap(); - - transaction.commit().await.unwrap(); -} - -#[derive(StructOpt)] -#[structopt(name = "ZKsync contract code verifier", author = "Matter Labs")] +#[derive(Debug, Parser)] +#[command(name = "ZKsync contract code verifier", author = "Matter Labs")] struct Opt { /// Number of jobs to process. If None, runs indefinitely. - #[structopt(long)] + #[arg(long)] jobs_number: Option<usize>, /// Path to the configuration file. - #[structopt(long)] - config_path: Option<std::path::PathBuf>, + #[arg(long)] + config_path: Option<PathBuf>, /// Path to the secrets file. - #[structopt(long)] - secrets_path: Option<std::path::PathBuf>, + #[arg(long)] + secrets_path: Option<PathBuf>, } #[tokio::main] async fn main() -> anyhow::Result<()> { - let opt = Opt::from_args(); + let opt = Opt::parse(); let general_config = load_general_config(opt.config_path).context("general config")?; - let database_secrets = load_database_secrets(opt.secrets_path).context("database secrets")?; + let observability_config = general_config + .observability + .context("ObservabilityConfig")?; + let _observability_guard = observability_config.install()?; + let database_secrets = load_database_secrets(opt.secrets_path).context("database secrets")?; let verifier_config = general_config .contract_verifier .context("ContractVerifierConfig")?; @@ -140,33 +49,13 @@ async fn main() -> anyhow::Result<()> { .context("Master DB URL is absent")?, ) .build() - .await - .unwrap(); - - let observability_config = general_config - .observability - .context("ObservabilityConfig")?; - - let _observability_guard = observability_config.install()?; + .await?; let (stop_sender, stop_receiver) = watch::channel(false); - let (stop_signal_sender, mut stop_signal_receiver) = mpsc::channel(256); - { - let stop_signal_sender = RefCell::new(stop_signal_sender.clone()); - ctrlc::set_handler(move || { - let mut sender = stop_signal_sender.borrow_mut(); - block_on(sender.send(true)).expect("Ctrl+C signal send"); - }) - .expect("Error setting Ctrl+C handler"); - } - - update_compiler_versions(&pool).await; - - let contract_verifier = ContractVerifier::new(verifier_config, pool); + let contract_verifier = ContractVerifier::new(verifier_config.compilation_timeout(), pool) + .await + .context("failed initializing contract verifier")?; let tasks = vec![ - // TODO PLA-335: Leftovers after the prover DB split. - // The prover connection pool is not used by the contract verifier, but we need to pass it - // since `JobProcessor` trait requires it. tokio::spawn(contract_verifier.run(stop_receiver.clone(), opt.jobs_number)), tokio::spawn( PrometheusExporterConfig::pull(prometheus_config.listener_port).run(stop_receiver), @@ -176,7 +65,7 @@ async fn main() -> anyhow::Result<()> { let mut tasks = ManagedTasks::new(tasks); tokio::select!
{ () = tasks.wait_single() => {}, - _ = stop_signal_receiver.next() => { + _ = tokio::signal::ctrl_c() => { tracing::info!("Stop signal received, shutting down"); }, }; diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 25f2400c79bb..9979e988bbbb 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.29.0" # x-release-please-version +version = "25.1.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true @@ -20,7 +20,6 @@ zksync_config.workspace = true zksync_protobuf_config.workspace = true zksync_eth_client.workspace = true zksync_storage.workspace = true -zksync_utils.workspace = true zksync_state.workspace = true zksync_contracts.workspace = true zksync_l1_contract_interface.workspace = true diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 56ee3edfd253..81604f83008a 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -1,6 +1,7 @@ use std::{ env, ffi::OsString, + future::Future, num::{NonZeroU32, NonZeroU64, NonZeroUsize}, path::PathBuf, time::Duration, @@ -24,7 +25,7 @@ use zksync_core_leftovers::temp_config_store::read_yaml_repr; use zksync_dal::{ConnectionPool, Core}; use zksync_metadata_calculator::MetadataCalculatorRecoveryConfig; use zksync_node_api_server::{ - tx_sender::TxSenderConfig, + tx_sender::{TimestampAsserterParams, TxSenderConfig}, web3::{state::InternalApiConfig, Namespace}, }; use zksync_protobuf_config::proto; @@ -110,12 +111,18 @@ pub(crate) struct RemoteENConfig { // the `l2_erc20_bridge_addr` and `l2_shared_bridge_addr` are basically the same contract, but with // a different name, with names adapted only for consistency. pub l1_shared_bridge_proxy_addr: Option
<Address>, + /// Contract address that serves as a shared bridge on L2. + /// It is expected that `L2SharedBridge` is used before gateway upgrade, and `L2AssetRouter` is used after. pub l2_shared_bridge_addr: Option<Address>, + /// Address of `L2SharedBridge` that was used before gateway upgrade. + /// `None` if chain genesis used post-gateway protocol version. + pub l2_legacy_shared_bridge_addr: Option<Address>, pub l1_erc20_bridge_proxy_addr: Option<Address>, pub l2_erc20_bridge_addr: Option<Address>, pub l1_weth_bridge_addr: Option<Address>, pub l2_weth_bridge_addr: Option<Address>, pub l2_testnet_paymaster_addr: Option<Address>, + pub l2_timestamp_asserter_addr: Option<Address>
, pub base_token_addr: Address, pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, pub dummy_verifier: bool, @@ -141,22 +148,19 @@ impl RemoteENConfig { .get_main_contract() .rpc_context("get_main_contract") .await?; - let base_token_addr = match client.get_base_token_l1_address().await { - Err(ClientError::Call(err)) - if [ - ErrorCode::MethodNotFound.code(), - // This what `Web3Error::NotImplemented` gets - // `casted` into in the `api` server. - ErrorCode::InternalError.code(), - ] - .contains(&(err.code())) => - { - // This is the fallback case for when the EN tries to interact - // with a node that does not implement the `zks_baseTokenL1Address` endpoint. - ETHEREUM_ADDRESS - } - response => response.context("Failed to fetch base token address")?, - }; + + let timestamp_asserter_address = handle_rpc_response_with_fallback( + client.get_timestamp_asserter(), + None, + "Failed to fetch timestamp asserter address".to_string(), + ) + .await?; + let base_token_addr = handle_rpc_response_with_fallback( + client.get_base_token_l1_address(), + ETHEREUM_ADDRESS, + "Failed to fetch base token address".to_string(), + ) + .await?; // These two config variables should always have the same value. // TODO(EVM-578): double check and potentially forbid both of them being `None`. @@ -189,6 +193,7 @@ impl RemoteENConfig { l2_erc20_bridge_addr: l2_erc20_default_bridge, l1_shared_bridge_proxy_addr: bridges.l1_shared_default_bridge, l2_shared_bridge_addr: l2_erc20_shared_bridge, + l2_legacy_shared_bridge_addr: bridges.l2_legacy_shared_bridge, l1_weth_bridge_addr: bridges.l1_weth_bridge, l2_weth_bridge_addr: bridges.l2_weth_bridge, base_token_addr, @@ -200,6 +205,7 @@ impl RemoteENConfig { .as_ref() .map(|a| a.dummy_verifier) .unwrap_or_default(), + l2_timestamp_asserter_addr: timestamp_asserter_address, }) } @@ -218,9 +224,36 @@ impl RemoteENConfig { l1_shared_bridge_proxy_addr: Some(Address::repeat_byte(5)), l1_weth_bridge_addr: None, l2_shared_bridge_addr: Some(Address::repeat_byte(6)), + l2_legacy_shared_bridge_addr: Some(Address::repeat_byte(7)), l1_batch_commit_data_generator_mode: L1BatchCommitmentMode::Rollup, dummy_verifier: true, + l2_timestamp_asserter_addr: None, + } + } +} + +async fn handle_rpc_response_with_fallback( + rpc_call: F, + fallback: T, + context: String, +) -> anyhow::Result +where + F: Future>, + T: Clone, +{ + match rpc_call.await { + Err(ClientError::Call(err)) + if [ + ErrorCode::MethodNotFound.code(), + // This what `Web3Error::NotImplemented` gets + // `casted` into in the `api` server. + ErrorCode::InternalError.code(), + ] + .contains(&(err.code())) => + { + Ok(fallback) } + response => response.context(context), } } @@ -375,6 +408,9 @@ pub(crate) struct OptionalENConfig { /// Timeout to wait for the Merkle tree database to run compaction on stalled writes. #[serde(default = "OptionalENConfig::default_merkle_tree_stalled_writes_timeout_sec")] merkle_tree_stalled_writes_timeout_sec: u64, + /// Enables the stale keys repair task for the Merkle tree. + #[serde(default)] + pub merkle_tree_repair_stale_keys: bool, // Postgres config (new parameters) /// Threshold in milliseconds for the DB connection lifetime to denote it as long-living and log its details. @@ -447,6 +483,9 @@ pub(crate) struct OptionalENConfig { pub gateway_url: Option, /// Interval for bridge addresses refreshing in seconds. 
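An aside on `handle_rpc_response_with_fallback` above: it factors out the pattern of treating `MethodNotFound` (and the `InternalError` that `Web3Error::NotImplemented` gets cast into by the `api` server) as "endpoint not implemented on the main node", returning a caller-supplied fallback instead of an error. A minimal usage sketch, assuming a hypothetical `get_foo` client call (the two real call sites are in the hunk above):

// Hypothetical third call site mirroring `get_timestamp_asserter` above; an older
// main node that lacks the endpoint yields the fallback (`None`) instead of an error.
let foo_addr = handle_rpc_response_with_fallback(
    client.get_foo(), // assumed to resolve to `Result<Option<Address>, ClientError>`
    None,             // fallback used when the endpoint is missing
    "Failed to fetch foo address".to_string(),
)
.await?;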
bridge_addresses_refresh_interval_sec: Option, + /// Minimum time between current block.timestamp and the end of the asserted range for TimestampAsserter + #[serde(default = "OptionalENConfig::default_timestamp_asserter_min_time_till_end_sec")] + pub timestamp_asserter_min_time_till_end_sec: u32, } impl OptionalENConfig { @@ -603,6 +642,12 @@ impl OptionalENConfig { merkle_tree.stalled_writes_timeout_sec, default_merkle_tree_stalled_writes_timeout_sec ), + merkle_tree_repair_stale_keys: general_config + .db_config + .as_ref() + .map_or(false, |config| { + config.experimental.merkle_tree_repair_stale_keys + }), database_long_connection_threshold_ms: load_config!( general_config.postgres_config, long_connection_threshold_ms @@ -678,6 +723,11 @@ impl OptionalENConfig { contracts_diamond_proxy_addr: None, gateway_url: enconfig.gateway_url.clone(), bridge_addresses_refresh_interval_sec: enconfig.bridge_addresses_refresh_interval_sec, + timestamp_asserter_min_time_till_end_sec: general_config + .timestamp_asserter_config + .as_ref() + .map(|x| x.min_time_till_end_sec) + .unwrap_or_else(Self::default_timestamp_asserter_min_time_till_end_sec), }) } @@ -812,6 +862,10 @@ impl OptionalENConfig { 3_600 * 24 * 7 // 7 days } + const fn default_timestamp_asserter_min_time_till_end_sec() -> u32 { + 60 + } + fn from_env() -> anyhow::Result { let mut result: OptionalENConfig = envy::prefixed("EN_") .from_env() @@ -1403,6 +1457,7 @@ impl From<&ExternalNodeConfig> for InternalApiConfig { l2_erc20_default_bridge: config.remote.l2_erc20_bridge_addr, l1_shared_default_bridge: config.remote.l1_shared_bridge_proxy_addr, l2_shared_default_bridge: config.remote.l2_shared_bridge_addr, + l2_legacy_shared_bridge: config.remote.l2_legacy_shared_bridge_addr, l1_weth_bridge: config.remote.l1_weth_bridge_addr, l2_weth_bridge: config.remote.l2_weth_bridge_addr, }, @@ -1417,6 +1472,7 @@ impl From<&ExternalNodeConfig> for InternalApiConfig { filters_disabled: config.optional.filters_disabled, dummy_verifier: config.remote.dummy_verifier, l1_batch_commit_data_generator_mode: config.remote.l1_batch_commit_data_generator_mode, + timestamp_asserter_address: config.remote.l2_timestamp_asserter_addr, } } } @@ -1439,6 +1495,17 @@ impl From<&ExternalNodeConfig> for TxSenderConfig { chain_id: config.required.l2_chain_id, // Does not matter for EN. 
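Right before the `timestamp_asserter_params` mapping below, an illustration of what `timestamp_asserter_min_time_till_end_sec` guards. This is a hedged sketch derived from the field's doc comment above, not from the asserter implementation itself:

// Sketch: an asserted time range is only considered usable while it still has at
// least `min_time_till_end` left relative to the current `block.timestamp`.
fn range_usable(now_secs: u64, range_end_secs: u64, min_time_till_end: std::time::Duration) -> bool {
    range_end_secs.saturating_sub(now_secs) >= min_time_till_end.as_secs()
}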
whitelisted_tokens_for_aa: Default::default(), + timestamp_asserter_params: config.remote.l2_timestamp_asserter_addr.map(|address| { + TimestampAsserterParams { + address, + min_time_till_end: Duration::from_secs( + config + .optional + .timestamp_asserter_min_time_till_end_sec + .into(), + ), + } + }), } } } diff --git a/core/bin/external_node/src/config/tests.rs b/core/bin/external_node/src/config/tests.rs index a32be3eff725..dc74d124b18e 100644 --- a/core/bin/external_node/src/config/tests.rs +++ b/core/bin/external_node/src/config/tests.rs @@ -128,6 +128,7 @@ fn parsing_optional_config_from_env() { "zks_getProof=100,eth_call=2", ), ("EN_L1_BATCH_COMMIT_DATA_GENERATOR_MODE", "Validium"), + ("EN_TIMESTAMP_ASSERTER_MIN_TIME_TILL_END_SEC", "2"), ]; let env_vars = env_vars .into_iter() diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index 7d8489013535..5c70fd436781 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -11,7 +11,9 @@ use zksync_config::{ }, PostgresConfig, }; -use zksync_metadata_calculator::{MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig}; +use zksync_metadata_calculator::{ + MerkleTreeReaderConfig, MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig, +}; use zksync_node_api_server::web3::Namespace; use zksync_node_framework::{ implementations::layers::{ @@ -25,7 +27,7 @@ use zksync_node_framework::{ logs_bloom_backfill::LogsBloomBackfillLayer, main_node_client::MainNodeClientLayer, main_node_fee_params_fetcher::MainNodeFeeParamsFetcherLayer, - metadata_calculator::MetadataCalculatorLayer, + metadata_calculator::{MetadataCalculatorLayer, TreeApiServerLayer}, node_storage_init::{ external_node_strategy::{ExternalNodeInitStrategyLayer, SnapshotRecoveryConfig}, NodeStorageInitializerLayer, @@ -55,6 +57,7 @@ use zksync_node_framework::{ service::{ZkStackService, ZkStackServiceBuilder}, }; use zksync_state::RocksdbStorageOptions; +use zksync_types::L2_NATIVE_TOKEN_VAULT_ADDRESS; use crate::{config::ExternalNodeConfig, metrics::framework::ExternalNodeMetricsLayer, Component}; @@ -192,11 +195,22 @@ impl ExternalNodeBuilder { // compression. const OPTIONAL_BYTECODE_COMPRESSION: bool = true; + let l2_shared_bridge_addr = self + .config + .remote + .l2_shared_bridge_addr + .context("Missing `l2_shared_bridge_addr`")?; + let l2_legacy_shared_bridge_addr = if l2_shared_bridge_addr == L2_NATIVE_TOKEN_VAULT_ADDRESS + { + // System has migrated to `L2_NATIVE_TOKEN_VAULT_ADDRESS`, use legacy shared bridge address from main node. + self.config.remote.l2_legacy_shared_bridge_addr + } else { + // System hasn't migrated on `L2_NATIVE_TOKEN_VAULT_ADDRESS`, we can safely use `l2_shared_bridge_addr`. + Some(l2_shared_bridge_addr) + }; + let persistence_layer = OutputHandlerLayer::new( - self.config - .remote - .l2_shared_bridge_addr - .expect("L2 shared bridge address is not set"), + l2_legacy_shared_bridge_addr, self.config.optional.l2_block_seal_queue_capacity, ) .with_pre_insert_txs(true) // EN requires txs to be pre-inserted. @@ -364,6 +378,11 @@ impl ExternalNodeBuilder { layer = layer.with_tree_api_config(merkle_tree_api_config); } + // Add stale keys repair task if requested. + if self.config.optional.merkle_tree_repair_stale_keys { + layer = layer.with_stale_keys_repair(); + } + // Add tree pruning if needed. 
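For orientation, one way the new `merkle_tree_repair_stale_keys` flag above can be toggled on the EN. This is a hedged sketch: the env var name follows from the `envy::prefixed("EN_")` parsing and the `#[serde(default)]` field shown earlier, and is an inference rather than documented behavior:

// Assumed mapping: `EN_MERKLE_TREE_REPAIR_STALE_KEYS=true` populates the optional
// config field, which the branch above turns into `layer.with_stale_keys_repair()`.
fn stale_keys_repair_requested() -> bool {
    std::env::var("EN_MERKLE_TREE_REPAIR_STALE_KEYS")
        .map(|value| value == "true")
        .unwrap_or(false)
}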
if self.config.optional.pruning_enabled { layer = layer.with_pruning_config(self.config.optional.pruning_removal_delay()); @@ -373,6 +392,29 @@ impl ExternalNodeBuilder { Ok(self) } + fn add_isolated_tree_api_layer(mut self) -> anyhow::Result { + let reader_config = MerkleTreeReaderConfig { + db_path: self.config.required.merkle_tree_path.clone(), + max_open_files: self.config.optional.merkle_tree_max_open_files, + multi_get_chunk_size: self.config.optional.merkle_tree_multi_get_chunk_size, + block_cache_capacity: self.config.optional.merkle_tree_block_cache_size(), + include_indices_and_filters_in_block_cache: self + .config + .optional + .merkle_tree_include_indices_and_filters_in_block_cache, + }; + let api_config = MerkleTreeApiConfig { + port: self + .config + .tree_component + .api_port + .context("should contain tree api port")?, + }; + self.node + .add_layer(TreeApiServerLayer::new(reader_config, api_config)); + Ok(self) + } + fn add_tx_sender_layer(mut self) -> anyhow::Result { let postgres_storage_config = PostgresStorageCachesConfig { factory_deps_cache_size: self.config.optional.factory_deps_cache_size() as u64, @@ -595,11 +637,11 @@ impl ExternalNodeBuilder { self = self.add_metadata_calculator_layer(with_tree_api)?; } Component::TreeApi => { - anyhow::ensure!( - components.contains(&Component::Tree), - "Merkle tree API cannot be started without a tree component" - ); - // Do nothing, will be handled by the `Tree` component. + if components.contains(&Component::Tree) { + // Do nothing, will be handled by the `Tree` component. + } else { + self = self.add_isolated_tree_api_layer()?; + } } Component::TreeFetcher => { self = self.add_tree_data_fetcher_layer()?; diff --git a/core/bin/external_node/src/tests/mod.rs b/core/bin/external_node/src/tests/mod.rs index b21dbd0db9a3..59aceea819f1 100644 --- a/core/bin/external_node/src/tests/mod.rs +++ b/core/bin/external_node/src/tests/mod.rs @@ -17,15 +17,23 @@ mod utils; const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); const POLL_INTERVAL: Duration = Duration::from_millis(100); -#[test_casing(3, ["all", "core", "api"])] +#[test_casing(4, ["all", "core", "api", "core,tree_api"])] #[tokio::test] #[tracing::instrument] // Add args to the test logs async fn external_node_basics(components_str: &'static str) { let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging - let (env, env_handles) = utils::TestEnvironment::with_genesis_block(components_str).await; - let expected_health_components = utils::expected_health_components(&env.components); + let mut expected_health_components = utils::expected_health_components(&env.components); + let expected_shutdown_components = expected_health_components.clone(); + let has_core_or_api = env.components.0.iter().any(|component| { + [Component::Core, Component::HttpApi, Component::WsApi].contains(component) + }); + if has_core_or_api { + // The `sync_state` component doesn't signal its shutdown, but should be present in the list of components + expected_health_components.push("sync_state"); + } + let l2_client = utils::mock_l2_client(&env); let eth_client = utils::mock_eth_client(env.config.diamond_proxy_address()); @@ -84,7 +92,7 @@ async fn external_node_basics(components_str: &'static str) { let health_data = app_health.check_health().await; tracing::info!(?health_data, "final health data"); assert_matches!(health_data.inner().status(), HealthStatus::ShutDown); - for name in expected_health_components { + for name in 
expected_shutdown_components { let component_health = &health_data.components()[name]; assert_matches!(component_health.status(), HealthStatus::ShutDown); } @@ -162,40 +170,3 @@ async fn running_tree_without_core_is_not_allowed() { err ); } - -#[tokio::test] -async fn running_tree_api_without_tree_is_not_allowed() { - let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging - let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("core,tree_api").await; - - let l2_client = utils::mock_l2_client(&env); - let eth_client = utils::mock_eth_client(env.config.diamond_proxy_address()); - - let node_handle = tokio::task::spawn_blocking(move || { - std::thread::spawn(move || { - let mut node = ExternalNodeBuilder::new(env.config)?; - inject_test_layers( - &mut node, - env.sigint_receiver, - env.app_health_sender, - eth_client, - l2_client, - ); - - // We're only interested in the error, so we drop the result. - node.build(env.components.0.into_iter().collect()).map(drop) - }) - .join() - .unwrap() - }); - - // Check that we cannot build the node without the core component. - let result = node_handle.await.expect("Building the node panicked"); - let err = result.expect_err("Building the node with tree api but without tree should fail"); - assert!( - err.to_string() - .contains("Merkle tree API cannot be started without a tree component"), - "Unexpected errror: {}", - err - ); -} diff --git a/core/bin/genesis_generator/Cargo.toml b/core/bin/genesis_generator/Cargo.toml index 1ece9ea09d2e..d0bbcb668713 100644 --- a/core/bin/genesis_generator/Cargo.toml +++ b/core/bin/genesis_generator/Cargo.toml @@ -15,7 +15,6 @@ publish = false zksync_config.workspace = true zksync_env_config.workspace = true zksync_protobuf_config.workspace = true -zksync_utils.workspace = true zksync_types.workspace = true zksync_core_leftovers.workspace = true zksync_dal.workspace = true diff --git a/core/bin/snapshots_creator/src/tests.rs b/core/bin/snapshots_creator/src/tests.rs index a440d836b4c9..f3c191388803 100644 --- a/core/bin/snapshots_creator/src/tests.rs +++ b/core/bin/snapshots_creator/src/tests.rs @@ -167,6 +167,7 @@ async fn create_l2_block( base_fee_per_gas: 0, gas_per_pubdata_limit: 0, batch_fee_input: Default::default(), + pubdata_params: Default::default(), base_system_contracts_hashes: Default::default(), protocol_version: Some(Default::default()), virtual_blocks: 0, diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index ce7182a3aa4a..e3558de3e6a1 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -3,13 +3,13 @@ use std::{cell::RefCell, rc::Rc}; use once_cell::sync::Lazy; use zksync_contracts::{ load_sys_contract, read_bootloader_code, read_bytecode_from_path, read_sys_contract_bytecode, - read_zbin_bytecode, BaseSystemContracts, ContractLanguage, SystemContractCode, + read_yul_bytecode, BaseSystemContracts, ContractLanguage, SystemContractCode, }; use zksync_multivm::{ interface::{ storage::{InMemoryStorage, StorageView, WriteStorage}, tracer::VmExecutionStopReason, - L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, + InspectExecutionMode, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmFactory, VmInterface, VmInterfaceExt, }, tracers::dynamic::vm_1_5_0::DynTracer, @@ -21,13 +21,13 @@ use zksync_multivm::{ zk_evm_latest::aux_structures::Timestamp, }; use zksync_types::{ - 
block::L2BlockHasher, ethabi::Token, fee::Fee, fee_model::BatchFeeInput, l1::L1Tx, l2::L2Tx, - utils::storage_key_for_eth_balance, AccountTreeId, Address, Execute, K256PrivateKey, - L1BatchNumber, L1TxCommonData, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, StorageKey, - Transaction, BOOTLOADER_ADDRESS, SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_GAS_PRICE_POSITION, - SYSTEM_CONTEXT_TX_ORIGIN_POSITION, U256, ZKPORTER_IS_AVAILABLE, + block::L2BlockHasher, bytecode::BytecodeHash, ethabi::Token, fee::Fee, + fee_model::BatchFeeInput, l1::L1Tx, l2::L2Tx, u256_to_h256, utils::storage_key_for_eth_balance, + AccountTreeId, Address, Execute, K256PrivateKey, L1BatchNumber, L1TxCommonData, L2BlockNumber, + L2ChainId, Nonce, ProtocolVersionId, StorageKey, Transaction, BOOTLOADER_ADDRESS, + SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_GAS_PRICE_POSITION, SYSTEM_CONTEXT_TX_ORIGIN_POSITION, + U256, ZKPORTER_IS_AVAILABLE, }; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, u256_to_h256}; use crate::intrinsic_costs::VmSpentResourcesResult; @@ -62,19 +62,19 @@ impl VmTracer for SpecialBootloaderTracer pub static GAS_TEST_SYSTEM_CONTRACTS: Lazy = Lazy::new(|| { let bytecode = read_bootloader_code("gas_test"); - let hash = hash_bytecode(&bytecode); + let hash = BytecodeHash::for_bytecode(&bytecode).value(); let bootloader = SystemContractCode { - code: bytes_to_be_words(bytecode), + code: bytecode, hash, }; let bytecode = read_sys_contract_bytecode("", "DefaultAccount", ContractLanguage::Sol); - let hash = hash_bytecode(&bytecode); + let hash = BytecodeHash::for_bytecode(&bytecode).value(); BaseSystemContracts { default_aa: SystemContractCode { - code: bytes_to_be_words(bytecode), + code: bytecode, hash, }, bootloader, @@ -176,10 +176,10 @@ fn read_bootloader_test_code(test: &str) -> Vec { )){ contract } else { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", + read_yul_bytecode( + "contracts/system-contracts/bootloader/tests/artifacts", test - )) + ) } } @@ -207,23 +207,23 @@ fn default_l1_batch() -> L1BatchEnv { /// returns the amount of gas needed to perform and internal transfer, assuming no gas price /// per pubdata, i.e. under assumption that the refund will not touch any new slots. 
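Throughout this hunk, the removed `bytes_to_be_words` helper is replaced with inline big-endian word packing (`input.chunks(32).map(U256::from_big_endian)`). A self-contained sketch of the equivalent transformation, assuming 32-byte big-endian bootloader memory words as the call sites imply:

use zksync_types::U256; // `U256` is imported from `zksync_types` in this hunk

// Packs a byte buffer into 32-byte big-endian words; `U256::from_big_endian`
// zero-extends a trailing chunk shorter than 32 bytes.
fn to_words(bytes: &[u8]) -> Vec<U256> {
    bytes.chunks(32).map(U256::from_big_endian).collect()
}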
pub(super) fn execute_internal_transfer_test() -> u32 { - let raw_storage = InMemoryStorage::with_system_contracts(hash_bytecode); + let raw_storage = InMemoryStorage::with_system_contracts(); let mut storage_view = StorageView::new(raw_storage); let bootloader_balance_key = storage_key_for_eth_balance(&BOOTLOADER_ADDRESS); storage_view.set_value(bootloader_balance_key, u256_to_h256(U256([0, 0, 1, 0]))); let bytecode = read_bootloader_test_code("transfer_test"); - let hash = hash_bytecode(&bytecode); + let hash = BytecodeHash::for_bytecode(&bytecode).value(); let bootloader = SystemContractCode { - code: bytes_to_be_words(bytecode), + code: bytecode, hash, }; let l1_batch = default_l1_batch(); let bytecode = read_sys_contract_bytecode("", "DefaultAccount", ContractLanguage::Sol); - let hash = hash_bytecode(&bytecode); + let hash = BytecodeHash::for_bytecode(&bytecode).value(); let default_aa = SystemContractCode { - code: bytes_to_be_words(bytecode), + code: bytecode, hash, }; @@ -263,7 +263,11 @@ pub(super) fn execute_internal_transfer_test() -> u32 { } input }; - let input: Vec<_> = bytes_to_be_words(input).into_iter().enumerate().collect(); + let input: Vec<_> = input + .chunks(32) + .map(U256::from_big_endian) + .enumerate() + .collect(); let tracer_result = Rc::new(RefCell::new(0)); let tracer = SpecialBootloaderTracer { @@ -271,8 +275,9 @@ pub(super) fn execute_internal_transfer_test() -> u32 { output: tracer_result.clone(), } .into_tracer_pointer(); + let mut vm: Vm<_, HistoryEnabled> = Vm::new(l1_batch, system_env, storage_view.to_rc_ptr()); - let result = vm.inspect(&mut tracer.into(), VmExecutionMode::Bootloader); + let result = vm.inspect(&mut tracer.into(), InspectExecutionMode::Bootloader); assert!(!result.result.is_failed(), "The internal call has reverted"); tracer_result.take() @@ -287,7 +292,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( .iter() .fold(U256::zero(), |sum, elem| sum + elem.gas_limit()); - let raw_storage = InMemoryStorage::with_system_contracts(hash_bytecode); + let raw_storage = InMemoryStorage::with_system_contracts(); let mut storage_view = StorageView::new(raw_storage); for tx in txs.iter() { @@ -331,7 +336,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( let mut total_gas_refunded = 0; for tx in txs { vm.push_transaction(tx); - let tx_execution_result = vm.execute(VmExecutionMode::OneTx); + let tx_execution_result = vm.execute(InspectExecutionMode::OneTx); total_gas_refunded += tx_execution_result.refunds.gas_refunded; if !accept_failure { @@ -343,7 +348,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( } } - let result = vm.execute(VmExecutionMode::Bootloader); + let result = vm.execute(InspectExecutionMode::Bootloader); let metrics = result.get_execution_metrics(None); VmSpentResourcesResult { diff --git a/core/bin/zksync_server/Cargo.toml b/core/bin/zksync_server/Cargo.toml index 031183924064..4cf028be8210 100644 --- a/core/bin/zksync_server/Cargo.toml +++ b/core/bin/zksync_server/Cargo.toml @@ -17,7 +17,6 @@ zksync_env_config.workspace = true zksync_eth_client.workspace = true zksync_protobuf_config.workspace = true zksync_storage.workspace = true -zksync_utils.workspace = true zksync_types.workspace = true zksync_core_leftovers.workspace = true zksync_node_genesis.workspace = true diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index da0a93f624df..51e7b409c9a0 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -7,16 +7,15 @@ use zksync_config::{ 
api::{HealthCheckConfig, MerkleTreeApiConfig, Web3JsonRpcConfig}, chain::{ CircuitBreakerConfig, MempoolConfig, NetworkConfig, OperationsManagerConfig, - StateKeeperConfig, + StateKeeperConfig, TimestampAsserterConfig, }, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, - secrets::DataAvailabilitySecrets, - BasicWitnessInputProducerConfig, ContractsConfig, DatabaseSecrets, ExperimentalVmConfig, - ExternalPriceApiClientConfig, FriProofCompressorConfig, FriProverConfig, - FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, - L1Secrets, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, - ProtectiveReadsWriterConfig, Secrets, + BasicWitnessInputProducerConfig, ContractsConfig, DataAvailabilitySecrets, DatabaseSecrets, + ExperimentalVmConfig, ExternalPriceApiClientConfig, FriProofCompressorConfig, + FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, + FriWitnessVectorGeneratorConfig, L1Secrets, ObservabilityConfig, PrometheusConfig, + ProofDataHandlerConfig, ProtectiveReadsWriterConfig, Secrets, }, ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DAClientConfig, DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig, @@ -45,7 +44,7 @@ struct Cli { /// Comma-separated list of components to launch. #[arg( long, - default_value = "api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher,vm_runner_protective_reads" + default_value = "api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher,vm_runner_protective_reads" )] components: ComponentsToRun, /// Path to the yaml config. If set, it will be used instead of env vars. @@ -196,5 +195,6 @@ fn load_env_config() -> anyhow::Result { external_proof_integration_api_config: ExternalProofIntegrationApiConfig::from_env().ok(), experimental_vm_config: ExperimentalVmConfig::from_env().ok(), prover_job_monitor_config: None, + timestamp_asserter_config: TimestampAsserterConfig::from_env().ok(), }) } diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index b04227965f8c..794c847a24d5 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -1,18 +1,20 @@ //! This module provides a "builder" for the main node, //! as well as an interface to run the node with the specified components. 
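Before the imports diff resumes, a rough sketch of the builder pattern this module doc describes (the constructor and finalizer names here are assumptions for illustration; the `add_*_layer` methods returning `anyhow::Result<Self>` appear in the hunks below):

// Each `add_*_layer` consumes the builder and returns it wrapped in a `Result`,
// so component wiring composes with `?`:
let node = MainNodeBuilder::new(configs, wallets, genesis_config, contracts_config)? // hypothetical ctor
    .add_tx_sender_layer()?
    .add_da_client_layer()?
    .add_vm_playground_layer()?
    .build(components)?; // hypothetical finalizer consuming the selected components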
-use anyhow::Context; +use std::time::Duration; + +use anyhow::{bail, Context}; use zksync_config::{ configs::{ - da_client::DAClientConfig, eth_sender::PubdataSendingMode, - secrets::DataAvailabilitySecrets, wallets::Wallets, GeneralConfig, Secrets, + da_client::DAClientConfig, secrets::DataAvailabilitySecrets, wallets::Wallets, + GeneralConfig, Secrets, }, ContractsConfig, GenesisConfig, }; use zksync_core_leftovers::Component; use zksync_metadata_calculator::MetadataCalculatorConfig; use zksync_node_api_server::{ - tx_sender::TxSenderConfig, + tx_sender::{TimestampAsserterParams, TxSenderConfig}, web3::{state::InternalApiConfig, Namespace}, }; use zksync_node_framework::{ @@ -26,8 +28,8 @@ use zksync_node_framework::{ consensus::MainNodeConsensusLayer, contract_verification_api::ContractVerificationApiLayer, da_clients::{ - avail::AvailWiringLayer, no_da::NoDAClientWiringLayer, - object_store::ObjectStorageClientWiringLayer, + avail::AvailWiringLayer, celestia::CelestiaWiringLayer, eigen::EigenWiringLayer, + no_da::NoDAClientWiringLayer, object_store::ObjectStorageClientWiringLayer, }, da_dispatcher::DataAvailabilityDispatcherLayer, eth_sender::{EthTxAggregatorLayer, EthTxManagerLayer}, @@ -55,7 +57,6 @@ use zksync_node_framework::{ main_batch_executor::MainBatchExecutorLayer, mempool_io::MempoolIOLayer, output_handler::OutputHandlerLayer, RocksdbStorageOptions, StateKeeperLayer, }, - tee_verifier_input_producer::TeeVerifierInputProducerLayer, vm_runner::{ bwip::BasicWitnessInputProducerLayer, playground::VmPlaygroundLayer, protective_reads::ProtectiveReadsWriterLayer, @@ -70,7 +71,9 @@ use zksync_node_framework::{ }, service::{ZkStackService, ZkStackServiceBuilder}, }; -use zksync_types::{settlement::SettlementMode, SHARED_BRIDGE_ETHER_TOKEN_ADDRESS}; +use zksync_types::{ + pubdata_da::PubdataSendingMode, settlement::SettlementMode, SHARED_BRIDGE_ETHER_TOKEN_ADDRESS, +}; use zksync_vlog::prometheus::PrometheusExporterConfig; /// Macro that looks into a path to fetch an optional config, @@ -190,7 +193,7 @@ impl MainNodeBuilder { .add_layer(BaseTokenRatioProviderLayer::new(base_token_adjuster_config)); } let state_keeper_config = try_load_config!(self.configs.state_keeper_config); - let l1_gas_layer = L1GasLayer::new(state_keeper_config); + let l1_gas_layer = L1GasLayer::new(&state_keeper_config); self.node.add_layer(l1_gas_layer); Ok(self) } @@ -237,9 +240,7 @@ impl MainNodeBuilder { let wallets = self.wallets.clone(); let sk_config = try_load_config!(self.configs.state_keeper_config); let persistence_layer = OutputHandlerLayer::new( - self.contracts_config - .l2_shared_bridge_addr - .context("L2 shared bridge address")?, + self.contracts_config.l2_legacy_shared_bridge_addr, sk_config.l2_block_seal_queue_capacity, ) .with_protective_reads_persistence_enabled(sk_config.protective_reads_persistence_enabled); @@ -248,6 +249,8 @@ impl MainNodeBuilder { sk_config.clone(), try_load_config!(self.configs.mempool_config), try_load_config!(wallets.state_keeper), + self.contracts_config.l2_da_validator_addr, + self.genesis_config.l1_batch_commit_data_generator_mode, ); let db_config = try_load_config!(self.configs.db_config); let experimental_vm_config = self @@ -288,6 +291,7 @@ impl MainNodeBuilder { self.node.add_layer(ProofDataHandlerLayer::new( try_load_config!(self.configs.proof_data_handler_config), self.genesis_config.l1_batch_commit_data_generator_mode, + self.genesis_config.l2_chain_id, )); Ok(self) } @@ -301,16 +305,36 @@ impl MainNodeBuilder { fn add_tx_sender_layer(mut self) -> 
anyhow::Result { let sk_config = try_load_config!(self.configs.state_keeper_config); let rpc_config = try_load_config!(self.configs.api_config).web3_json_rpc; + + let timestamp_asserter_params = match self.contracts_config.l2_timestamp_asserter_addr { + Some(address) => { + let timestamp_asserter_config = + try_load_config!(self.configs.timestamp_asserter_config); + Some(TimestampAsserterParams { + address, + min_time_till_end: Duration::from_secs( + timestamp_asserter_config.min_time_till_end_sec.into(), + ), + }) + } + None => None, + }; let postgres_storage_caches_config = PostgresStorageCachesConfig { factory_deps_cache_size: rpc_config.factory_deps_cache_size() as u64, initial_writes_cache_size: rpc_config.initial_writes_cache_size() as u64, latest_values_cache_size: rpc_config.latest_values_cache_size() as u64, latest_values_max_block_lag: rpc_config.latest_values_max_block_lag(), }; + let vm_config = self + .configs + .experimental_vm_config + .clone() + .unwrap_or_default(); // On main node we always use master pool sink. self.node.add_layer(MasterPoolSinkLayer); - self.node.add_layer(TxSenderLayer::new( + + let layer = TxSenderLayer::new( TxSenderConfig::new( &sk_config, &rpc_config, @@ -318,10 +342,13 @@ impl MainNodeBuilder { .fee_account .address(), self.genesis_config.l2_chain_id, + timestamp_asserter_params, ), postgres_storage_caches_config, rpc_config.vm_concurrency_limit(), - )); + ); + let layer = layer.with_vm_mode(vm_config.api_fast_vm_mode); + self.node.add_layer(layer); Ok(self) } @@ -493,14 +520,6 @@ impl MainNodeBuilder { Ok(self) } - fn add_tee_verifier_input_producer_layer(mut self) -> anyhow::Result { - self.node.add_layer(TeeVerifierInputProducerLayer::new( - self.genesis_config.l2_chain_id, - )); - - Ok(self) - } - fn add_da_client_layer(mut self) -> anyhow::Result { let Some(da_client_config) = self.configs.da_client_config.clone() else { tracing::warn!("No config for DA client, using the NoDA client"); @@ -509,16 +528,25 @@ impl MainNodeBuilder { }; let secrets = try_load_config!(self.secrets.data_availability); - match (da_client_config, secrets) { (DAClientConfig::Avail(config), DataAvailabilitySecrets::Avail(secret)) => { self.node.add_layer(AvailWiringLayer::new(config, secret)); } + (DAClientConfig::Celestia(config), DataAvailabilitySecrets::Celestia(secret)) => { + self.node + .add_layer(CelestiaWiringLayer::new(config, secret)); + } + + (DAClientConfig::Eigen(config), DataAvailabilitySecrets::Eigen(secret)) => { + self.node.add_layer(EigenWiringLayer::new(config, secret)); + } + (DAClientConfig::ObjectStore(config), _) => { self.node .add_layer(ObjectStorageClientWiringLayer::new(config)); } + _ => bail!("invalid pair of da_client and da_secrets"), } Ok(self) @@ -573,7 +601,11 @@ impl MainNodeBuilder { } fn add_vm_playground_layer(mut self) -> anyhow::Result { - let vm_config = try_load_config!(self.configs.experimental_vm_config); + let vm_config = self + .configs + .experimental_vm_config + .clone() + .unwrap_or_default(); self.node.add_layer(VmPlaygroundLayer::new( vm_config.playground, self.genesis_config.l2_chain_id, @@ -727,9 +759,6 @@ impl MainNodeBuilder { Component::EthTxManager => { self = self.add_eth_tx_manager_layer()?; } - Component::TeeVerifierInputProducer => { - self = self.add_tee_verifier_input_producer_layer()?; - } Component::Housekeeper => { self = self .add_house_keeper_layer()? 
diff --git a/core/bin/zksync_tee_prover/Cargo.toml b/core/bin/zksync_tee_prover/Cargo.toml index 85908eebeaaa..b853da348ee0 100644 --- a/core/bin/zksync_tee_prover/Cargo.toml +++ b/core/bin/zksync_tee_prover/Cargo.toml @@ -15,7 +15,7 @@ publish = false anyhow.workspace = true async-trait.workspace = true envy.workspace = true -reqwest.workspace = true +reqwest = { workspace = true, features = ["zstd"] } secp256k1 = { workspace = true, features = ["serde"] } serde = { workspace = true, features = ["derive"] } thiserror.workspace = true diff --git a/core/bin/zksync_tee_prover/src/api_client.rs b/core/bin/zksync_tee_prover/src/api_client.rs index 13fbc1ba8868..ffc2839b8d3b 100644 --- a/core/bin/zksync_tee_prover/src/api_client.rs +++ b/core/bin/zksync_tee_prover/src/api_client.rs @@ -1,13 +1,10 @@ -use reqwest::Client; +use reqwest::{Client, Response, StatusCode}; use secp256k1::{ecdsa::Signature, PublicKey}; -use serde::{de::DeserializeOwned, Serialize}; +use serde::Serialize; use url::Url; use zksync_basic_types::H256; use zksync_prover_interface::{ - api::{ - RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitTeeProofRequest, - SubmitTeeProofResponse, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, - }, + api::{RegisterTeeAttestationRequest, SubmitTeeProofRequest, TeeProofGenerationDataRequest}, inputs::TeeVerifierInput, outputs::L1BatchTeeProofForL1, }; @@ -31,10 +28,9 @@ impl TeeApiClient { } } - async fn post(&self, endpoint: S, request: Req) -> Result + async fn post(&self, endpoint: S, request: Req) -> Result where Req: Serialize + std::fmt::Debug, - Resp: DeserializeOwned, S: AsRef, { let url = self.api_base_url.join(endpoint.as_ref()).unwrap(); @@ -46,9 +42,7 @@ impl TeeApiClient { .json(&request) .send() .await? - .error_for_status()? - .json::() - .await + .error_for_status() } /// Registers the attestation quote with the TEE prover interface API, effectively proving that @@ -63,8 +57,7 @@ impl TeeApiClient { attestation: attestation_quote_bytes, pubkey: public_key.serialize().to_vec(), }; - self.post::<_, RegisterTeeAttestationResponse, _>("/tee/register_attestation", request) - .await?; + self.post("/tee/register_attestation", request).await?; tracing::info!( "Attestation quote was successfully registered for the public key {}", public_key @@ -77,12 +70,17 @@ impl TeeApiClient { pub async fn get_job( &self, tee_type: TeeType, - ) -> Result>, TeeProverError> { + ) -> Result, TeeProverError> { let request = TeeProofGenerationDataRequest { tee_type }; - let response = self - .post::<_, TeeProofGenerationDataResponse, _>("/tee/proof_inputs", request) - .await?; - Ok(response.0) + let response = self.post("/tee/proof_inputs", request).await?; + match response.status() { + StatusCode::OK => Ok(Some(response.json::().await?)), + StatusCode::NO_CONTENT => Ok(None), + _ => response + .json::>() + .await + .map_err(TeeProverError::Request), + } } /// Submits the successfully verified proof to the TEE prover interface API. 
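Since `post()` now returns the raw `Response`, `get_job` can tell an empty queue (HTTP 204) apart from a real payload (HTTP 200) without a wrapper type. A minimal sketch of the resulting calling convention, assuming the caller holds a `TeeApiClient` and proves SGX batches (illustrative only, not part of this diff):

// 200 OK + JSON body => Ok(Some(TeeVerifierInput))
// 204 No Content     => Ok(None): no pending batches
// other statuses     => Err(TeeProverError::Request(..))
match client.get_job(TeeType::Sgx).await? {
    Some(input) => tracing::info!("received a batch to prove"),
    None => tracing::trace!("no pending batches to prove"),
}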
@@ -101,7 +99,7 @@ impl TeeApiClient { tee_type, })); let observer = METRICS.proof_submitting_time.start(); - self.post::<_, SubmitTeeProofResponse, _>( + self.post( format!("/tee/submit_proofs/{batch_number}").as_str(), request, ) diff --git a/core/bin/zksync_tee_prover/src/main.rs b/core/bin/zksync_tee_prover/src/main.rs index 70c6f888185a..aa0881011da1 100644 --- a/core/bin/zksync_tee_prover/src/main.rs +++ b/core/bin/zksync_tee_prover/src/main.rs @@ -45,11 +45,12 @@ fn main() -> anyhow::Result<()> { .add_layer(SigintHandlerLayer) .add_layer(TeeProverLayer::new(tee_prover_config)); - if let Some(gateway) = prometheus_config.gateway_endpoint() { - let exporter_config = - PrometheusExporterConfig::push(gateway, prometheus_config.push_interval()); - builder.add_layer(PrometheusExporterLayer(exporter_config)); - } + let exporter_config = if let Some(gateway) = prometheus_config.gateway_endpoint() { + PrometheusExporterConfig::push(gateway, prometheus_config.push_interval()) + } else { + PrometheusExporterConfig::pull(prometheus_config.listener_port) + }; + builder.add_layer(PrometheusExporterLayer(exporter_config)); builder.build().run(observability_guard)?; Ok(()) diff --git a/core/bin/zksync_tee_prover/src/metrics.rs b/core/bin/zksync_tee_prover/src/metrics.rs index 9f535967f79f..769a8bbc7e0f 100644 --- a/core/bin/zksync_tee_prover/src/metrics.rs +++ b/core/bin/zksync_tee_prover/src/metrics.rs @@ -2,7 +2,7 @@ use std::time::Duration; -use vise::{Buckets, Gauge, Histogram, Metrics, Unit}; +use vise::{Buckets, Counter, Gauge, Histogram, Metrics, Unit}; #[derive(Debug, Metrics)] #[metrics(prefix = "tee_prover")] @@ -13,7 +13,7 @@ pub(crate) struct TeeProverMetrics { pub proof_generation_time: Histogram, #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] pub proof_submitting_time: Histogram, - pub network_errors_counter: Gauge, + pub network_errors_counter: Counter, pub last_batch_number_processed: Gauge, } diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs index 1511f0c88e3d..5d22d1e7c630 100644 --- a/core/bin/zksync_tee_prover/src/tee_prover.rs +++ b/core/bin/zksync_tee_prover/src/tee_prover.rs @@ -90,9 +90,9 @@ impl TeeProver { } async fn step(&self, public_key: &PublicKey) -> Result, TeeProverError> { - match self.api_client.get_job(self.config.tee_type).await? 
{ - Some(job) => { - let (signature, batch_number, root_hash) = self.verify(*job)?; + match self.api_client.get_job(self.config.tee_type).await { + Ok(Some(job)) => { + let (signature, batch_number, root_hash) = self.verify(job)?; self.api_client .submit_proof( batch_number, @@ -104,10 +104,11 @@ impl TeeProver { .await?; Ok(Some(batch_number)) } - None => { + Ok(None) => { tracing::trace!("There are currently no pending batches to be proven"); Ok(None) } + Err(err) => Err(err), } } } @@ -154,7 +155,7 @@ impl Task for TeeProver { } } Err(err) => { - METRICS.network_errors_counter.inc_by(1); + METRICS.network_errors_counter.inc(); if !err.is_retriable() || retries > config.max_retries { return Err(err.into()); } diff --git a/core/lib/basic_types/Cargo.toml b/core/lib/basic_types/Cargo.toml index 616b959b0783..6cac4f60f615 100644 --- a/core/lib/basic_types/Cargo.toml +++ b/core/lib/basic_types/Cargo.toml @@ -11,8 +11,10 @@ keywords.workspace = true categories.workspace = true [dependencies] +const-decoder.workspace = true ethabi.workspace = true hex.workspace = true +sha2.workspace = true tiny-keccak.workspace = true thiserror.workspace = true serde = { workspace = true, features = ["derive"] } diff --git a/core/lib/basic_types/src/bytecode.rs b/core/lib/basic_types/src/bytecode.rs new file mode 100644 index 000000000000..585ba0ef8c88 --- /dev/null +++ b/core/lib/basic_types/src/bytecode.rs @@ -0,0 +1,236 @@ +//! Bytecode-related types and utils. +//! +//! # Bytecode kinds +//! +//! ZKsync supports 2 kinds of bytecodes: EraVM and EVM ones. +//! +//! - **EraVM** bytecodes consist of 64-bit (8-byte) instructions for the corresponding VM. +//! - **EVM** bytecodes consist of ordinary EVM opcodes, preceded with a 32-byte big-endian code length (in bytes). +//! +//! Both bytecode kinds are right-padded to consist of an integer, odd number of 32-byte words. All methods +//! in this module operate on padded bytecodes unless explicitly specified otherwise. + +use anyhow::Context as _; +use sha2::{Digest, Sha256}; + +use crate::{H256, U256}; + +const MAX_BYTECODE_LENGTH_IN_WORDS: usize = (1 << 16) - 1; +const MAX_BYTECODE_LENGTH_BYTES: usize = MAX_BYTECODE_LENGTH_IN_WORDS * 32; + +/// Errors returned from [`validate_bytecode()`]. +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum InvalidBytecodeError { + /// Bytecode is too long. + #[error("Bytecode too long: {0} bytes, while max {1} allowed")] + BytecodeTooLong(usize, usize), + /// Bytecode length isn't divisible by 32 (i.e., bytecode cannot be represented as a sequence of 32-byte EraVM words). + #[error("Bytecode length is not divisible by 32")] + BytecodeLengthIsNotDivisibleBy32, + /// Bytecode has an even number of 32-byte words. + #[error("Bytecode has even number of 32-byte words")] + BytecodeLengthInWordsIsEven, +} + +/// Validates that the given bytecode passes basic checks (e.g., not too long). +/// +/// The performed checks are universal both for EraVM and (padded) EVM bytecodes. If you need to additionally check EVM bytecode integrity, +/// use [`trim_padded_evm_bytecode()`]. 
+pub fn validate_bytecode(code: &[u8]) -> Result<(), InvalidBytecodeError> { + let bytecode_len = code.len(); + + if bytecode_len > MAX_BYTECODE_LENGTH_BYTES { + return Err(InvalidBytecodeError::BytecodeTooLong( + bytecode_len, + MAX_BYTECODE_LENGTH_BYTES, + )); + } + + if bytecode_len % 32 != 0 { + return Err(InvalidBytecodeError::BytecodeLengthIsNotDivisibleBy32); + } + + let bytecode_len_words = bytecode_len / 32; + + if bytecode_len_words % 2 == 0 { + return Err(InvalidBytecodeError::BytecodeLengthInWordsIsEven); + } + + Ok(()) +} + +/// 32-byte bytecode hash. Besides a cryptographically secure hash of the bytecode contents, contains a [`BytecodeMarker`] +/// and the bytecode length. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct BytecodeHash(H256); + +impl BytecodeHash { + /// Hashes the provided EraVM bytecode. + pub fn for_bytecode(bytecode: &[u8]) -> Self { + Self::for_generic_bytecode(BytecodeMarker::EraVm, bytecode) + } + + /// Hashes the provided padded EVM bytecode. + pub fn for_evm_bytecode(bytecode: &[u8]) -> Self { + Self::for_generic_bytecode(BytecodeMarker::Evm, bytecode) + } + + fn for_generic_bytecode(kind: BytecodeMarker, bytecode: &[u8]) -> Self { + validate_bytecode(bytecode).expect("invalid bytecode"); + + let mut hasher = Sha256::new(); + let len = match kind { + BytecodeMarker::EraVm => (bytecode.len() / 32) as u16, + BytecodeMarker::Evm => bytecode.len() as u16, + }; + hasher.update(bytecode); + let result = hasher.finalize(); + + let mut output = [0u8; 32]; + output[..].copy_from_slice(result.as_slice()); + output[0] = kind as u8; + output[1] = 0; + output[2..4].copy_from_slice(&len.to_be_bytes()); + + Self(H256(output)) + } + + /// Returns a marker / kind of this bytecode. + pub fn marker(&self) -> BytecodeMarker { + match self.0.as_bytes()[0] { + val if val == BytecodeMarker::EraVm as u8 => BytecodeMarker::EraVm, + val if val == BytecodeMarker::Evm as u8 => BytecodeMarker::Evm, + _ => unreachable!(), + } + } + + /// Returns the length of the hashed bytecode in bytes. + pub fn len_in_bytes(&self) -> usize { + let bytes = self.0.as_bytes(); + let raw_len = u16::from_be_bytes([bytes[2], bytes[3]]); + match self.marker() { + BytecodeMarker::EraVm => raw_len as usize * 32, + BytecodeMarker::Evm => raw_len as usize, + } + } + + /// Returns the underlying hash value. + pub fn value(self) -> H256 { + self.0 + } + + /// Returns the underlying hash value interpreted as a big-endian unsigned integer. + pub fn value_u256(self) -> U256 { + crate::h256_to_u256(self.0) + } +} + +impl TryFrom for BytecodeHash { + type Error = anyhow::Error; + + fn try_from(raw_hash: H256) -> Result { + BytecodeMarker::new(raw_hash).context("unknown bytecode hash marker")?; + Ok(Self(raw_hash)) + } +} + +/// Bytecode marker encoded in the first byte of the bytecode hash. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[repr(u8)] +pub enum BytecodeMarker { + /// EraVM bytecode marker (1). + EraVm = 1, + /// EVM bytecode marker (2). + Evm = 2, +} + +impl BytecodeMarker { + /// Parses a marker from the bytecode hash. + pub fn new(bytecode_hash: H256) -> Option { + Some(match bytecode_hash.as_bytes()[0] { + val if val == Self::EraVm as u8 => Self::EraVm, + val if val == Self::Evm as u8 => Self::Evm, + _ => return None, + }) + } +} + +/// Removes padding from an EVM bytecode, returning the original EVM bytecode. 
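A quick sketch of the hash layout produced by `for_generic_bytecode` above (illustrative; uses only the API introduced in this file):

// BytecodeHash layout:
//   byte 0    : BytecodeMarker (1 = EraVM, 2 = EVM)
//   byte 1    : always 0
//   bytes 2..4: big-endian length (in 32-byte words for EraVM, in bytes for EVM)
//   bytes 4.. : trailing bytes of sha256(bytecode)
let hash = BytecodeHash::for_bytecode(&[0_u8; 32]); // 1 word => valid EraVM bytecode
let value = hash.value();
let bytes = value.as_bytes();
assert_eq!(bytes[0], BytecodeMarker::EraVm as u8);
assert_eq!(u16::from_be_bytes([bytes[2], bytes[3]]), 1); // length in words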
+pub fn trim_padded_evm_bytecode(raw: &[u8]) -> anyhow::Result<&[u8]> { + validate_bytecode(raw).context("bytecode fails basic validity checks")?; + + // EVM bytecodes are prefixed with a big-endian `U256` bytecode length. + let bytecode_len_bytes = raw.get(..32).context("length < 32")?; + let bytecode_len = U256::from_big_endian(bytecode_len_bytes); + let bytecode_len: usize = bytecode_len + .try_into() + .map_err(|_| anyhow::anyhow!("length ({bytecode_len}) overflow"))?; + let bytecode = raw.get(32..(32 + bytecode_len)).with_context(|| { + format!( + "prefixed length ({bytecode_len}) exceeds real length ({})", + raw.len() - 32 + ) + })?; + // Since slicing above succeeded, this one is safe. + let padding = &raw[(32 + bytecode_len)..]; + anyhow::ensure!( + padding.iter().all(|&b| b == 0), + "bytecode padding contains non-zero bytes" + ); + Ok(bytecode) +} + +#[doc(hidden)] // only useful for tests +pub mod testonly { + use const_decoder::Decoder; + + pub const RAW_EVM_BYTECODE: &[u8] = &const_decoder::decode!( + Decoder::Hex, + b"00000000000000000000000000000000000000000000000000000000000001266080604052348015\ + 600e575f80fd5b50600436106030575f3560e01c8063816898ff146034578063fb5343f314604c57\ + 5b5f80fd5b604a60048036038101906046919060a6565b6066565b005b6052606f565b604051605d\ + 919060d9565b60405180910390f35b805f8190555050565b5f5481565b5f80fd5b5f819050919050\ + 565b6088816078565b81146091575f80fd5b50565b5f8135905060a0816081565b92915050565b5f\ + 6020828403121560b85760b76074565b5b5f60c3848285016094565b91505092915050565b60d381\ + 6078565b82525050565b5f60208201905060ea5f83018460cc565b9291505056fea2646970667358\ + 221220caca1247066da378f2ec77c310f2ae51576272367b4fa11cc4350af4e9ce4d0964736f6c63\ + 4300081a00330000000000000000000000000000000000000000000000000000" + ); + pub const PROCESSED_EVM_BYTECODE: &[u8] = &const_decoder::decode!( + Decoder::Hex, + b"6080604052348015600e575f80fd5b50600436106030575f3560e01c8063816898ff146034578063\ + fb5343f314604c575b5f80fd5b604a60048036038101906046919060a6565b6066565b005b605260\ + 6f565b604051605d919060d9565b60405180910390f35b805f8190555050565b5f5481565b5f80fd\ + 5b5f819050919050565b6088816078565b81146091575f80fd5b50565b5f8135905060a081608156\ + 5b92915050565b5f6020828403121560b85760b76074565b5b5f60c3848285016094565b91505092\ + 915050565b60d3816078565b82525050565b5f60208201905060ea5f83018460cc565b9291505056\ + fea2646970667358221220caca1247066da378f2ec77c310f2ae51576272367b4fa11cc4350af4e9\ + ce4d0964736f6c634300081a0033" + ); +} + +#[cfg(test)] +mod tests { + use super::{ + testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}, + *, + }; + + #[test] + fn bytecode_markers_are_valid() { + let bytecode_hash = BytecodeHash::for_bytecode(&[0; 32]); + assert_eq!(bytecode_hash.marker(), BytecodeMarker::EraVm); + assert_eq!(bytecode_hash.len_in_bytes(), 32); + + let bytecode_hash = BytecodeHash::for_evm_bytecode(&[0; 32]); + assert_eq!(bytecode_hash.marker(), BytecodeMarker::Evm); + assert_eq!(bytecode_hash.len_in_bytes(), 32); + } + + #[test] + fn preparing_evm_bytecode() { + let prepared = trim_padded_evm_bytecode(RAW_EVM_BYTECODE).unwrap(); + assert_eq!(prepared, PROCESSED_EVM_BYTECODE); + } +} diff --git a/core/lib/basic_types/src/commitment.rs b/core/lib/basic_types/src/commitment.rs index eca339f40f42..0eed46aad782 100644 --- a/core/lib/basic_types/src/commitment.rs +++ b/core/lib/basic_types/src/commitment.rs @@ -1,10 +1,12 @@ +use std::str::FromStr; + use serde::{Deserialize, Serialize}; use strum::{Display, EnumIter}; use crate::{ ethabi, 
web3::contract::{Detokenize, Error as ContractError}, - U256, + Address, U256, }; #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize, EnumIter, Display)] @@ -41,3 +43,23 @@ impl Detokenize for L1BatchCommitmentMode { } } } + +impl FromStr for L1BatchCommitmentMode { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + match s { + "Rollup" => Ok(Self::Rollup), + "Validium" => Ok(Self::Validium), + _ => { + Err("Incorrect l1 batch commitment mode type; expected one of `Rollup`, `Validium`") + } + } + } +} + +#[derive(Default, Copy, Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct PubdataParams { + pub l2_da_validator_address: Address, + pub pubdata_type: L1BatchCommitmentMode, +} diff --git a/core/lib/basic_types/src/conversions.rs b/core/lib/basic_types/src/conversions.rs new file mode 100644 index 000000000000..544d4adc08f8 --- /dev/null +++ b/core/lib/basic_types/src/conversions.rs @@ -0,0 +1,36 @@ +//! Conversions between basic types. + +use crate::{Address, H256, U256}; + +pub fn h256_to_u256(num: H256) -> U256 { + U256::from_big_endian(num.as_bytes()) +} + +pub fn address_to_h256(address: &Address) -> H256 { + let mut buffer = [0u8; 32]; + buffer[12..].copy_from_slice(address.as_bytes()); + H256(buffer) +} + +pub fn address_to_u256(address: &Address) -> U256 { + h256_to_u256(address_to_h256(address)) +} + +pub fn u256_to_h256(num: U256) -> H256 { + let mut bytes = [0u8; 32]; + num.to_big_endian(&mut bytes); + H256::from_slice(&bytes) +} + +/// Converts `U256` value into an [`Address`]. +pub fn u256_to_address(value: &U256) -> Address { + let mut bytes = [0u8; 32]; + value.to_big_endian(&mut bytes); + + Address::from_slice(&bytes[12..]) +} + +/// Converts `H256` value into an [`Address`]. +pub fn h256_to_address(value: &H256) -> Address { + Address::from_slice(&value.as_bytes()[12..]) +} diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index 197bd8eb7aa2..d79bc57cc5e1 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -22,20 +22,33 @@ pub use ethabi::{ }; use serde::{de, Deserialize, Deserializer, Serialize}; +pub use self::conversions::{ + address_to_h256, address_to_u256, h256_to_address, h256_to_u256, u256_to_address, u256_to_h256, +}; + #[macro_use] mod macros; pub mod basic_fri_types; +pub mod bytecode; pub mod commitment; +mod conversions; pub mod network; pub mod protocol_version; pub mod prover_dal; -pub mod seed_phrase; +pub mod pubdata_da; +pub mod secrets; +pub mod serde_wrappers; pub mod settlement; pub mod tee_types; pub mod url; pub mod vm; pub mod web3; +/// Computes `ceil(a / b)`. +pub fn ceil_div_u256(a: U256, b: U256) -> U256 { + (a + b - U256::from(1)) / b +} + /// Parses H256 from a slice of bytes. 
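`Address` is 20 bytes while `H256`/`U256` are 32, so the conversion helpers above zero-pad on the left and truncate to the low 20 bytes; a round-trip sketch (illustrative):

let addr = Address::repeat_byte(0x11);
let as_h256 = address_to_h256(&addr);
assert_eq!(as_h256.as_bytes()[..12], [0_u8; 12]); // zero padding in the high bytes
assert_eq!(h256_to_address(&as_h256), addr);
// U256 <-> H256 is a plain big-endian reinterpretation.
assert_eq!(h256_to_u256(u256_to_h256(U256::from(42))), U256::from(42));
// ceil_div_u256 rounds up: (5 + 2 - 1) / 2 = 3.
assert_eq!(ceil_div_u256(U256::from(5), U256::from(2)), U256::from(3));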
pub fn parse_h256(bytes: &[u8]) -> anyhow::Result { Ok(<[u8; 32]>::try_from(bytes).context("invalid size")?.into()) diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index e01586cdad7d..ebecfaa1b872 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -69,6 +69,7 @@ pub enum ProtocolVersionId { Version24, Version25, Version26, + Version27, } impl ProtocolVersionId { @@ -122,6 +123,7 @@ impl ProtocolVersionId { ProtocolVersionId::Version24 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version27 => VmVersion::VmGateway, } } @@ -139,6 +141,10 @@ impl ProtocolVersionId { self <= &Self::Version22 } + pub fn is_pre_gateway(&self) -> bool { + self <= &Self::Version26 + } + pub fn is_1_4_0(&self) -> bool { self >= &ProtocolVersionId::Version18 && self < &ProtocolVersionId::Version20 } @@ -278,6 +284,7 @@ impl From for VmVersion { ProtocolVersionId::Version24 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version27 => VmVersion::VmGateway, } } } diff --git a/core/lib/basic_types/src/prover_dal.rs b/core/lib/basic_types/src/prover_dal.rs index bec5a55ced1f..d2af75fe2ff5 100644 --- a/core/lib/basic_types/src/prover_dal.rs +++ b/core/lib/basic_types/src/prover_dal.rs @@ -1,5 +1,5 @@ //! Types exposed by the prover DAL for general-purpose use. -use std::{net::IpAddr, ops::Add, str::FromStr}; +use std::{net::IpAddr, ops::Add, str::FromStr, time::Instant}; use chrono::{DateTime, Duration, NaiveDateTime, NaiveTime, Utc}; use serde::{Deserialize, Serialize}; @@ -18,6 +18,23 @@ pub struct FriProverJobMetadata { pub sequence_number: usize, pub depth: u16, pub is_node_final_proof: bool, + pub pick_time: Instant, +} + +impl FriProverJobMetadata { + /// Checks whether the metadata corresponds to a scheduler proof or not. + pub fn is_scheduler_proof(&self) -> anyhow::Result { + if self.aggregation_round == AggregationRound::Scheduler { + if self.circuit_id != 1 { + return Err(anyhow::anyhow!( + "Invalid circuit id {} for Scheduler proof", + self.circuit_id + )); + } + return Ok(true); + } + Ok(false) + } } #[derive(Debug, Clone, Copy, Default)] @@ -28,12 +45,6 @@ pub struct ExtendedJobCountStatistics { pub successful: usize, } -#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)] -pub struct JobCountStatistics { - pub queued: usize, - pub in_progress: usize, -} - impl Add for ExtendedJobCountStatistics { type Output = ExtendedJobCountStatistics; @@ -47,6 +58,19 @@ impl Add for ExtendedJobCountStatistics { } } +#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)] +pub struct JobCountStatistics { + pub queued: usize, + pub in_progress: usize, +} + +impl JobCountStatistics { + /// all returns sum of queued and in_progress. 
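With `Version27` mapped to `VmGateway`, the `is_pre_gateway` check added above splits the version space at 26; illustrative expectations:

// Versions up to and including 26 predate the gateway upgrade.
assert!(ProtocolVersionId::Version26.is_pre_gateway());
assert!(!ProtocolVersionId::Version27.is_pre_gateway());
// Version27 is the first version that runs on the gateway VM.
assert!(matches!(
    VmVersion::from(ProtocolVersionId::Version27),
    VmVersion::VmGateway
));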
+ pub fn all(&self) -> usize { + self.queued + self.in_progress + } +} + #[derive(Debug)] pub struct StuckJobs { pub id: u64, diff --git a/core/lib/types/src/pubdata_da.rs b/core/lib/basic_types/src/pubdata_da.rs similarity index 54% rename from core/lib/types/src/pubdata_da.rs rename to core/lib/basic_types/src/pubdata_da.rs index bc7dc55e53de..3f042da98ac1 100644 --- a/core/lib/types/src/pubdata_da.rs +++ b/core/lib/basic_types/src/pubdata_da.rs @@ -1,15 +1,17 @@ +//! Types related to data availability. + use chrono::{DateTime, Utc}; use num_enum::TryFromPrimitive; use serde::{Deserialize, Serialize}; -use zksync_basic_types::L1BatchNumber; -use zksync_config::configs::eth_sender::PubdataSendingMode; + +use crate::L1BatchNumber; /// Enum holding the current values used for DA Layers. #[repr(u8)] -#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Serialize)] -#[derive(TryFromPrimitive)] -pub enum PubdataDA { +#[derive(Debug, Clone, Copy, Default, PartialEq, Deserialize, Serialize, TryFromPrimitive)] +pub enum PubdataSendingMode { /// Pubdata is sent to the L1 as a tx calldata. + #[default] Calldata = 0, /// Pubdata is sent to L1 as EIP-4844 blobs. Blobs, @@ -19,17 +21,6 @@ pub enum PubdataDA { RelayedL2Calldata, } -impl From for PubdataDA { - fn from(value: PubdataSendingMode) -> Self { - match value { - PubdataSendingMode::Calldata => PubdataDA::Calldata, - PubdataSendingMode::Blobs => PubdataDA::Blobs, - PubdataSendingMode::Custom => PubdataDA::Custom, - PubdataSendingMode::RelayedL2Calldata => PubdataDA::RelayedL2Calldata, - } - } -} - /// Represents a blob in the data availability layer. #[derive(Debug, Clone)] pub struct DataAvailabilityBlob { diff --git a/core/lib/basic_types/src/secrets.rs b/core/lib/basic_types/src/secrets.rs new file mode 100644 index 000000000000..b3627470660c --- /dev/null +++ b/core/lib/basic_types/src/secrets.rs @@ -0,0 +1,54 @@ +use std::str::FromStr; + +use secrecy::{ExposeSecret, Secret}; + +#[derive(Debug, Clone)] +pub struct SeedPhrase(pub Secret); + +impl PartialEq for SeedPhrase { + fn eq(&self, other: &Self) -> bool { + self.0.expose_secret().eq(other.0.expose_secret()) + } +} + +impl FromStr for SeedPhrase { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Ok(SeedPhrase(s.parse()?)) + } +} + +#[derive(Debug, Clone)] +pub struct PrivateKey(pub Secret); + +impl PartialEq for PrivateKey { + fn eq(&self, other: &Self) -> bool { + self.0.expose_secret().eq(other.0.expose_secret()) + } +} + +impl FromStr for PrivateKey { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Ok(PrivateKey(s.parse()?)) + } +} + +#[derive(Debug, Clone)] +pub struct APIKey(pub Secret); + +impl PartialEq for APIKey { + fn eq(&self, other: &Self) -> bool { + self.0.expose_secret().eq(other.0.expose_secret()) + } +} + +impl FromStr for APIKey { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Ok(APIKey(s.parse()?)) + } +} diff --git a/core/lib/basic_types/src/seed_phrase.rs b/core/lib/basic_types/src/seed_phrase.rs deleted file mode 100644 index 332bfd585945..000000000000 --- a/core/lib/basic_types/src/seed_phrase.rs +++ /dev/null @@ -1,20 +0,0 @@ -use std::str::FromStr; - -use secrecy::{ExposeSecret, Secret}; - -#[derive(Debug, Clone)] -pub struct SeedPhrase(pub Secret); - -impl PartialEq for SeedPhrase { - fn eq(&self, other: &Self) -> bool { - self.0.expose_secret().eq(other.0.expose_secret()) - } -} - -impl FromStr for SeedPhrase { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result { - Ok(SeedPhrase(s.parse()?)) 
- } -} diff --git a/core/lib/utils/src/serde_wrappers.rs b/core/lib/basic_types/src/serde_wrappers.rs similarity index 97% rename from core/lib/utils/src/serde_wrappers.rs rename to core/lib/basic_types/src/serde_wrappers.rs index cb9687a8a504..4cc470493dce 100644 --- a/core/lib/utils/src/serde_wrappers.rs +++ b/core/lib/basic_types/src/serde_wrappers.rs @@ -1,3 +1,5 @@ +//! Generic `serde` helpers. + use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; /// Trait for specifying prefix for bytes to hex serialization @@ -61,9 +63,7 @@ pub type ZeroPrefixHexSerde = BytesToHexSerde; #[cfg(test)] mod tests { - use serde::{Deserialize, Serialize}; - - use crate::ZeroPrefixHexSerde; + use super::*; #[derive(Serialize, Deserialize, PartialEq, Debug)] struct Execute { diff --git a/core/lib/basic_types/src/vm.rs b/core/lib/basic_types/src/vm.rs index c753bbfc8183..f11f98596f18 100644 --- a/core/lib/basic_types/src/vm.rs +++ b/core/lib/basic_types/src/vm.rs @@ -16,6 +16,7 @@ pub enum VmVersion { Vm1_4_2, Vm1_5_0SmallBootloaderMemory, Vm1_5_0IncreasedBootloaderMemory, + VmGateway, } impl VmVersion { diff --git a/core/lib/basic_types/src/web3/mod.rs b/core/lib/basic_types/src/web3/mod.rs index aa7c49670333..e6d3cab37273 100644 --- a/core/lib/basic_types/src/web3/mod.rs +++ b/core/lib/basic_types/src/web3/mod.rs @@ -73,6 +73,14 @@ pub fn keccak256(bytes: &[u8]) -> [u8; 32] { output } +/// Hashes concatenation of the two provided hashes using `keccak256`. +pub fn keccak256_concat(hash1: H256, hash2: H256) -> H256 { + let mut bytes = [0_u8; 64]; + bytes[..32].copy_from_slice(hash1.as_bytes()); + bytes[32..].copy_from_slice(hash2.as_bytes()); + H256(keccak256(&bytes)) +} + // `Bytes`: from `web3::types::bytes` /// Raw bytes wrapper diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index af39e5159ba8..46c0b27d4b03 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -18,15 +18,10 @@ zksync_concurrency.workspace = true zksync_vlog = { workspace = true, optional = true } tracing = { workspace = true, optional = true } -url.workspace = true anyhow.workspace = true rand.workspace = true secrecy.workspace = true serde = { workspace = true, features = ["derive"] } -time = { workspace = true, features = ["serde-human-readable"] } -strum.workspace = true -strum_macros.workspace = true -vise.workspace = true [dev-dependencies] serde_json.workspace = true diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index c117064dbc40..d73dce81b13a 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -1,6 +1,6 @@ use std::{str::FromStr, time::Duration}; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; use zksync_basic_types::{ commitment::L1BatchCommitmentMode, network::Network, Address, L2ChainId, H256, }; @@ -244,3 +244,9 @@ impl MempoolConfig { Duration::from_millis(self.delay_interval) } } + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] +pub struct TimestampAsserterConfig { + /// Minimum time between current block.timestamp and the end of the asserted range + pub min_time_till_end_sec: u32, +} diff --git a/core/lib/config/src/configs/contract_verifier.rs b/core/lib/config/src/configs/contract_verifier.rs index 0016e1255de1..1dac0b17227e 100644 --- a/core/lib/config/src/configs/contract_verifier.rs +++ b/core/lib/config/src/configs/contract_verifier.rs @@ -9,13 +9,9 @@ use serde::Deserialize; pub struct ContractVerifierConfig { /// Max time of a single 
compilation (in s). pub compilation_timeout: u64, - /// Interval between polling db for verification requests (in ms). - pub polling_interval: Option, /// Port to which the Prometheus exporter server is listening. pub prometheus_port: u16, - pub threads_per_server: Option, pub port: u16, - pub url: String, } impl ContractVerifierConfig { @@ -23,9 +19,6 @@ impl ContractVerifierConfig { Duration::from_secs(self.compilation_timeout) } - pub fn polling_interval(&self) -> Duration { - Duration::from_millis(self.polling_interval.unwrap_or(1000)) - } pub fn bind_addr(&self) -> SocketAddr { SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), self.port) } diff --git a/core/lib/config/src/configs/contracts.rs b/core/lib/config/src/configs/contracts.rs index b68720ebaefe..38576833fa3e 100644 --- a/core/lib/config/src/configs/contracts.rs +++ b/core/lib/config/src/configs/contracts.rs @@ -29,17 +29,25 @@ pub struct ContractsConfig { pub diamond_proxy_addr: Address, pub validator_timelock_addr: Address, pub l1_shared_bridge_proxy_addr: Option
<Address>,
+ /// Contract address that serves as a shared bridge on L2.
+ /// It is expected that `L2SharedBridge` is used before gateway upgrade, and `L2AssetRouter` is used after.
pub l2_shared_bridge_addr: Option<Address>,
+ /// Address of `L2SharedBridge` that was used before gateway upgrade.
+ /// `None` if chain genesis used post-gateway protocol version.
+ /// If present it will be used as L2 token deployer address.
+ pub l2_legacy_shared_bridge_addr: Option<Address>,
pub l1_erc20_bridge_proxy_addr: Option<Address>,
pub l2_erc20_bridge_addr: Option<Address>,
pub l1_weth_bridge_proxy_addr: Option<Address>,
pub l2_weth_bridge_addr: Option<Address>,
pub l2_testnet_paymaster_addr: Option<Address>,
+ pub l2_timestamp_asserter_addr: Option<Address>,
pub l1_multicall3_addr: Address,
pub ecosystem_contracts: Option<EcosystemContracts>,
// Used by the RPC API and by the node builder in wiring the BaseTokenRatioProvider layer.
pub base_token_addr: Option<Address>,
pub chain_admin_addr: Option<Address>,
+ pub l2_da_validator_addr: Option<Address>
, } impl ContractsConfig { @@ -53,14 +61,17 @@ impl ContractsConfig { l2_erc20_bridge_addr: Some(Address::repeat_byte(0x0c)), l1_shared_bridge_proxy_addr: Some(Address::repeat_byte(0x0e)), l2_shared_bridge_addr: Some(Address::repeat_byte(0x0f)), + l2_legacy_shared_bridge_addr: Some(Address::repeat_byte(0x19)), l1_weth_bridge_proxy_addr: Some(Address::repeat_byte(0x0b)), l2_weth_bridge_addr: Some(Address::repeat_byte(0x0c)), l2_testnet_paymaster_addr: Some(Address::repeat_byte(0x11)), l1_multicall3_addr: Address::repeat_byte(0x12), + l2_timestamp_asserter_addr: Some(Address::repeat_byte(0x19)), governance_addr: Address::repeat_byte(0x13), base_token_addr: Some(Address::repeat_byte(0x14)), ecosystem_contracts: Some(EcosystemContracts::for_tests()), chain_admin_addr: Some(Address::repeat_byte(0x18)), + l2_da_validator_addr: Some(Address::repeat_byte(0x1a)), } } } diff --git a/core/lib/config/src/configs/da_client/avail.rs b/core/lib/config/src/configs/da_client/avail.rs index 590dc5fef18a..3993656d667a 100644 --- a/core/lib/config/src/configs/da_client/avail.rs +++ b/core/lib/config/src/configs/da_client/avail.rs @@ -1,16 +1,38 @@ use serde::Deserialize; -use zksync_basic_types::seed_phrase::SeedPhrase; +use zksync_basic_types::secrets::{APIKey, SeedPhrase}; + +pub const AVAIL_GAS_RELAY_CLIENT_NAME: &str = "GasRelay"; +pub const AVAIL_FULL_CLIENT_NAME: &str = "FullClient"; + +#[derive(Clone, Debug, PartialEq, Deserialize)] +#[serde(tag = "avail_client")] +pub enum AvailClientConfig { + FullClient(AvailDefaultConfig), + GasRelay(AvailGasRelayConfig), +} #[derive(Clone, Debug, PartialEq, Deserialize)] pub struct AvailConfig { - pub api_node_url: String, pub bridge_api_url: String, + pub timeout_ms: usize, + #[serde(flatten)] + pub config: AvailClientConfig, +} + +#[derive(Clone, Debug, PartialEq, Deserialize)] +pub struct AvailDefaultConfig { + pub api_node_url: String, pub app_id: u32, - pub timeout: usize, +} + +#[derive(Clone, Debug, PartialEq, Deserialize)] +pub struct AvailGasRelayConfig { + pub gas_relay_api_url: String, pub max_retries: usize, } #[derive(Clone, Debug, PartialEq)] pub struct AvailSecrets { pub seed_phrase: Option, + pub gas_relay_api_key: Option, } diff --git a/core/lib/config/src/configs/da_client/celestia.rs b/core/lib/config/src/configs/da_client/celestia.rs new file mode 100644 index 000000000000..45810e0381e8 --- /dev/null +++ b/core/lib/config/src/configs/da_client/celestia.rs @@ -0,0 +1,15 @@ +use serde::Deserialize; +use zksync_basic_types::secrets::PrivateKey; + +#[derive(Clone, Debug, Default, PartialEq, Deserialize)] +pub struct CelestiaConfig { + pub api_node_url: String, + pub namespace: String, + pub chain_id: String, + pub timeout_ms: u64, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct CelestiaSecrets { + pub private_key: PrivateKey, +} diff --git a/core/lib/config/src/configs/da_client/eigen.rs b/core/lib/config/src/configs/da_client/eigen.rs new file mode 100644 index 000000000000..f2c05a0f61ef --- /dev/null +++ b/core/lib/config/src/configs/da_client/eigen.rs @@ -0,0 +1,13 @@ +use serde::Deserialize; +use zksync_basic_types::secrets::PrivateKey; + +#[derive(Clone, Debug, Default, PartialEq, Deserialize)] +pub struct EigenConfig { + pub rpc_node_url: String, + pub inclusion_polling_interval_ms: u64, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct EigenSecrets { + pub private_key: PrivateKey, +} diff --git a/core/lib/config/src/configs/da_client/mod.rs b/core/lib/config/src/configs/da_client/mod.rs index 406305a77b16..322c4a20aac8 100644 --- 
a/core/lib/config/src/configs/da_client/mod.rs +++ b/core/lib/config/src/configs/da_client/mod.rs @@ -1,12 +1,18 @@ -use crate::{AvailConfig, ObjectStoreConfig}; +use crate::{AvailConfig, CelestiaConfig, EigenConfig, ObjectStoreConfig}; pub mod avail; +pub mod celestia; +pub mod eigen; pub const AVAIL_CLIENT_CONFIG_NAME: &str = "Avail"; +pub const CELESTIA_CLIENT_CONFIG_NAME: &str = "Celestia"; +pub const EIGEN_CLIENT_CONFIG_NAME: &str = "Eigen"; pub const OBJECT_STORE_CLIENT_CONFIG_NAME: &str = "ObjectStore"; #[derive(Debug, Clone, PartialEq)] pub enum DAClientConfig { Avail(AvailConfig), + Celestia(CelestiaConfig), + Eigen(EigenConfig), ObjectStore(ObjectStoreConfig), } diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index 3a1a0505728c..7b67f015238d 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -2,7 +2,7 @@ use std::time::Duration; use anyhow::Context as _; use serde::Deserialize; -use zksync_basic_types::{settlement::SettlementMode, H256}; +use zksync_basic_types::{pubdata_da::PubdataSendingMode, settlement::SettlementMode, H256}; use zksync_crypto_primitives::K256PrivateKey; use crate::EthWatchConfig; @@ -80,15 +80,6 @@ pub enum ProofLoadingMode { FriProofFromGcs, } -#[derive(Debug, Deserialize, Clone, Copy, PartialEq, Default)] -pub enum PubdataSendingMode { - #[default] - Calldata, - Blobs, - Custom, - RelayedL2Calldata, -} - #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct SenderConfig { pub aggregated_proof_sizes: Vec, diff --git a/core/lib/config/src/configs/experimental.rs b/core/lib/config/src/configs/experimental.rs index 618cfd3d388c..2553864e251d 100644 --- a/core/lib/config/src/configs/experimental.rs +++ b/core/lib/config/src/configs/experimental.rs @@ -29,6 +29,9 @@ pub struct ExperimentalDBConfig { /// correspondingly; otherwise, RocksDB performance can significantly degrade. #[serde(default)] pub include_indices_and_filters_in_block_cache: bool, + /// Enables the stale keys repair task for the Merkle tree. + #[serde(default)] + pub merkle_tree_repair_stale_keys: bool, } impl Default for ExperimentalDBConfig { @@ -40,6 +43,7 @@ impl Default for ExperimentalDBConfig { protective_reads_persistence_enabled: false, processing_delay_ms: Self::default_merkle_tree_processing_delay_ms(), include_indices_and_filters_in_block_cache: false, + merkle_tree_repair_stale_keys: false, } } } @@ -106,4 +110,9 @@ pub struct ExperimentalVmConfig { /// the new VM doesn't produce call traces and can diverge from the old VM! #[serde(default)] pub state_keeper_fast_vm_mode: FastVmMode, + + /// Fast VM mode to use in the API server. Currently, some operations are not supported by the fast VM (e.g., `debug_traceCall` + /// or transaction validation), so the legacy VM will always be used for them. + #[serde(default)] + pub api_fast_vm_mode: FastVmMode, } diff --git a/core/lib/config/src/configs/external_price_api_client.rs b/core/lib/config/src/configs/external_price_api_client.rs index 15cc7d29d848..c1092f3a7275 100644 --- a/core/lib/config/src/configs/external_price_api_client.rs +++ b/core/lib/config/src/configs/external_price_api_client.rs @@ -4,16 +4,21 @@ use serde::Deserialize; pub const DEFAULT_TIMEOUT_MS: u64 = 10_000; +pub const DEFAULT_FORCED_NEXT_VALUE_FLUCTUATION: u32 = 3; + #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct ForcedPriceClientConfig { /// Forced conversion ratio pub numerator: Option, pub denominator: Option, - /// Forced fluctuation. 
It defines how much percent numerator / - /// denominator should fluctuate from their forced values. If it's None or 0, then ForcedPriceClient - /// will return the same quote every time it's called. Otherwise, ForcedPriceClient will return - /// forced_quote +/- forced_fluctuation % from its values. + /// Forced fluctuation. It defines how much percent the ratio should fluctuate from its forced + /// value. If it's None or 0, then the ForcedPriceClient will return the same quote every time + /// it's called. Otherwise, ForcedPriceClient will return quote with numerator +/- fluctuation %. pub fluctuation: Option, + /// In order to smooth out fluctuation, consecutive values returned by forced client will not + /// differ more than next_value_fluctuation percent. If it's None, a default of 3% will be applied. + #[serde(default = "ExternalPriceApiClientConfig::default_forced_next_value_fluctuation")] + pub next_value_fluctuation: u32, } #[derive(Debug, Clone, PartialEq, Deserialize)] @@ -31,6 +36,10 @@ impl ExternalPriceApiClientConfig { DEFAULT_TIMEOUT_MS } + fn default_forced_next_value_fluctuation() -> u32 { + DEFAULT_FORCED_NEXT_VALUE_FLUCTUATION + } + pub fn client_timeout(&self) -> Duration { Duration::from_millis(self.client_timeout_ms) } diff --git a/core/lib/config/src/configs/general.rs b/core/lib/config/src/configs/general.rs index bb733510f77d..dfb81af1cf8c 100644 --- a/core/lib/config/src/configs/general.rs +++ b/core/lib/config/src/configs/general.rs @@ -1,7 +1,10 @@ use crate::{ configs::{ base_token_adjuster::BaseTokenAdjusterConfig, - chain::{CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig}, + chain::{ + CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig, + TimestampAsserterConfig, + }, consensus::ConsensusConfig, da_client::DAClientConfig, da_dispatcher::DADispatcherConfig, @@ -56,4 +59,5 @@ pub struct GeneralConfig { pub external_proof_integration_api_config: Option, pub experimental_vm_config: Option, pub prover_job_monitor_config: Option, + pub timestamp_asserter_config: Option, } diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index a8d136d632ea..5bf9a49e0acf 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -5,7 +5,7 @@ pub use self::{ commitment_generator::CommitmentGeneratorConfig, contract_verifier::ContractVerifierConfig, contracts::{ContractsConfig, EcosystemContracts}, - da_client::{avail::AvailConfig, DAClientConfig}, + da_client::{avail::AvailConfig, celestia::CelestiaConfig, eigen::EigenConfig, DAClientConfig}, da_dispatcher::DADispatcherConfig, database::{DBConfig, PostgresConfig}, eth_sender::{EthConfig, GasAdjusterConfig}, @@ -22,10 +22,10 @@ pub use self::{ genesis::GenesisConfig, object_store::ObjectStoreConfig, observability::{ObservabilityConfig, OpentelemetryConfig}, - proof_data_handler::ProofDataHandlerConfig, + proof_data_handler::{ProofDataHandlerConfig, TeeConfig}, prover_job_monitor::ProverJobMonitorConfig, pruning::PruningConfig, - secrets::{DatabaseSecrets, L1Secrets, Secrets}, + secrets::{DataAvailabilitySecrets, DatabaseSecrets, L1Secrets, Secrets}, snapshot_recovery::SnapshotRecoveryConfig, snapshots_creator::SnapshotsCreatorConfig, utils::PrometheusConfig, @@ -60,7 +60,6 @@ pub mod house_keeper; pub mod object_store; pub mod observability; pub mod proof_data_handler; -pub mod prover_autoscaler; pub mod prover_job_monitor; pub mod pruning; pub mod secrets; diff --git 
a/core/lib/config/src/configs/observability.rs b/core/lib/config/src/configs/observability.rs index 42363cbcb4ff..f7ad2a97b91c 100644 --- a/core/lib/config/src/configs/observability.rs +++ b/core/lib/config/src/configs/observability.rs @@ -1,6 +1,8 @@ +use serde::Deserialize; + /// Configuration for the essential observability stack, like /// logging and sentry integration. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Deserialize)] pub struct ObservabilityConfig { /// URL of the Sentry instance to send events to. pub sentry_url: Option, @@ -15,7 +17,7 @@ pub struct ObservabilityConfig { pub log_directives: Option, } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Deserialize)] pub struct OpentelemetryConfig { /// Enables export of span data of specified level (and above) using opentelemetry exporters. pub level: String, diff --git a/core/lib/config/src/configs/proof_data_handler.rs b/core/lib/config/src/configs/proof_data_handler.rs index de7f6969b05f..1d8703df51aa 100644 --- a/core/lib/config/src/configs/proof_data_handler.rs +++ b/core/lib/config/src/configs/proof_data_handler.rs @@ -1,12 +1,56 @@ use std::time::Duration; use serde::Deserialize; +use zksync_basic_types::L1BatchNumber; + +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct TeeConfig { + /// If true, the TEE support is enabled. + pub tee_support: bool, + /// All batches before this one are considered to be processed. + pub first_tee_processed_batch: L1BatchNumber, + /// Timeout in seconds for retrying TEE proof generation if it fails. Retries continue + /// indefinitely until successful. + pub tee_proof_generation_timeout_in_secs: u16, +} + +impl Default for TeeConfig { + fn default() -> Self { + TeeConfig { + tee_support: Self::default_tee_support(), + first_tee_processed_batch: Self::default_first_tee_processed_batch(), + tee_proof_generation_timeout_in_secs: + Self::default_tee_proof_generation_timeout_in_secs(), + } + } +} + +impl TeeConfig { + pub fn default_tee_support() -> bool { + false + } + + pub fn default_first_tee_processed_batch() -> L1BatchNumber { + L1BatchNumber(0) + } + + pub fn default_tee_proof_generation_timeout_in_secs() -> u16 { + 600 + } + + pub fn tee_proof_generation_timeout(&self) -> Duration { + Duration::from_secs(self.tee_proof_generation_timeout_in_secs.into()) + } +} #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ProofDataHandlerConfig { pub http_port: u16, pub proof_generation_timeout_in_secs: u16, - pub tee_support: bool, + #[serde(skip)] + // ^ Filled in separately in `Self::from_env()`. We cannot use `serde(flatten)` because it + // doesn't work with `envy`: https://github.com/softprops/envy/issues/26 + pub tee_config: TeeConfig, } impl ProofDataHandlerConfig { diff --git a/core/lib/config/src/configs/prover_autoscaler.rs b/core/lib/config/src/configs/prover_autoscaler.rs deleted file mode 100644 index 41131fc1b8c7..000000000000 --- a/core/lib/config/src/configs/prover_autoscaler.rs +++ /dev/null @@ -1,117 +0,0 @@ -use std::collections::HashMap; - -use serde::Deserialize; -use strum::Display; -use strum_macros::EnumString; -use time::Duration; -use vise::EncodeLabelValue; - -use crate::configs::ObservabilityConfig; - -/// Config used for running ProverAutoscaler (both Scaler and Agent). -#[derive(Debug, Clone, PartialEq)] -pub struct ProverAutoscalerConfig { - /// Amount of time ProverJobMonitor will wait all it's tasks to finish. - // TODO: find a way to use #[serde(with = "humantime_serde")] with time::Duration. 
- pub graceful_shutdown_timeout: Duration, - pub agent_config: Option, - pub scaler_config: Option, - pub observability: Option, -} - -#[derive(Debug, Clone, PartialEq, Deserialize)] -pub struct ProverAutoscalerAgentConfig { - /// Port for prometheus metrics connection. - pub prometheus_port: u16, - /// HTTP port for global Scaler to connect to the Agent running in a cluster. - pub http_port: u16, - /// List of namespaces to watch. - #[serde(default = "ProverAutoscalerAgentConfig::default_namespaces")] - pub namespaces: Vec, - /// Watched cluster name. Also can be set via flag. - pub cluster_name: Option, -} - -#[derive(Debug, Clone, PartialEq, Deserialize, Default)] -pub struct ProverAutoscalerScalerConfig { - /// Port for prometheus metrics connection. - pub prometheus_port: u16, - /// The interval between runs for global Scaler. - #[serde(default = "ProverAutoscalerScalerConfig::default_scaler_run_interval")] - pub scaler_run_interval: Duration, - /// URL to get queue reports from. - /// In production should be "http://prover-job-monitor.stage2.svc.cluster.local:3074/queue_report". - #[serde(default = "ProverAutoscalerScalerConfig::default_prover_job_monitor_url")] - pub prover_job_monitor_url: String, - /// List of ProverAutoscaler Agents to get cluster data from. - pub agents: Vec, - /// Mapping of namespaces to protocol versions. - pub protocol_versions: HashMap, - /// Default priorities, which cluster to prefer when there is no other information. - pub cluster_priorities: HashMap, - /// Prover speed per GPU. Used to calculate desired number of provers for queue size. - pub prover_speed: HashMap, - /// Duration after which pending pod considered long pending. - #[serde(default = "ProverAutoscalerScalerConfig::default_long_pending_duration")] - pub long_pending_duration: Duration, -} - -#[derive( - Default, - Debug, - Display, - Hash, - PartialEq, - Eq, - Clone, - Copy, - Ord, - PartialOrd, - EnumString, - EncodeLabelValue, - Deserialize, -)] -pub enum Gpu { - #[default] - Unknown, - #[strum(ascii_case_insensitive)] - L4, - #[strum(ascii_case_insensitive)] - T4, - #[strum(ascii_case_insensitive)] - V100, - #[strum(ascii_case_insensitive)] - P100, - #[strum(ascii_case_insensitive)] - A100, -} - -impl ProverAutoscalerConfig { - /// Default graceful shutdown timeout -- 5 seconds - pub fn default_graceful_shutdown_timeout() -> Duration { - Duration::seconds(5) - } -} - -impl ProverAutoscalerAgentConfig { - pub fn default_namespaces() -> Vec { - vec!["prover-blue".to_string(), "prover-red".to_string()] - } -} - -impl ProverAutoscalerScalerConfig { - /// Default scaler_run_interval -- 10s - pub fn default_scaler_run_interval() -> Duration { - Duration::seconds(10) - } - - /// Default prover_job_monitor_url -- cluster local URL - pub fn default_prover_job_monitor_url() -> String { - "http://localhost:3074/queue_report".to_string() - } - - /// Default long_pending_duration -- 10m - pub fn default_long_pending_duration() -> Duration { - Duration::minutes(10) - } -} diff --git a/core/lib/config/src/configs/secrets.rs b/core/lib/config/src/configs/secrets.rs index 779bad370659..75ff067c2473 100644 --- a/core/lib/config/src/configs/secrets.rs +++ b/core/lib/config/src/configs/secrets.rs @@ -1,7 +1,10 @@ use anyhow::Context; use zksync_basic_types::url::SensitiveUrl; -use crate::configs::{consensus::ConsensusSecrets, da_client::avail::AvailSecrets}; +use crate::configs::{ + consensus::ConsensusSecrets, + da_client::{avail::AvailSecrets, celestia::CelestiaSecrets, eigen::EigenSecrets}, +}; 
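The `Celestia` and `Eigen` secret variants added here must match the DA client config selected in `node_builder.rs` above; wiring bails on a mismatch. A hedged restatement of that pairing rule (illustrative helper, not part of this diff):

// ObjectStore needs no secrets; every other client must pair with its own variant.
fn da_pair_is_valid(config: &DAClientConfig, secrets: &DataAvailabilitySecrets) -> bool {
    matches!(
        (config, secrets),
        (DAClientConfig::Avail(_), DataAvailabilitySecrets::Avail(_))
            | (DAClientConfig::Celestia(_), DataAvailabilitySecrets::Celestia(_))
            | (DAClientConfig::Eigen(_), DataAvailabilitySecrets::Eigen(_))
            | (DAClientConfig::ObjectStore(_), _)
    )
}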
#[derive(Debug, Clone, PartialEq)] pub struct DatabaseSecrets { @@ -18,6 +21,8 @@ pub struct L1Secrets { #[derive(Debug, Clone, PartialEq)] pub enum DataAvailabilitySecrets { Avail(AvailSecrets), + Celestia(CelestiaSecrets), + Eigen(EigenSecrets), } #[derive(Debug, Clone, PartialEq)] diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs index 9191edc39822..f77a8ceb39ad 100644 --- a/core/lib/config/src/lib.rs +++ b/core/lib/config/src/lib.rs @@ -1,10 +1,10 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] pub use crate::configs::{ - ApiConfig, AvailConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, ContractsConfig, - DAClientConfig, DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, - ExternalProofIntegrationApiConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, - PostgresConfig, SnapshotsCreatorConfig, + ApiConfig, AvailConfig, BaseTokenAdjusterConfig, CelestiaConfig, ContractVerifierConfig, + ContractsConfig, DAClientConfig, DADispatcherConfig, DBConfig, EigenConfig, EthConfig, + EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig, GenesisConfig, + ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; pub mod configs; diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 0fdd927d19f0..c24d47f27b33 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -7,7 +7,8 @@ use zksync_basic_types::{ commitment::L1BatchCommitmentMode, network::Network, protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, - seed_phrase::SeedPhrase, + pubdata_da::PubdataSendingMode, + secrets::{APIKey, SeedPhrase}, vm::FastVmMode, L1BatchNumber, L1ChainId, L2ChainId, }; @@ -16,7 +17,12 @@ use zksync_crypto_primitives::K256PrivateKey; use crate::{ configs::{ - self, da_client::DAClientConfig::Avail, eth_sender::PubdataSendingMode, + self, + chain::TimestampAsserterConfig, + da_client::{ + avail::{AvailClientConfig, AvailDefaultConfig}, + DAClientConfig::Avail, + }, external_price_api_client::ForcedPriceClientConfig, }, AvailConfig, @@ -235,11 +241,8 @@ impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::ContractVerifierConfig { configs::ContractVerifierConfig { compilation_timeout: self.sample(rng), - polling_interval: self.sample(rng), prometheus_port: self.sample(rng), - threads_per_server: self.sample(rng), port: self.sample(rng), - url: self.sample(rng), } } } @@ -256,13 +259,16 @@ impl Distribution for EncodeDist { l2_erc20_bridge_addr: self.sample_opt(|| rng.gen()), l1_shared_bridge_proxy_addr: self.sample_opt(|| rng.gen()), l2_shared_bridge_addr: self.sample_opt(|| rng.gen()), + l2_legacy_shared_bridge_addr: self.sample_opt(|| rng.gen()), l1_weth_bridge_proxy_addr: self.sample_opt(|| rng.gen()), l2_weth_bridge_addr: self.sample_opt(|| rng.gen()), l2_testnet_paymaster_addr: self.sample_opt(|| rng.gen()), + l2_timestamp_asserter_addr: self.sample_opt(|| rng.gen()), l1_multicall3_addr: rng.gen(), ecosystem_contracts: self.sample(rng), base_token_addr: self.sample_opt(|| rng.gen()), chain_admin_addr: self.sample_opt(|| rng.gen()), + l2_da_validator_addr: self.sample_opt(|| rng.gen()), } } } @@ -299,6 +305,7 @@ impl Distribution for EncodeDist { protective_reads_persistence_enabled: self.sample(rng), processing_delay_ms: self.sample(rng), include_indices_and_filters_in_block_cache: self.sample(rng), + merkle_tree_repair_stale_keys: self.sample(rng), } } } @@ -328,6 +335,7 @@ impl Distribution for EncodeDist { 
configs::ExperimentalVmConfig { playground: self.sample(rng), state_keeper_fast_vm_mode: gen_fast_vm_mode(rng), + api_fast_vm_mode: gen_fast_vm_mode(rng), } } } @@ -388,17 +396,6 @@ impl Distribution for EncodeDist { } } -impl Distribution for EncodeDist { - fn sample(&self, rng: &mut R) -> configs::eth_sender::PubdataSendingMode { - type T = configs::eth_sender::PubdataSendingMode; - match rng.gen_range(0..3) { - 0 => T::Calldata, - 1 => T::Blobs, - _ => T::Custom, - } - } -} - impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::eth_sender::SenderConfig { configs::eth_sender::SenderConfig { @@ -680,7 +677,11 @@ impl Distribution for EncodeDist { configs::ProofDataHandlerConfig { http_port: self.sample(rng), proof_generation_timeout_in_secs: self.sample(rng), - tee_support: self.sample(rng), + tee_config: configs::TeeConfig { + tee_support: self.sample(rng), + first_tee_processed_batch: L1BatchNumber(rng.gen()), + tee_proof_generation_timeout_in_secs: self.sample(rng), + }, } } } @@ -946,11 +947,12 @@ impl Distribution for EncodeDist { impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::da_client::DAClientConfig { Avail(AvailConfig { - api_node_url: self.sample(rng), bridge_api_url: self.sample(rng), - app_id: self.sample(rng), - timeout: self.sample(rng), - max_retries: self.sample(rng), + timeout_ms: self.sample(rng), + config: AvailClientConfig::FullClient(AvailDefaultConfig { + api_node_url: self.sample(rng), + app_id: self.sample(rng), + }), }) } } @@ -959,6 +961,7 @@ impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::secrets::DataAvailabilitySecrets { configs::secrets::DataAvailabilitySecrets::Avail(configs::da_client::avail::AvailSecrets { seed_phrase: Some(SeedPhrase(Secret::new(self.sample(rng)))), + gas_relay_api_key: Some(APIKey(Secret::new(self.sample(rng)))), }) } } @@ -1110,6 +1113,7 @@ impl Distribution for EncodeDist { external_proof_integration_api_config: self.sample(rng), experimental_vm_config: self.sample(rng), prover_job_monitor_config: self.sample(rng), + timestamp_asserter_config: self.sample(rng), + } + } +} + +impl Distribution for EncodeDist { + fn sample(&self, rng: &mut R) -> TimestampAsserterConfig { + TimestampAsserterConfig { + min_time_till_end_sec: self.sample(rng), } } } diff --git a/core/lib/constants/Cargo.toml b/core/lib/constants/Cargo.toml index b741b5734902..bc4d1f7bb57f 100644 --- a/core/lib/constants/Cargo.toml +++ b/core/lib/constants/Cargo.toml @@ -12,6 +12,5 @@ categories.workspace = true [dependencies] zksync_basic_types.workspace = true -zksync_utils.workspace = true once_cell.workspace = true diff --git a/core/lib/constants/src/contracts.rs b/core/lib/constants/src/contracts.rs index fe37ef6c69fd..4f0f362d9149 100644 --- a/core/lib/constants/src/contracts.rs +++ b/core/lib/constants/src/contracts.rs @@ -135,12 +135,36 @@ pub const EVM_GAS_MANAGER_ADDRESS: Address = H160([ 0x00, 0x00, 0x80, 0x13, ]); -/// Note, that the `Create2Factory` is explicitly deployed on a non-system-contract address. 
pub const CREATE2_FACTORY_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, ]); +pub const L2_GENESIS_UPGRADE_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x01, +]); + +pub const L2_BRIDGEHUB_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x02, +]); + +pub const L2_ASSET_ROUTER_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x03, +]); + +pub const L2_NATIVE_TOKEN_VAULT_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x04, +]); + +pub const L2_MESSAGE_ROOT_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x05, +]); + pub const ERC20_TRANSFER_TOPIC: H256 = H256([ 221, 242, 82, 173, 27, 226, 200, 155, 105, 194, 176, 104, 252, 55, 141, 170, 149, 43, 167, 241, 99, 196, 161, 22, 40, 245, 90, 77, 245, 35, 179, 239, diff --git a/core/lib/constants/src/lib.rs b/core/lib/constants/src/lib.rs index 6aab79ad71f3..30ae6a7b582a 100644 --- a/core/lib/constants/src/lib.rs +++ b/core/lib/constants/src/lib.rs @@ -3,6 +3,7 @@ pub mod contracts; pub mod crypto; pub mod ethereum; pub mod fees; +pub mod message_root; pub mod system_context; pub mod system_logs; pub mod trusted_slots; diff --git a/core/lib/constants/src/message_root.rs b/core/lib/constants/src/message_root.rs new file mode 100644 index 000000000000..a8f4a034fb99 --- /dev/null +++ b/core/lib/constants/src/message_root.rs @@ -0,0 +1,5 @@ +// Position of `FullTree::_height` in `MessageRoot`'s storage layout. +pub const AGG_TREE_HEIGHT_KEY: usize = 3; + +// Position of `FullTree::nodes` in `MessageRoot`'s storage layout. +pub const AGG_TREE_NODES_KEY: usize = 5; diff --git a/core/lib/constants/src/system_logs.rs b/core/lib/constants/src/system_logs.rs index bd4167b3d02c..aa2c2cc156cc 100644 --- a/core/lib/constants/src/system_logs.rs +++ b/core/lib/constants/src/system_logs.rs @@ -1,11 +1,8 @@ /// The key of the system log with value of the L2->L1 logs tree root hash pub const L2_TO_L1_LOGS_TREE_ROOT_KEY: u32 = 0; -/// The key of the system log with value of the state diff hash -pub const STATE_DIFF_HASH_KEY: u32 = 2; +/// The key of the system log with value of the state diff hash for pre-gateway protocol versions +pub const STATE_DIFF_HASH_KEY_PRE_GATEWAY: u32 = 2; -/// The key of the system log with value of the first blob linear hash -pub const BLOB1_LINEAR_HASH_KEY: u32 = 7; - -/// The key of the system log with value of the second blob linear hash -pub const BLOB2_LINEAR_HASH_KEY: u32 = 8; +/// The key of the system log with value of the first blob linear hash for pre-gateway protocol versions +pub const BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY: u32 = 7; diff --git a/core/lib/constants/src/trusted_slots.rs b/core/lib/constants/src/trusted_slots.rs index e5a626d49036..d66b2bfd4729 100644 --- a/core/lib/constants/src/trusted_slots.rs +++ b/core/lib/constants/src/trusted_slots.rs @@ -1,6 +1,5 @@ use once_cell::sync::Lazy; -use zksync_basic_types::{H256, U256}; -use zksync_utils::h256_to_u256; +use zksync_basic_types::{h256_to_u256, H256, U256}; /// /// Well known-slots (e.g. 
proxy addresses in popular EIPs). diff --git a/core/lib/contract_verifier/Cargo.toml b/core/lib/contract_verifier/Cargo.toml index 580982c9a700..6ccd6422d7da 100644 --- a/core/lib/contract_verifier/Cargo.toml +++ b/core/lib/contract_verifier/Cargo.toml @@ -13,7 +13,6 @@ categories.workspace = true [dependencies] zksync_types.workspace = true zksync_dal.workspace = true -zksync_config.workspace = true zksync_contracts.workspace = true zksync_queued_job_processor.workspace = true zksync_utils.workspace = true @@ -27,8 +26,14 @@ ethabi.workspace = true vise.workspace = true hex.workspace = true serde = { workspace = true, features = ["derive"] } -lazy_static.workspace = true tempfile.workspace = true regex.workspace = true tracing.workspace = true semver.workspace = true + +[dev-dependencies] +zksync_node_test_utils.workspace = true +zksync_vm_interface.workspace = true + +assert_matches.workspace = true +test-casing.workspace = true diff --git a/core/lib/contract_verifier/src/compilers/mod.rs b/core/lib/contract_verifier/src/compilers/mod.rs new file mode 100644 index 000000000000..c82a6575ee4c --- /dev/null +++ b/core/lib/contract_verifier/src/compilers/mod.rs @@ -0,0 +1,143 @@ +use std::collections::HashMap; + +use anyhow::Context as _; +use serde::{Deserialize, Serialize}; +use zksync_types::contract_verification_api::CompilationArtifacts; + +pub(crate) use self::{ + solc::{Solc, SolcInput}, + vyper::{Vyper, VyperInput}, + zksolc::{ZkSolc, ZkSolcInput}, + zkvyper::ZkVyper, +}; +use crate::error::ContractVerifierError; + +mod solc; +mod vyper; +mod zksolc; +mod zkvyper; + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct StandardJson { + pub language: String, + pub sources: HashMap, + #[serde(default)] + settings: Settings, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +struct Settings { + /// The output selection filters. + output_selection: Option, + /// Other settings (only filled when parsing `StandardJson` input from the request). + #[serde(flatten)] + other: serde_json::Value, +} + +impl Default for Settings { + fn default() -> Self { + Self { + output_selection: None, + other: serde_json::json!({}), + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct Source { + /// The source code file content. + pub content: String, +} + +/// Users may provide either just contract name or source file name and contract name joined with ":". +fn process_contract_name(original_name: &str, extension: &str) -> (String, String) { + if let Some((file_name, contract_name)) = original_name.rsplit_once(':') { + (file_name.to_owned(), contract_name.to_owned()) + } else { + ( + format!("{original_name}.{extension}"), + original_name.to_owned(), + ) + } +} + +/// Parsing logic shared between `solc` and `zksolc`. 
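An illustrative unit test (not part of the diff) that pins down the `process_contract_name` behavior shown above, for both the bare-name and `file:contract` forms:

```rust
#[cfg(test)]
mod contract_name_tests {
    use super::process_contract_name;

    #[test]
    fn splitting_contract_names() {
        // Bare name: the default extension derives the file name.
        assert_eq!(
            process_contract_name("Counter", "sol"),
            ("Counter.sol".to_owned(), "Counter".to_owned())
        );
        // Qualified name: split at the last `:`.
        assert_eq!(
            process_contract_name("contracts/a/B.sol:B", "sol"),
            ("contracts/a/B.sol".to_owned(), "B".to_owned())
        );
    }
}
```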
+fn parse_standard_json_output( + output: &serde_json::Value, + contract_name: String, + file_name: String, + get_deployed_bytecode: bool, +) -> Result { + if let Some(errors) = output.get("errors") { + let errors = errors.as_array().unwrap().clone(); + if errors + .iter() + .any(|err| err["severity"].as_str() == Some("error")) + { + let error_messages = errors + .into_iter() + .filter_map(|err| { + // `formattedMessage` is an optional field + err.get("formattedMessage") + .or_else(|| err.get("message")) + .cloned() + }) + .collect(); + return Err(ContractVerifierError::CompilationError( + serde_json::Value::Array(error_messages), + )); + } + } + + let contracts = output["contracts"] + .get(&file_name) + .ok_or(ContractVerifierError::MissingSource(file_name))?; + let Some(contract) = contracts.get(&contract_name) else { + return Err(ContractVerifierError::MissingContract(contract_name)); + }; + + let Some(bytecode_str) = contract.pointer("/evm/bytecode/object") else { + return Err(ContractVerifierError::AbstractContract(contract_name)); + }; + let bytecode_str = bytecode_str + .as_str() + .context("unexpected `/evm/bytecode/object` value")?; + // Strip an optional `0x` prefix (output by `vyper`, but not by `solc` / `zksolc`) + let bytecode_str = bytecode_str.strip_prefix("0x").unwrap_or(bytecode_str); + let bytecode = hex::decode(bytecode_str).context("invalid bytecode")?; + + let deployed_bytecode = if get_deployed_bytecode { + let Some(bytecode_str) = contract.pointer("/evm/deployedBytecode/object") else { + return Err(ContractVerifierError::AbstractContract(contract_name)); + }; + let bytecode_str = bytecode_str + .as_str() + .context("unexpected `/evm/deployedBytecode/object` value")?; + let bytecode_str = bytecode_str.strip_prefix("0x").unwrap_or(bytecode_str); + Some(hex::decode(bytecode_str).context("invalid deployed bytecode")?) + } else { + None + }; + + let mut abi = contract["abi"].clone(); + if abi.is_null() { + // ABI is undefined for Yul contracts when compiled with standalone `solc`. For uniformity with `zksolc`, + // replace it with an empty array. + abi = serde_json::json!([]); + } else if !abi.is_array() { + let err = anyhow::anyhow!( + "unexpected value for ABI: {}", + serde_json::to_string_pretty(&abi).unwrap() + ); + return Err(err.into()); + } + + Ok(CompilationArtifacts { + bytecode, + deployed_bytecode, + abi, + }) +} diff --git a/core/lib/contract_verifier/src/compilers/solc.rs b/core/lib/contract_verifier/src/compilers/solc.rs new file mode 100644 index 000000000000..10adcad3542e --- /dev/null +++ b/core/lib/contract_verifier/src/compilers/solc.rs @@ -0,0 +1,139 @@ +use std::{collections::HashMap, path::PathBuf, process::Stdio}; + +use anyhow::Context; +use tokio::io::AsyncWriteExt; +use zksync_queued_job_processor::async_trait; +use zksync_types::contract_verification_api::{ + CompilationArtifacts, SourceCodeData, VerificationIncomingRequest, +}; + +use super::{parse_standard_json_output, process_contract_name, Settings, Source, StandardJson}; +use crate::{error::ContractVerifierError, resolver::Compiler}; + +// Here and below, fields are public for testing purposes. 
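To make the parser's expectations concrete, here is an illustrative test against a minimal standard-JSON output (hedged: real `solc`/`zksolc` output carries many more fields; only the paths read by `parse_standard_json_output` above are populated):

```rust
#[test]
fn parsing_minimal_standard_json_output() {
    let output = serde_json::json!({
        "contracts": {
            "Counter.sol": {
                "Counter": {
                    "abi": [],
                    "evm": {
                        "bytecode": { "object": "600080fd" },
                        // A `vyper`-style `0x` prefix is stripped by the parser.
                        "deployedBytecode": { "object": "0x600080fd" }
                    }
                }
            }
        }
    });
    let artifacts =
        parse_standard_json_output(&output, "Counter".to_owned(), "Counter.sol".to_owned(), true)
            .expect("failed parsing output");
    assert_eq!(artifacts.bytecode, hex::decode("600080fd").unwrap());
    assert_eq!(
        artifacts.deployed_bytecode,
        Some(hex::decode("600080fd").unwrap())
    );
}
```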
+#[derive(Debug)] +pub(crate) struct SolcInput { + pub standard_json: StandardJson, + pub contract_name: String, + pub file_name: String, +} + +#[derive(Debug)] +pub(crate) struct Solc { + path: PathBuf, +} + +impl Solc { + pub fn new(path: PathBuf) -> Self { + Self { path } + } + + pub fn build_input( + req: VerificationIncomingRequest, + ) -> Result { + let (file_name, contract_name) = process_contract_name(&req.contract_name, "sol"); + let default_output_selection = serde_json::json!({ + "*": { + "*": [ "abi", "evm.bytecode", "evm.deployedBytecode" ], + "": [ "abi", "evm.bytecode", "evm.deployedBytecode" ], + } + }); + + let standard_json = match req.source_code_data { + SourceCodeData::SolSingleFile(source_code) => { + let source = Source { + content: source_code, + }; + let sources = HashMap::from([(file_name.clone(), source)]); + let settings = Settings { + output_selection: Some(default_output_selection), + other: serde_json::json!({ + "optimizer": { + "enabled": req.optimization_used, + }, + }), + }; + + StandardJson { + language: "Solidity".to_owned(), + sources, + settings, + } + } + SourceCodeData::StandardJsonInput(map) => { + let mut compiler_input: StandardJson = + serde_json::from_value(serde_json::Value::Object(map)) + .map_err(|_| ContractVerifierError::FailedToDeserializeInput)?; + // Set default output selection even if it is different in request. + compiler_input.settings.output_selection = Some(default_output_selection); + compiler_input + } + SourceCodeData::YulSingleFile(source_code) => { + let source = Source { + content: source_code, + }; + let sources = HashMap::from([(file_name.clone(), source)]); + let settings = Settings { + output_selection: Some(default_output_selection), + other: serde_json::json!({ + "optimizer": { + "enabled": req.optimization_used, + }, + }), + }; + StandardJson { + language: "Yul".to_owned(), + sources, + settings, + } + } + other => unreachable!("Unexpected `SourceCodeData` variant: {other:?}"), + }; + + Ok(SolcInput { + standard_json, + contract_name, + file_name, + }) + } +} + +#[async_trait] +impl Compiler for Solc { + async fn compile( + self: Box, + input: SolcInput, + ) -> Result { + let mut command = tokio::process::Command::new(&self.path); + let mut child = command + .arg("--standard-json") + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .context("failed spawning solc")?; + let stdin = child.stdin.as_mut().unwrap(); + let content = serde_json::to_vec(&input.standard_json) + .context("cannot encode standard JSON input for solc")?; + stdin + .write_all(&content) + .await + .context("failed writing standard JSON to solc stdin")?; + stdin + .flush() + .await + .context("failed flushing standard JSON to solc")?; + + let output = child.wait_with_output().await.context("solc failed")?; + if output.status.success() { + let output = serde_json::from_slice(&output.stdout) + .context("solc output is not valid JSON")?; + parse_standard_json_output(&output, input.contract_name, input.file_name, true) + } else { + Err(ContractVerifierError::CompilerError( + "solc", + String::from_utf8_lossy(&output.stderr).to_string(), + )) + } + } +} diff --git a/core/lib/contract_verifier/src/compilers/vyper.rs b/core/lib/contract_verifier/src/compilers/vyper.rs new file mode 100644 index 000000000000..59b950f9f17f --- /dev/null +++ b/core/lib/contract_verifier/src/compilers/vyper.rs @@ -0,0 +1,114 @@ +use std::{collections::HashMap, mem, path::PathBuf, process::Stdio}; + +use anyhow::Context; +use
tokio::io::AsyncWriteExt; +use zksync_queued_job_processor::async_trait; +use zksync_types::contract_verification_api::{ + CompilationArtifacts, SourceCodeData, VerificationIncomingRequest, +}; + +use super::{parse_standard_json_output, process_contract_name, Settings, Source, StandardJson}; +use crate::{error::ContractVerifierError, resolver::Compiler}; + +#[derive(Debug)] +pub(crate) struct VyperInput { + pub contract_name: String, + pub file_name: String, + pub sources: HashMap, + pub optimizer_mode: Option, +} + +impl VyperInput { + pub fn new(req: VerificationIncomingRequest) -> Result { + let (file_name, contract_name) = process_contract_name(&req.contract_name, "vy"); + + let sources = match req.source_code_data { + SourceCodeData::VyperMultiFile(s) => s, + other => unreachable!("unexpected `SourceCodeData` variant: {other:?}"), + }; + Ok(Self { + contract_name, + file_name, + sources, + optimizer_mode: if req.optimization_used { + req.optimizer_mode + } else { + // `none` mode is not the default mode (which is `gas`), so we must specify it explicitly here + Some("none".to_owned()) + }, + }) + } + + fn take_standard_json(&mut self) -> StandardJson { + let sources = mem::take(&mut self.sources); + let sources = sources + .into_iter() + .map(|(name, content)| (name, Source { content })); + + StandardJson { + language: "Vyper".to_owned(), + sources: sources.collect(), + settings: Settings { + output_selection: Some(serde_json::json!({ + "*": [ "abi", "evm.bytecode", "evm.deployedBytecode" ], + })), + other: serde_json::json!({ + "optimize": self.optimizer_mode.as_deref(), + }), + }, + } + } +} + +#[derive(Debug)] +pub(crate) struct Vyper { + path: PathBuf, +} + +impl Vyper { + pub fn new(path: PathBuf) -> Self { + Self { path } + } +} + +#[async_trait] +impl Compiler for Vyper { + async fn compile( + self: Box, + mut input: VyperInput, + ) -> Result { + let mut command = tokio::process::Command::new(&self.path); + let mut child = command + .arg("--standard-json") + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .context("cannot spawn vyper")?; + let mut stdin = child.stdin.take().unwrap(); + let standard_json = input.take_standard_json(); + let content = serde_json::to_vec(&standard_json) + .context("cannot encode standard JSON input for vyper")?; + stdin + .write_all(&content) + .await + .context("failed writing standard JSON to vyper stdin")?; + stdin + .flush() + .await + .context("failed flushing standard JSON to vyper")?; + drop(stdin); + + let output = child.wait_with_output().await.context("vyper failed")?; + if output.status.success() { + let output = + serde_json::from_slice(&output.stdout).context("vyper output is not valid JSON")?; + parse_standard_json_output(&output, input.contract_name, input.file_name, true) + } else { + Err(ContractVerifierError::CompilerError( + "vyper", + String::from_utf8_lossy(&output.stderr).to_string(), + )) + } + } +} diff --git a/core/lib/contract_verifier/src/compilers/zksolc.rs b/core/lib/contract_verifier/src/compilers/zksolc.rs new file mode 100644 index 000000000000..ff435e96aeb6 --- /dev/null +++ b/core/lib/contract_verifier/src/compilers/zksolc.rs @@ -0,0 +1,319 @@ +use std::{collections::HashMap, io::Write, process::Stdio}; + +use anyhow::Context as _; +use regex::Regex; +use semver::Version; +use serde::{Deserialize, Serialize}; +use tokio::io::AsyncWriteExt; +use zksync_queued_job_processor::async_trait; +use zksync_types::contract_verification_api::{ + CompilationArtifacts, SourceCodeData, 
VerificationIncomingRequest, +}; + +use super::{parse_standard_json_output, process_contract_name, Source}; +use crate::{ + error::ContractVerifierError, + resolver::{Compiler, CompilerPaths}, +}; + +#[derive(Debug)] +pub(crate) enum ZkSolcInput { + StandardJson { + input: StandardJson, + contract_name: String, + file_name: String, + }, + YulSingleFile { + source_code: String, + is_system: bool, + }, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct StandardJson { + /// The input language. + pub language: String, + /// The input source code files hashmap. + pub sources: HashMap, + /// The compiler settings. + pub settings: Settings, +} + +/// Compiler settings. +/// There are fields like `output_selection`, `is_system`, `force_evmla` which are accessed by contract verifier explicitly. +/// Other fields are accumulated in `other`, this way every field that was in the original request will be passed to a compiler. +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct Settings { + /// The output selection filters. + pub output_selection: Option, + /// Flag for system compilation mode. + #[serde(default)] + pub is_system: bool, + /// Flag to force `evmla` IR. + #[serde(default)] + pub force_evmla: bool, + /// Other settings (only filled when parsing `StandardJson` input from the request). + #[serde(flatten)] + pub other: serde_json::Value, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct Optimizer { + /// Whether the optimizer is enabled. + pub enabled: bool, + /// The optimization mode string. + pub mode: Option, +} + +#[derive(Debug)] +pub(crate) struct ZkSolc { + paths: CompilerPaths, + zksolc_version: String, +} + +impl ZkSolc { + pub fn new(paths: CompilerPaths, zksolc_version: String) -> Self { + ZkSolc { + paths, + zksolc_version, + } + } + + pub fn build_input( + req: VerificationIncomingRequest, + ) -> Result { + let (file_name, contract_name) = process_contract_name(&req.contract_name, "sol"); + let default_output_selection = serde_json::json!({ + "*": { + "*": [ "abi" ], + "": [ "abi" ] + } + }); + + match req.source_code_data { + SourceCodeData::SolSingleFile(source_code) => { + let source = Source { + content: source_code, + }; + let sources = HashMap::from([(file_name.clone(), source)]); + let settings = Settings { + output_selection: Some(default_output_selection), + is_system: req.is_system, + force_evmla: req.force_evmla, + other: serde_json::json!({ + "optimizer": Optimizer { + enabled: req.optimization_used, + mode: req.optimizer_mode.and_then(|s| s.chars().next()), + }, + }), + }; + + Ok(ZkSolcInput::StandardJson { + input: StandardJson { + language: "Solidity".to_string(), + sources, + settings, + }, + contract_name, + file_name, + }) + } + SourceCodeData::StandardJsonInput(map) => { + let mut compiler_input: StandardJson = + serde_json::from_value(serde_json::Value::Object(map)) + .map_err(|_| ContractVerifierError::FailedToDeserializeInput)?; + // Set default output selection even if it is different in request. 
+ compiler_input.settings.output_selection = Some(default_output_selection); + Ok(ZkSolcInput::StandardJson { + input: compiler_input, + contract_name, + file_name, + }) + } + SourceCodeData::YulSingleFile(source_code) => Ok(ZkSolcInput::YulSingleFile { + source_code, + is_system: req.is_system, + }), + other => unreachable!("Unexpected `SourceCodeData` variant: {other:?}"), + } + } + + fn parse_single_file_yul_output( + output: &str, + ) -> Result { + let re = Regex::new(r"Contract `.*` bytecode: 0x([\da-f]+)").unwrap(); + let cap = re + .captures(output) + .context("Yul output doesn't match regex")?; + let bytecode_str = cap.get(1).context("no matches in Yul output")?.as_str(); + let bytecode = hex::decode(bytecode_str).context("invalid Yul output bytecode")?; + Ok(CompilationArtifacts { + bytecode, + deployed_bytecode: None, + abi: serde_json::Value::Array(Vec::new()), + }) + } + + fn is_post_1_5_0(&self) -> bool { + // Special case + if &self.zksolc_version == "vm-1.5.0-a167aa3" { + false + } else if let Some(version) = self.zksolc_version.strip_prefix("v") { + if let Ok(semver) = Version::parse(version) { + let target = Version::new(1, 5, 0); + semver >= target + } else { + true + } + } else { + true + } + } +} + +#[async_trait] +impl Compiler for ZkSolc { + async fn compile( + self: Box, + input: ZkSolcInput, + ) -> Result { + let mut command = tokio::process::Command::new(&self.paths.zk); + command.stdout(Stdio::piped()).stderr(Stdio::piped()); + + match &input { + ZkSolcInput::StandardJson { input, .. } => { + if !self.is_post_1_5_0() { + if input.settings.is_system { + command.arg("--system-mode"); + } + if input.settings.force_evmla { + command.arg("--force-evmla"); + } + } + + command.arg("--solc").arg(&self.paths.base); + } + ZkSolcInput::YulSingleFile { is_system, .. } => { + if self.is_post_1_5_0() { + if *is_system { + command.arg("--enable-eravm-extensions"); + } else { + command.arg("--solc").arg(&self.paths.base); + } + } else { + if *is_system { + command.arg("--system-mode"); + } + command.arg("--solc").arg(&self.paths.base); + } + } + } + match input { + ZkSolcInput::StandardJson { + input, + contract_name, + file_name, + } => { + let mut child = command + .arg("--standard-json") + .stdin(Stdio::piped()) + .spawn() + .context("failed spawning zksolc")?; + let stdin = child.stdin.as_mut().unwrap(); + let content = serde_json::to_vec(&input) + .context("cannot encode standard JSON input for zksolc")?; + stdin + .write_all(&content) + .await + .context("failed writing standard JSON to zksolc stdin")?; + stdin + .flush() + .await + .context("failed flushing standard JSON to zksolc")?; + + let output = child.wait_with_output().await.context("zksolc failed")?; + if output.status.success() { + let output = serde_json::from_slice(&output.stdout) + .context("zksolc output is not valid JSON")?; + parse_standard_json_output(&output, contract_name, file_name, false) + } else { + Err(ContractVerifierError::CompilerError( + "zksolc", + String::from_utf8_lossy(&output.stderr).to_string(), + )) + } + } + ZkSolcInput::YulSingleFile { source_code, .. 
} => { + let mut file = tempfile::Builder::new() + .prefix("input") + .suffix(".yul") + .rand_bytes(0) + .tempfile() + .context("cannot create temporary Yul file")?; + file.write_all(source_code.as_bytes()) + .context("failed writing Yul file")?; + let child = command + .arg(file.path().to_str().unwrap()) + .arg("--optimization") + .arg("3") + .arg("--yul") + .arg("--bin") + .spawn() + .context("failed spawning zksolc")?; + let output = child.wait_with_output().await.context("zksolc failed")?; + if output.status.success() { + let output = + String::from_utf8(output.stdout).context("zksolc output is not UTF-8")?; + Self::parse_single_file_yul_output(&output) + } else { + Err(ContractVerifierError::CompilerError( + "zksolc", + String::from_utf8_lossy(&output.stderr).to_string(), + )) + } + } + } + } +} + +#[cfg(test)] +mod tests { + use std::path::PathBuf; + + use super::*; + + #[test] + fn check_is_post_1_5_0() { + // Special case. + let compiler_paths = CompilerPaths { + base: PathBuf::default(), + zk: PathBuf::default(), + }; + let mut zksolc = ZkSolc::new(compiler_paths, "vm-1.5.0-a167aa3".to_string()); + assert!(!zksolc.is_post_1_5_0(), "vm-1.5.0-a167aa3"); + + zksolc.zksolc_version = "v1.5.0".to_string(); + assert!(zksolc.is_post_1_5_0(), "v1.5.0"); + + zksolc.zksolc_version = "v1.5.1".to_string(); + assert!(zksolc.is_post_1_5_0(), "v1.5.1"); + + zksolc.zksolc_version = "v1.10.1".to_string(); + assert!(zksolc.is_post_1_5_0(), "v1.10.1"); + + zksolc.zksolc_version = "v2.0.0".to_string(); + assert!(zksolc.is_post_1_5_0(), "v2.0.0"); + + zksolc.zksolc_version = "v1.4.15".to_string(); + assert!(!zksolc.is_post_1_5_0(), "v1.4.15"); + + zksolc.zksolc_version = "v1.3.21".to_string(); + assert!(!zksolc.is_post_1_5_0(), "v1.3.21"); + + zksolc.zksolc_version = "v0.5.1".to_string(); + assert!(!zksolc.is_post_1_5_0(), "v0.5.1"); + } +} diff --git a/core/lib/contract_verifier/src/compilers/zkvyper.rs b/core/lib/contract_verifier/src/compilers/zkvyper.rs new file mode 100644 index 000000000000..4f7c10214f8a --- /dev/null +++ b/core/lib/contract_verifier/src/compilers/zkvyper.rs @@ -0,0 +1,167 @@ +use std::{ffi::OsString, path, path::Path, process::Stdio}; + +use anyhow::Context as _; +use tokio::{fs, io::AsyncWriteExt}; +use zksync_queued_job_processor::async_trait; +use zksync_types::contract_verification_api::CompilationArtifacts; + +use super::VyperInput; +use crate::{ + error::ContractVerifierError, + resolver::{Compiler, CompilerPaths}, +}; + +impl VyperInput { + async fn write_files(&self, root_dir: &Path) -> anyhow::Result> { + let mut paths = Vec::with_capacity(self.sources.len()); + for (name, content) in &self.sources { + let mut name = name.clone(); + if !name.ends_with(".vy") { + name += ".vy"; + } + + let name_path = Path::new(&name); + anyhow::ensure!( + !name_path.is_absolute(), + "absolute contract filename: {name}" + ); + let normal_components = name_path + .components() + .all(|component| matches!(component, path::Component::Normal(_))); + anyhow::ensure!( + normal_components, + "contract filename contains disallowed components: {name}" + ); + + let path = root_dir.join(name_path); + if let Some(prefix) = path.parent() { + fs::create_dir_all(prefix) + .await + .with_context(|| format!("failed creating parent dir for `{name}`"))?; + } + let mut file = fs::File::create(&path) + .await + .with_context(|| format!("failed creating file for `{name}`"))?; + file.write_all(content.as_bytes()) + .await + .with_context(|| format!("failed writing to `{name}`"))?; + 
paths.push(path.into_os_string()); + } + Ok(paths) + } +} + +#[derive(Debug)] +pub(crate) struct ZkVyper { + paths: CompilerPaths, +} + +impl ZkVyper { + pub fn new(paths: CompilerPaths) -> Self { + Self { paths } + } + + fn parse_output( + output: &serde_json::Value, + contract_name: String, + ) -> Result { + let file_name = format!("{contract_name}.vy"); + let object = output + .as_object() + .context("Vyper output is not an object")?; + for (path, artifact) in object { + let path = Path::new(&path); + if path.file_name().unwrap().to_str().unwrap() == file_name { + let bytecode_str = artifact["bytecode"] + .as_str() + .context("bytecode is not a string")?; + let bytecode_without_prefix = + bytecode_str.strip_prefix("0x").unwrap_or(bytecode_str); + let bytecode = + hex::decode(bytecode_without_prefix).context("failed decoding bytecode")?; + return Ok(CompilationArtifacts { + abi: artifact["abi"].clone(), + bytecode, + deployed_bytecode: None, + }); + } + } + Err(ContractVerifierError::MissingContract(contract_name)) + } +} + +#[async_trait] +impl Compiler for ZkVyper { + async fn compile( + self: Box, + input: VyperInput, + ) -> Result { + let mut command = tokio::process::Command::new(&self.paths.zk); + if let Some(o) = input.optimizer_mode.as_ref() { + command.arg("-O").arg(o); + } + command + .arg("--vyper") + .arg(&self.paths.base) + .arg("-f") + .arg("combined_json") + .stdout(Stdio::piped()) + .stderr(Stdio::piped()); + + let temp_dir = tokio::task::spawn_blocking(tempfile::tempdir) + .await + .context("panicked creating temporary dir")? + .context("failed creating temporary dir")?; + let file_paths = input + .write_files(temp_dir.path()) + .await + .context("failed writing Vyper files to temp dir")?; + command.args(file_paths); + + let child = command.spawn().context("cannot spawn zkvyper")?; + let output = child.wait_with_output().await.context("zkvyper failed")?; + if output.status.success() { + let output = serde_json::from_slice(&output.stdout) + .context("zkvyper output is not valid JSON")?; + Self::parse_output(&output, input.contract_name) + } else { + Err(ContractVerifierError::CompilerError( + "zkvyper", + String::from_utf8_lossy(&output.stderr).to_string(), + )) + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use super::*; + + #[tokio::test] + async fn sanitizing_contract_paths() { + let mut input = VyperInput { + contract_name: "Test".to_owned(), + file_name: "test.vy".to_owned(), + sources: HashMap::from([("/etc/shadow".to_owned(), String::new())]), + optimizer_mode: None, + }; + + let temp_dir = tempfile::TempDir::new().unwrap(); + let err = input + .write_files(temp_dir.path()) + .await + .unwrap_err() + .to_string(); + assert!(err.contains("absolute"), "{err}"); + + input.sources = HashMap::from([("../../../etc/shadow".to_owned(), String::new())]); + let err = input + .write_files(temp_dir.path()) + .await + .unwrap_err() + .to_string(); + assert!(err.contains("disallowed components"), "{err}"); + } +} diff --git a/core/lib/contract_verifier/src/error.rs b/core/lib/contract_verifier/src/error.rs index c66756d1f121..c777df24e226 100644 --- a/core/lib/contract_verifier/src/error.rs +++ b/core/lib/contract_verifier/src/error.rs @@ -1,19 +1,23 @@ -#[derive(Debug, Clone, thiserror::Error)] +use zksync_dal::DalError; + +#[derive(Debug, thiserror::Error)] pub enum ContractVerifierError { #[error("Internal error")] - InternalError, + Internal(#[from] anyhow::Error), #[error("Deployed bytecode is not equal to generated one from given source")] 
BytecodeMismatch, + #[error("Creation bytecode is not equal to generated one from given source")] + CreationBytecodeMismatch, #[error("Constructor arguments are not correct")] IncorrectConstructorArguments, #[error("Compilation takes too much time")] CompilationTimeout, #[error("{0} error: {1}")] - CompilerError(String, String), + CompilerError(&'static str, String), #[error("Compilation error")] CompilationError(serde_json::Value), #[error("Unknown {0} version: {1}")] - UnknownCompilerVersion(String, String), + UnknownCompilerVersion(&'static str, String), #[error("Contract with {0} name is missing in sources")] MissingContract(String), #[error("There is no {0} source file")] @@ -23,3 +27,9 @@ pub enum ContractVerifierError { #[error("Failed to deserialize standard JSON input")] FailedToDeserializeInput, } + +impl From for ContractVerifierError { + fn from(err: DalError) -> Self { + Self::Internal(err.generalize()) + } +} diff --git a/core/lib/contract_verifier/src/lib.rs b/core/lib/contract_verifier/src/lib.rs index c8d9b89d834c..7dc5d47d4562 100644 --- a/core/lib/contract_verifier/src/lib.rs +++ b/core/lib/contract_verifier/src/lib.rs @@ -1,101 +1,258 @@ +//! Contract verifier able to verify contracts created with `zksolc` or `zkvyper` toolchains. + use std::{ - collections::HashMap, - path::{Path, PathBuf}, + fmt, + sync::Arc, time::{Duration, Instant}, }; use anyhow::Context as _; use chrono::Utc; use ethabi::{Contract, Token}; -use lazy_static::lazy_static; -use regex::Regex; use tokio::time; -use zksync_config::ContractVerifierConfig; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_dal::{contract_verification_dal::DeployedContractData, ConnectionPool, Core, CoreDal}; use zksync_queued_job_processor::{async_trait, JobProcessor}; use zksync_types::{ + bytecode::{trim_padded_evm_bytecode, BytecodeMarker}, contract_verification_api::{ - CompilationArtifacts, CompilerType, DeployContractCalldata, SourceCodeData, - VerificationInfo, VerificationRequest, + self as api, CompilationArtifacts, VerificationIncomingRequest, VerificationInfo, + VerificationRequest, }, - Address, + Address, CONTRACT_DEPLOYER_ADDRESS, }; -use zksync_utils::env::Workspace; use crate::{ + compilers::{Solc, VyperInput, ZkSolc}, error::ContractVerifierError, metrics::API_CONTRACT_VERIFIER_METRICS, - zksolc_utils::{Optimizer, Settings, Source, StandardJson, ZkSolc, ZkSolcInput, ZkSolcOutput}, - zkvyper_utils::{ZkVyper, ZkVyperInput}, + resolver::{CompilerResolver, EnvCompilerResolver}, }; +mod compilers; pub mod error; mod metrics; -mod zksolc_utils; -mod zkvyper_utils; +mod resolver; +#[cfg(test)] +mod tests; -lazy_static! { - static ref DEPLOYER_CONTRACT: Contract = zksync_contracts::deployer_contract(); +#[derive(Debug)] +struct ZkCompilerVersions { + /// Version of the base / non-ZK compiler. + pub base: String, + /// Version of the ZK compiler. + pub zk: String, } -fn home_path() -> PathBuf { - Workspace::locate().core() +/// Internal counterpart of `CompilerVersions` from API that encompasses all supported compilation modes.
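Before the definition, a concrete illustration of the mapping the enum below implements (hedged snippet: version strings are arbitrary examples; `assert_matches` is the dev-dependency added in this diff). Specifying only a base compiler selects the EVM toolchain, while adding a ZK compiler version switches to EraVM:

```rust
use assert_matches::assert_matches;

let evm_toolchain = VersionedCompiler::from(api::CompilerVersions::Solc {
    compiler_solc_version: "0.8.24".to_owned(), // example version
    compiler_zksolc_version: None,
});
assert_matches!(evm_toolchain.expected_bytecode_kind(), BytecodeMarker::Evm);

let era_toolchain = VersionedCompiler::from(api::CompilerVersions::Solc {
    compiler_solc_version: "0.8.24".to_owned(),
    compiler_zksolc_version: Some("v1.5.3".to_owned()), // example version
});
assert_matches!(era_toolchain.expected_bytecode_kind(), BytecodeMarker::EraVm);
```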
+#[derive(Debug)] +enum VersionedCompiler { + Solc(String), + Vyper(String), + ZkSolc(ZkCompilerVersions), + ZkVyper(ZkCompilerVersions), +} + +impl From for VersionedCompiler { + fn from(versions: api::CompilerVersions) -> Self { + match versions { + api::CompilerVersions::Solc { + compiler_solc_version, + compiler_zksolc_version: None, + } => Self::Solc(compiler_solc_version), + + api::CompilerVersions::Solc { + compiler_solc_version, + compiler_zksolc_version: Some(zk), + } => Self::ZkSolc(ZkCompilerVersions { + base: compiler_solc_version, + zk, + }), + + api::CompilerVersions::Vyper { + compiler_vyper_version, + compiler_zkvyper_version: None, + } => Self::Vyper(compiler_vyper_version), + + api::CompilerVersions::Vyper { + compiler_vyper_version, + compiler_zkvyper_version: Some(zk), + } => Self::ZkVyper(ZkCompilerVersions { + base: compiler_vyper_version, + zk, + }), + } + } +} + +impl VersionedCompiler { + fn expected_bytecode_kind(&self) -> BytecodeMarker { + match self { + Self::Solc(_) | Self::Vyper(_) => BytecodeMarker::Evm, + Self::ZkSolc(_) | Self::ZkVyper(_) => BytecodeMarker::EraVm, + } + } } -#[derive(Debug)] enum ConstructorArgs { Check(Vec), Ignore, } -#[derive(Debug)] +impl fmt::Debug for ConstructorArgs { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Check(args) => write!(formatter, "0x{}", hex::encode(args)), + Self::Ignore => formatter.write_str("(ignored)"), + } + } +} + +#[derive(Debug, Clone)] pub struct ContractVerifier { - config: ContractVerifierConfig, + compilation_timeout: Duration, + contract_deployer: Contract, connection_pool: ConnectionPool, + compiler_resolver: Arc, } impl ContractVerifier { - pub fn new(config: ContractVerifierConfig, connection_pool: ConnectionPool) -> Self { - Self { - config, + /// Creates a new verifier instance. + pub async fn new( + compilation_timeout: Duration, + connection_pool: ConnectionPool, + ) -> anyhow::Result { + Self::with_resolver( + compilation_timeout, connection_pool, + Arc::::default(), + ) + .await + } + + async fn with_resolver( + compilation_timeout: Duration, + connection_pool: ConnectionPool, + compiler_resolver: Arc, + ) -> anyhow::Result { + let this = Self { + compilation_timeout, + contract_deployer: zksync_contracts::deployer_contract(), + connection_pool, + compiler_resolver, + }; + this.sync_compiler_versions().await?; + Ok(this) + } + + /// Synchronizes compiler versions. 
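A hedged usage sketch for the constructor above (assumes a `ConnectionPool<Core>` named `pool` is already built; the 30-second timeout is an arbitrary example). Construction is async because `with_resolver` synchronizes supported compiler versions on startup, as shown next:

```rust
use std::time::Duration;

let verifier = ContractVerifier::new(Duration::from_secs(30), pool.clone())
    .await
    .expect("failed initializing contract verifier");
```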
+ #[tracing::instrument(level = "debug", skip_all)] + async fn sync_compiler_versions(&self) -> anyhow::Result<()> { + let supported_versions = self + .compiler_resolver + .supported_versions() + .await + .context("cannot get supported compilers")?; + if supported_versions.lacks_any_compiler() { + tracing::warn!( + ?supported_versions, + "contract verifier lacks support of at least one compiler entirely; it may be incorrectly set up" + ); } + tracing::info!( + ?supported_versions, + "persisting supported compiler versions" + ); + + let mut storage = self + .connection_pool + .connection_tagged("contract_verifier") + .await?; + let mut transaction = storage.start_transaction().await?; + transaction + .contract_verification_dal() + .set_zksolc_versions(&supported_versions.zksolc) + .await?; + transaction + .contract_verification_dal() + .set_solc_versions(&supported_versions.solc) + .await?; + transaction + .contract_verification_dal() + .set_zkvyper_versions(&supported_versions.zkvyper) + .await?; + transaction + .contract_verification_dal() + .set_vyper_versions(&supported_versions.vyper) + .await?; + transaction.commit().await?; + Ok(()) } + #[tracing::instrument( + level = "debug", + skip_all, + err, + fields(id = request.id, addr = ?request.req.contract_address) + )] async fn verify( - storage: &mut Connection<'_, Core>, + &self, mut request: VerificationRequest, - config: ContractVerifierConfig, ) -> Result { - let artifacts = Self::compile(request.clone(), config).await?; - // Bytecode should be present because it is checked when accepting request. - let (deployed_bytecode, creation_tx_calldata) = storage + let mut storage = self + .connection_pool + .connection_tagged("contract_verifier") + .await?; + let deployed_contract = storage .contract_verification_dal() - .get_contract_info_for_verification(request.req.contract_address).await - .unwrap() - .ok_or_else(|| { - tracing::warn!("Contract is missing in DB for already accepted verification request. Contract address: {:#?}", request.req.contract_address); - ContractVerifierError::InternalError + .get_contract_info_for_verification(request.req.contract_address) + .await? + .with_context(|| { + format!( + "Contract is missing in DB for already accepted verification request. 
Contract address: {:#?}", + request.req.contract_address + ) })?; - let constructor_args = Self::decode_constructor_arguments_from_calldata( - creation_tx_calldata, - request.req.contract_address, - ); + drop(storage); + + let bytecode_marker = BytecodeMarker::new(deployed_contract.bytecode_hash) + .context("unknown bytecode kind")?; + let artifacts = self.compile(request.req.clone(), bytecode_marker).await?; + let constructor_args = match bytecode_marker { + BytecodeMarker::EraVm => self + .decode_era_vm_constructor_args(&deployed_contract, request.req.contract_address)?, + BytecodeMarker::Evm => Self::decode_evm_constructor_args( + request.id, + &deployed_contract, + &artifacts.bytecode, + )?, + }; + + let deployed_bytecode = match bytecode_marker { + BytecodeMarker::EraVm => deployed_contract.bytecode.as_slice(), + BytecodeMarker::Evm => trim_padded_evm_bytecode(&deployed_contract.bytecode) + .context("invalid stored EVM bytecode")?, + }; - if artifacts.bytecode != deployed_bytecode { + if artifacts.deployed_bytecode() != deployed_bytecode { tracing::info!( - "Bytecode mismatch req {}, deployed: 0x{}, compiled 0x{}", - request.id, - hex::encode(deployed_bytecode), - hex::encode(artifacts.bytecode) + request_id = request.id, + deployed = hex::encode(deployed_bytecode), + compiled = hex::encode(artifacts.deployed_bytecode()), + "Deployed (runtime) bytecode mismatch", ); return Err(ContractVerifierError::BytecodeMismatch); } match constructor_args { ConstructorArgs::Check(args) => { - if request.req.constructor_arguments.0 != args { + let provided_constructor_args = &request.req.constructor_arguments.0; + if *provided_constructor_args != args { + tracing::trace!( + "Constructor args mismatch, deployed: 0x{}, provided in request: 0x{}", + hex::encode(&args), + hex::encode(provided_constructor_args) + ); return Err(ContractVerifierError::IncorrectConstructorArguments); } } @@ -104,360 +261,281 @@ impl ContractVerifier { } } + let verified_at = Utc::now(); + tracing::trace!(%verified_at, "verified request"); Ok(VerificationInfo { request, artifacts, - verified_at: Utc::now(), + verified_at, }) } async fn compile_zksolc( - request: VerificationRequest, - config: ContractVerifierConfig, + &self, + version: &ZkCompilerVersions, + req: VerificationIncomingRequest, ) -> Result { - // Users may provide either just contract name or - // source file name and contract name joined with ":". 
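For EVM deployments, the dispatch above relies on the standard CREATE convention, implemented later in this file by `decode_evm_constructor_args`: deployment calldata is the creation bytecode immediately followed by the ABI-encoded constructor arguments. A minimal sketch (the helper is illustrative, not part of the diff):

```rust
/// Recovers constructor args by stripping the compiled creation bytecode
/// prefix from the deployment calldata; `None` signals a prefix mismatch
/// (surfaced as `CreationBytecodeMismatch` in the verifier).
fn split_constructor_args<'a>(calldata: &'a [u8], creation_bytecode: &[u8]) -> Option<&'a [u8]> {
    calldata.strip_prefix(creation_bytecode)
}
```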
- let (file_name, contract_name) = - if let Some((file_name, contract_name)) = request.req.contract_name.rsplit_once(':') { - (file_name.to_string(), contract_name.to_string()) - } else { - ( - format!("{}.sol", request.req.contract_name), - request.req.contract_name.clone(), - ) - }; - let input = Self::build_zksolc_input(request.clone(), file_name.clone())?; - - let zksolc_path = Path::new(&home_path()) - .join("etc") - .join("zksolc-bin") - .join(request.req.compiler_versions.zk_compiler_version()) - .join("zksolc"); - if !zksolc_path.exists() { - return Err(ContractVerifierError::UnknownCompilerVersion( - "zksolc".to_string(), - request.req.compiler_versions.zk_compiler_version(), - )); - } + let zksolc = self.compiler_resolver.resolve_zksolc(version).await?; + tracing::debug!(?zksolc, ?version, "resolved compiler"); + let input = ZkSolc::build_input(req)?; - let solc_path = Path::new(&home_path()) - .join("etc") - .join("solc-bin") - .join(request.req.compiler_versions.compiler_version()) - .join("solc"); - if !solc_path.exists() { - return Err(ContractVerifierError::UnknownCompilerVersion( - "solc".to_string(), - request.req.compiler_versions.compiler_version(), - )); - } + time::timeout(self.compilation_timeout, zksolc.compile(input)) + .await + .map_err(|_| ContractVerifierError::CompilationTimeout)? + } - let zksolc = ZkSolc::new( - zksolc_path, - solc_path, - request.req.compiler_versions.zk_compiler_version(), - ); + async fn compile_zkvyper( + &self, + version: &ZkCompilerVersions, + req: VerificationIncomingRequest, + ) -> Result { + let zkvyper = self.compiler_resolver.resolve_zkvyper(version).await?; + tracing::debug!(?zkvyper, ?version, "resolved compiler"); + let input = VyperInput::new(req)?; + time::timeout(self.compilation_timeout, zkvyper.compile(input)) + .await + .map_err(|_| ContractVerifierError::CompilationTimeout)? + } - let output = time::timeout(config.compilation_timeout(), zksolc.async_compile(input)) + async fn compile_solc( + &self, + version: &str, + req: VerificationIncomingRequest, + ) -> Result { + let solc = self.compiler_resolver.resolve_solc(version).await?; + tracing::debug!(?solc, ?req.compiler_versions, "resolved compiler"); + let input = Solc::build_input(req)?; + + time::timeout(self.compilation_timeout, solc.compile(input)) .await - .map_err(|_| ContractVerifierError::CompilationTimeout)??; - - match output { - ZkSolcOutput::StandardJson(output) => { - if let Some(errors) = output.get("errors") { - let errors = errors.as_array().unwrap().clone(); - if errors - .iter() - .any(|err| err["severity"].as_str().unwrap() == "error") - { - let error_messages = errors - .into_iter() - .map(|err| err["formattedMessage"].clone()) - .collect(); - return Err(ContractVerifierError::CompilationError( - serde_json::Value::Array(error_messages), - )); - } - } + .map_err(|_| ContractVerifierError::CompilationTimeout)? 
+ } - let contracts = output["contracts"] - .get(file_name.as_str()) - .cloned() - .ok_or(ContractVerifierError::MissingSource(file_name))?; - let contract = contracts - .get(&contract_name) - .cloned() - .ok_or(ContractVerifierError::MissingContract(contract_name))?; - let bytecode_str = contract["evm"]["bytecode"]["object"].as_str().ok_or( - ContractVerifierError::AbstractContract(request.req.contract_name), - )?; - let bytecode = hex::decode(bytecode_str).unwrap(); - let abi = contract["abi"].clone(); - if !abi.is_array() { - tracing::error!( - "zksolc returned unexpected value for ABI: {}", - serde_json::to_string_pretty(&abi).unwrap() - ); - return Err(ContractVerifierError::InternalError); - } + async fn compile_vyper( + &self, + version: &str, + req: VerificationIncomingRequest, + ) -> Result { + let vyper = self.compiler_resolver.resolve_vyper(version).await?; + tracing::debug!(?vyper, ?req.compiler_versions, "resolved compiler"); + let input = VyperInput::new(req)?; - Ok(CompilationArtifacts { bytecode, abi }) - } - ZkSolcOutput::YulSingleFile(output) => { - let re = Regex::new(r"Contract `.*` bytecode: 0x([\da-f]+)").unwrap(); - let cap = re.captures(&output).unwrap(); - let bytecode_str = cap.get(1).unwrap().as_str(); - let bytecode = hex::decode(bytecode_str).unwrap(); - Ok(CompilationArtifacts { - bytecode, - abi: serde_json::Value::Array(Vec::new()), - }) - } - } + time::timeout(self.compilation_timeout, vyper.compile(input)) + .await + .map_err(|_| ContractVerifierError::CompilationTimeout)? } - async fn compile_zkvyper( - request: VerificationRequest, - config: ContractVerifierConfig, + #[tracing::instrument(level = "debug", skip_all)] + async fn compile( + &self, + req: VerificationIncomingRequest, + bytecode_marker: BytecodeMarker, ) -> Result { - // Users may provide either just contract name or - // source file name and contract name joined with ":". 
- let contract_name = - if let Some((_file_name, contract_name)) = request.req.contract_name.rsplit_once(':') { - contract_name.to_string() - } else { - request.req.contract_name.clone() - }; - let input = Self::build_zkvyper_input(request.clone())?; - - let zkvyper_path = Path::new(&home_path()) - .join("etc") - .join("zkvyper-bin") - .join(request.req.compiler_versions.zk_compiler_version()) - .join("zkvyper"); - if !zkvyper_path.exists() { - return Err(ContractVerifierError::UnknownCompilerVersion( - "zkvyper".to_string(), - request.req.compiler_versions.zk_compiler_version(), - )); + let compiler_type = req.source_code_data.compiler_type(); + let compiler_type_by_versions = req.compiler_versions.compiler_type(); + if compiler_type != compiler_type_by_versions { + // Should be checked when receiving a request, so here it's more of a sanity check + let err = anyhow::anyhow!( + "specified compiler versions {:?} belong to a differing toolchain than source code ({compiler_type:?})", + req.compiler_versions + ); + return Err(err.into()); } - let vyper_path = Path::new(&home_path()) - .join("etc") - .join("vyper-bin") - .join(request.req.compiler_versions.compiler_version()) - .join("vyper"); - if !vyper_path.exists() { - return Err(ContractVerifierError::UnknownCompilerVersion( - "vyper".to_string(), - request.req.compiler_versions.compiler_version(), - )); + let compiler = VersionedCompiler::from(req.compiler_versions.clone()); + if compiler.expected_bytecode_kind() != bytecode_marker { + let err = anyhow::anyhow!( + "bytecode kind expected by compiler {compiler:?} differs from the actual bytecode kind \ + of the verified contract ({bytecode_marker:?})", + ); + return Err(err.into()); } - let zkvyper = ZkVyper::new(zkvyper_path, vyper_path); - - let output = time::timeout(config.compilation_timeout(), zkvyper.async_compile(input)) - .await - .map_err(|_| ContractVerifierError::CompilationTimeout)??; - - let file_name = format!("{contract_name}.vy"); - let object = output - .as_object() - .cloned() - .ok_or(ContractVerifierError::InternalError)?; - for (path, artifact) in object { - let path = Path::new(&path); - if path.file_name().unwrap().to_str().unwrap() == file_name { - let bytecode_str = artifact["bytecode"] - .as_str() - .ok_or(ContractVerifierError::InternalError)?; - let bytecode_without_prefix = - bytecode_str.strip_prefix("0x").unwrap_or(bytecode_str); - let bytecode = hex::decode(bytecode_without_prefix).unwrap(); - return Ok(CompilationArtifacts { - abi: artifact["abi"].clone(), - bytecode, - }); - } + match &compiler { + VersionedCompiler::Solc(version) => self.compile_solc(version, req).await, + VersionedCompiler::Vyper(version) => self.compile_vyper(version, req).await, + VersionedCompiler::ZkSolc(version) => self.compile_zksolc(version, req).await, + VersionedCompiler::ZkVyper(version) => self.compile_zkvyper(version, req).await, } - - Err(ContractVerifierError::MissingContract(contract_name)) } - pub async fn compile( - request: VerificationRequest, - config: ContractVerifierConfig, - ) -> Result { - match request.req.source_code_data.compiler_type() { - CompilerType::Solc => Self::compile_zksolc(request, config).await, - CompilerType::Vyper => Self::compile_zkvyper(request, config).await, + /// All returned errors are internal. 
+ #[tracing::instrument(level = "trace", skip_all, ret, err)] + fn decode_era_vm_constructor_args( + &self, + contract: &DeployedContractData, + contract_address_to_verify: Address, + ) -> anyhow::Result { + let Some(calldata) = &contract.calldata else { + return Ok(ConstructorArgs::Ignore); + }; + + if contract.contract_address == Some(CONTRACT_DEPLOYER_ADDRESS) { + self.decode_contract_deployer_call(calldata, contract_address_to_verify) + } else { + Ok(ConstructorArgs::Ignore) } } - fn build_zksolc_input( - request: VerificationRequest, - file_name: String, - ) -> Result { - let default_output_selection = serde_json::json!( - { - "*": { - "*": [ "abi" ], - "": [ "abi" ] - } - } + fn decode_contract_deployer_call( + &self, + calldata: &[u8], + contract_address_to_verify: Address, + ) -> anyhow::Result { + anyhow::ensure!( + calldata.len() >= 4, + "calldata doesn't include Solidity function selector" ); - match request.req.source_code_data { - SourceCodeData::SolSingleFile(source_code) => { - let source = Source { - content: source_code, - }; - let sources: HashMap = - vec![(file_name, source)].into_iter().collect(); - let optimizer = Optimizer { - enabled: request.req.optimization_used, - mode: request.req.optimizer_mode.and_then(|s| s.chars().next()), - }; - let optimizer_value = serde_json::to_value(optimizer).unwrap(); - - let settings = Settings { - output_selection: Some(default_output_selection), - is_system: request.req.is_system, - force_evmla: request.req.force_evmla, - other: serde_json::Value::Object( - vec![("optimizer".to_string(), optimizer_value)] - .into_iter() - .collect(), - ), - }; - - Ok(ZkSolcInput::StandardJson(StandardJson { - language: "Solidity".to_string(), - sources, - settings, - })) + let contract_deployer = &self.contract_deployer; + let create = contract_deployer + .function("create") + .context("no `create` in contract deployer ABI")?; + let create2 = contract_deployer + .function("create2") + .context("no `create2` in contract deployer ABI")?; + let create_acc = contract_deployer + .function("createAccount") + .context("no `createAccount` in contract deployer ABI")?; + let create2_acc = contract_deployer + .function("create2Account") + .context("no `create2Account` in contract deployer ABI")?; + let force_deploy = contract_deployer + .function("forceDeployOnAddresses") + .context("no `forceDeployOnAddresses` in contract deployer ABI")?; + + let (selector, token_data) = calldata.split_at(4); + // It's assumed that `create` and `create2` methods have the same parameters + // and the same for `createAccount` and `create2Account`. + Ok(match selector { + selector + if selector == create.short_signature() + || selector == create2.short_signature() => + { + let tokens = create + .decode_input(token_data) + .context("failed to decode `create` / `create2` input")?; + // Constructor arguments are in the third parameter. + ConstructorArgs::Check( + tokens[2] + .clone() + .into_bytes() + .context("third parameter of `create/create2` should be of type `bytes`")?, + ) } - SourceCodeData::StandardJsonInput(map) => { - let mut compiler_input: StandardJson = - serde_json::from_value(serde_json::Value::Object(map)) - .map_err(|_| ContractVerifierError::FailedToDeserializeInput)?; - // Set default output selection even if it is different in request. 
- compiler_input.settings.output_selection = Some(default_output_selection); - Ok(ZkSolcInput::StandardJson(compiler_input)) + selector + if selector == create_acc.short_signature() + || selector == create2_acc.short_signature() => + { + let tokens = create_acc + .decode_input(token_data) + .context("failed to decode `createAccount` / `create2Account` input")?; + // Constructor arguments are in the third parameter. + ConstructorArgs::Check(tokens[2].clone().into_bytes().context( + "third parameter of `createAccount/create2Account` should be of type `bytes`", + )?) } - SourceCodeData::YulSingleFile(source_code) => Ok(ZkSolcInput::YulSingleFile { - source_code, - is_system: request.req.is_system, - }), - _ => panic!("Unexpected SourceCode variant"), - } - } - - fn build_zkvyper_input( - request: VerificationRequest, - ) -> Result { - let sources = match request.req.source_code_data { - SourceCodeData::VyperMultiFile(s) => s, - _ => panic!("Unexpected SourceCode variant"), - }; - Ok(ZkVyperInput { - sources, - optimizer_mode: request.req.optimizer_mode, + selector if selector == force_deploy.short_signature() => { + Self::decode_force_deployment(token_data, force_deploy, contract_address_to_verify) + .context("failed decoding force deployment")? + } + _ => ConstructorArgs::Ignore, }) } - fn decode_constructor_arguments_from_calldata( - calldata: DeployContractCalldata, + fn decode_force_deployment( + token_data: &[u8], + force_deploy: &ethabi::Function, contract_address_to_verify: Address, - ) -> ConstructorArgs { - match calldata { - DeployContractCalldata::Deploy(calldata) => { - let create = DEPLOYER_CONTRACT.function("create").unwrap(); - let create2 = DEPLOYER_CONTRACT.function("create2").unwrap(); - - let create_acc = DEPLOYER_CONTRACT.function("createAccount").unwrap(); - let create2_acc = DEPLOYER_CONTRACT.function("create2Account").unwrap(); - - let force_deploy = DEPLOYER_CONTRACT - .function("forceDeployOnAddresses") - .unwrap(); - // It's assumed that `create` and `create2` methods have the same parameters - // and the same for `createAccount` and `create2Account`. - match &calldata[0..4] { - selector - if selector == create.short_signature() - || selector == create2.short_signature() => - { - let tokens = create - .decode_input(&calldata[4..]) - .expect("Failed to decode input"); - // Constructor arguments are in the third parameter. - ConstructorArgs::Check(tokens[2].clone().into_bytes().expect( - "The third parameter of `create/create2` should be of type `bytes`", - )) - } - selector - if selector == create_acc.short_signature() - || selector == create2_acc.short_signature() => - { - let tokens = create - .decode_input(&calldata[4..]) - .expect("Failed to decode input"); - // Constructor arguments are in the third parameter.
- ConstructorArgs::Check( - tokens[2].clone().into_bytes().expect( - "The third parameter of `createAccount/create2Account` should be of type `bytes`", - ), - ) + ) -> anyhow::Result { + let tokens = force_deploy + .decode_input(token_data) + .context("failed to decode `forceDeployOnAddresses` input")?; + let deployments = tokens[0] + .clone() + .into_array() + .context("first parameter of `forceDeployOnAddresses` is not an array")?; + for deployment in deployments { + match deployment { + Token::Tuple(tokens) => { + let address = tokens[1] + .clone() + .into_address() + .context("unexpected `address`")?; + if address == contract_address_to_verify { + let call_constructor = tokens[2] + .clone() + .into_bool() + .context("unexpected `call_constructor`")?; + return Ok(if call_constructor { + let input = tokens[4] + .clone() + .into_bytes() + .context("unexpected constructor input")?; + ConstructorArgs::Check(input) + } else { + ConstructorArgs::Ignore + }); } - selector if selector == force_deploy.short_signature() => { - let tokens = force_deploy - .decode_input(&calldata[4..]) - .expect("Failed to decode input"); - let deployments = tokens[0].clone().into_array().unwrap(); - for deployment in deployments { - match deployment { - Token::Tuple(tokens) => { - let address = tokens[1].clone().into_address().unwrap(); - if address == contract_address_to_verify { - let call_constructor = - tokens[2].clone().into_bool().unwrap(); - return if call_constructor { - let input = tokens[4].clone().into_bytes().unwrap(); - ConstructorArgs::Check(input) - } else { - ConstructorArgs::Ignore - }; - } - } - _ => panic!("Expected `deployment` to be a tuple"), - } - } - panic!("Couldn't find force deployment for given address"); - } - _ => ConstructorArgs::Ignore, } + _ => anyhow::bail!("expected `deployment` to be a tuple"), } - DeployContractCalldata::Ignore => ConstructorArgs::Ignore, } + anyhow::bail!("couldn't find force deployment for address {contract_address_to_verify:?}"); + } + + fn decode_evm_constructor_args( + request_id: usize, + contract: &DeployedContractData, + creation_bytecode: &[u8], + ) -> Result { + let Some(calldata) = &contract.calldata else { + return Ok(ConstructorArgs::Ignore); + }; + if contract.contract_address.is_some() { + // Not an EVM deployment transaction + return Ok(ConstructorArgs::Ignore); + } + + let args = calldata.strip_prefix(creation_bytecode).ok_or_else(|| { + tracing::info!( + request_id, + calldata = hex::encode(calldata), + compiled = hex::encode(creation_bytecode), + "Creation bytecode mismatch" + ); + ContractVerifierError::CreationBytecodeMismatch + })?; + Ok(ConstructorArgs::Check(args.to_vec())) } + #[tracing::instrument(level = "debug", skip_all, err, fields(id = request_id))] async fn process_result( - storage: &mut Connection<'_, Core>, + &self, request_id: usize, verification_result: Result, - ) { + ) -> anyhow::Result<()> { + let mut storage = self + .connection_pool + .connection_tagged("contract_verifier") + .await?; match verification_result { Ok(info) => { storage .contract_verification_dal() .save_verification_info(info) - .await - .unwrap(); - tracing::info!("Successfully processed request with id = {}", request_id); + .await?; + tracing::info!("Successfully processed request with id = {request_id}"); } Err(error) => { - let error_message = error.to_string(); + let error_message = match &error { + ContractVerifierError::Internal(err) => { + // Do not expose the error externally, but log it. 
+ tracing::warn!(request_id, "internal error processing request: {err}"); + "internal error".to_owned() + } + _ => error.to_string(), + }; let compilation_errors = match error { ContractVerifierError::CompilationError(compilation_errors) => { compilation_errors @@ -466,12 +544,12 @@ impl ContractVerifier { }; storage .contract_verification_dal() - .save_verification_error(request_id, error_message, compilation_errors, None) - .await - .unwrap(); - tracing::info!("Request with id = {} was failed", request_id); + .save_verification_error(request_id, &error_message, &compilation_errors, None) + .await?; + tracing::info!("Request with id = {request_id} failed"); } } + Ok(()) } } @@ -485,33 +563,37 @@ impl JobProcessor for ContractVerifier { const BACKOFF_MULTIPLIER: u64 = 1; async fn get_next_job(&self) -> anyhow::Result> { - let mut connection = self.connection_pool.connection().await.unwrap(); - - // Time overhead for all operations except for compilation. + /// Time overhead for all operations except for compilation. const TIME_OVERHEAD: Duration = Duration::from_secs(10); + let mut connection = self + .connection_pool + .connection_tagged("contract_verifier") + .await?; // Considering that jobs that reach compilation timeout will be executed in // `compilation_timeout` + `non_compilation_time_overhead` (which is significantly less than `compilation_timeout`), // we re-pick up jobs that are being executed for a bit more than `compilation_timeout`. let job = connection .contract_verification_dal() - .get_next_queued_verification_request(self.config.compilation_timeout() + TIME_OVERHEAD) - .await - .context("get_next_queued_verification_request()")?; - + .get_next_queued_verification_request(self.compilation_timeout + TIME_OVERHEAD) + .await?; Ok(job.map(|job| (job.id, job))) } async fn save_failure(&self, job_id: usize, _started_at: Instant, error: String) { - let mut connection = self.connection_pool.connection().await.unwrap(); + let mut connection = self + .connection_pool + .connection_tagged("contract_verifier") + .await + .unwrap(); connection .contract_verification_dal() .save_verification_error( job_id, - "Internal error".to_string(), - serde_json::Value::Array(Vec::new()), - Some(error), + "Internal error", + &serde_json::Value::Array(Vec::new()), + Some(&error), ) .await .unwrap(); @@ -524,16 +606,13 @@ impl JobProcessor for ContractVerifier { job: VerificationRequest, started_at: Instant, ) -> tokio::task::JoinHandle> { - let connection_pool = self.connection_pool.clone(); - let config = self.config.clone(); + let this = self.clone(); tokio::task::spawn(async move { tracing::info!("Started to process request with id = {}", job.id); - let mut connection = connection_pool.connection().await.unwrap(); - let job_id = job.id; - let verification_result = Self::verify(&mut connection, job, config).await; - Self::process_result(&mut connection, job_id, verification_result).await; + let verification_result = this.verify(job).await; + this.process_result(job_id, verification_result).await?; API_CONTRACT_VERIFIER_METRICS .request_processing_time diff --git a/core/lib/contract_verifier/src/metrics.rs b/core/lib/contract_verifier/src/metrics.rs index fd98f51cd560..1c6796cd7f38 100644 --- a/core/lib/contract_verifier/src/metrics.rs +++ b/core/lib/contract_verifier/src/metrics.rs @@ -5,6 +5,7 @@ use vise::{Buckets, Histogram, Metrics}; #[derive(Debug, Metrics)] #[metrics(prefix = "api_contract_verifier")] pub(crate) struct ApiContractVerifierMetrics { + /// Latency of processing a single
     #[metrics(buckets = Buckets::LATENCIES)]
     pub request_processing_time: Histogram<Duration>,
 }
diff --git a/core/lib/contract_verifier/src/resolver.rs b/core/lib/contract_verifier/src/resolver.rs
new file mode 100644
index 000000000000..018da12a152a
--- /dev/null
+++ b/core/lib/contract_verifier/src/resolver.rs
@@ -0,0 +1,253 @@
+use std::{
+    fmt,
+    path::{Path, PathBuf},
+};
+
+use anyhow::Context as _;
+use tokio::fs;
+use zksync_queued_job_processor::async_trait;
+use zksync_types::contract_verification_api::CompilationArtifacts;
+use zksync_utils::env::Workspace;
+
+use crate::{
+    compilers::{Solc, SolcInput, Vyper, VyperInput, ZkSolc, ZkSolcInput, ZkVyper},
+    error::ContractVerifierError,
+    ZkCompilerVersions,
+};
+
+#[derive(Debug, Clone, Copy)]
+enum CompilerType {
+    Solc,
+    ZkSolc,
+    Vyper,
+    ZkVyper,
+}
+
+impl CompilerType {
+    fn as_str(self) -> &'static str {
+        match self {
+            Self::Solc => "solc",
+            Self::ZkSolc => "zksolc",
+            Self::Vyper => "vyper",
+            Self::ZkVyper => "zkvyper",
+        }
+    }
+
+    /// Returns the absolute path to the compiler binary.
+    fn bin_path_unchecked(self, home_dir: &Path, version: &str) -> PathBuf {
+        let compiler_dir = match self {
+            Self::Solc => "solc-bin",
+            Self::ZkSolc => "zksolc-bin",
+            Self::Vyper => "vyper-bin",
+            Self::ZkVyper => "zkvyper-bin",
+        };
+        home_dir
+            .join("etc")
+            .join(compiler_dir)
+            .join(version)
+            .join(self.as_str())
+    }
+
+    async fn bin_path(
+        self,
+        home_dir: &Path,
+        version: &str,
+    ) -> Result<PathBuf, ContractVerifierError> {
+        let path = self.bin_path_unchecked(home_dir, version);
+        if !fs::try_exists(&path)
+            .await
+            .with_context(|| format!("failed accessing `{}`", self.as_str()))?
+        {
+            return Err(ContractVerifierError::UnknownCompilerVersion(
+                self.as_str(),
+                version.to_owned(),
+            ));
+        }
+        Ok(path)
+    }
+}
+
+/// Compiler versions supported by a [`CompilerResolver`].
+#[derive(Debug)]
+pub(crate) struct SupportedCompilerVersions {
+    pub solc: Vec<String>,
+    pub zksolc: Vec<String>,
+    pub vyper: Vec<String>,
+    pub zkvyper: Vec<String>,
+}
+
+impl SupportedCompilerVersions {
+    pub fn lacks_any_compiler(&self) -> bool {
+        self.solc.is_empty()
+            || self.zksolc.is_empty()
+            || self.vyper.is_empty()
+            || self.zkvyper.is_empty()
+    }
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct CompilerPaths {
+    /// Path to the base (non-zk) compiler.
+    pub base: PathBuf,
+    /// Path to the zk compiler.
+    pub zk: PathBuf,
+}
+
+/// Encapsulates compiler path resolution.
+#[async_trait]
+pub(crate) trait CompilerResolver: fmt::Debug + Send + Sync {
+    /// Returns compiler versions supported by this resolver.
+    ///
+    /// # Errors
+    ///
+    /// Returned errors are assumed to be fatal.
+    async fn supported_versions(&self) -> anyhow::Result<SupportedCompilerVersions>;
+
+    /// Resolves a `solc` compiler.
+    async fn resolve_solc(
+        &self,
+        version: &str,
+    ) -> Result<Box<dyn Compiler<SolcInput>>, ContractVerifierError>;
+
+    /// Resolves a `zksolc` compiler.
+    async fn resolve_zksolc(
+        &self,
+        version: &ZkCompilerVersions,
+    ) -> Result<Box<dyn Compiler<ZkSolcInput>>, ContractVerifierError>;
+
+    /// Resolves a `vyper` compiler.
+    async fn resolve_vyper(
+        &self,
+        version: &str,
+    ) -> Result<Box<dyn Compiler<VyperInput>>, ContractVerifierError>;
+
+    /// Resolves a `zkvyper` compiler.
+    async fn resolve_zkvyper(
+        &self,
+        version: &ZkCompilerVersions,
+    ) -> Result<Box<dyn Compiler<VyperInput>>, ContractVerifierError>;
+}
+
+/// Encapsulates a one-off compilation process.
+#[async_trait]
+pub(crate) trait Compiler<In>: Send + fmt::Debug {
+    /// Performs compilation.
+    async fn compile(
+        self: Box<Self>,
+        input: In,
+    ) -> Result<CompilationArtifacts, ContractVerifierError>;
+}
+
+/// Default [`CompilerResolver`] using pre-downloaded compilers in the `etc` subdirectories (relative to the workspace).
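+/// The assumed layout, mirroring [`CompilerType::bin_path_unchecked()`] above, is
+/// `etc/{solc,zksolc,vyper,zkvyper}-bin/<version>/<binary name>`, e.g. `etc/solc-bin/0.8.27/solc`.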
+#[derive(Debug)]
+pub(crate) struct EnvCompilerResolver {
+    home_dir: PathBuf,
+}
+
+impl Default for EnvCompilerResolver {
+    fn default() -> Self {
+        Self {
+            home_dir: Workspace::locate().core(),
+        }
+    }
+}
+
+impl EnvCompilerResolver {
+    async fn read_dir(&self, dir: &str) -> anyhow::Result<Vec<String>> {
+        let mut dir_entries = fs::read_dir(self.home_dir.join(dir))
+            .await
+            .context("failed reading dir")?;
+        let mut versions = vec![];
+        while let Some(entry) = dir_entries.next_entry().await? {
+            let Ok(file_type) = entry.file_type().await else {
+                continue;
+            };
+            if file_type.is_dir() {
+                if let Ok(name) = entry.file_name().into_string() {
+                    versions.push(name);
+                }
+            }
+        }
+        Ok(versions)
+    }
+}
+
+#[async_trait]
+impl CompilerResolver for EnvCompilerResolver {
+    async fn supported_versions(&self) -> anyhow::Result<SupportedCompilerVersions> {
+        Ok(SupportedCompilerVersions {
+            solc: self
+                .read_dir("etc/solc-bin")
+                .await
+                .context("failed reading solc dir")?,
+            zksolc: self
+                .read_dir("etc/zksolc-bin")
+                .await
+                .context("failed reading zksolc dir")?,
+            vyper: self
+                .read_dir("etc/vyper-bin")
+                .await
+                .context("failed reading vyper dir")?,
+            zkvyper: self
+                .read_dir("etc/zkvyper-bin")
+                .await
+                .context("failed reading zkvyper dir")?,
+        })
+    }
+
+    async fn resolve_solc(
+        &self,
+        version: &str,
+    ) -> Result<Box<dyn Compiler<SolcInput>>, ContractVerifierError> {
+        let solc_path = CompilerType::Solc.bin_path(&self.home_dir, version).await?;
+        Ok(Box::new(Solc::new(solc_path)))
+    }
+
+    async fn resolve_zksolc(
+        &self,
+        version: &ZkCompilerVersions,
+    ) -> Result<Box<dyn Compiler<ZkSolcInput>>, ContractVerifierError> {
+        let zksolc_version = &version.zk;
+        let zksolc_path = CompilerType::ZkSolc
+            .bin_path(&self.home_dir, zksolc_version)
+            .await?;
+        let solc_path = CompilerType::Solc
+            .bin_path(&self.home_dir, &version.base)
+            .await?;
+        let compiler_paths = CompilerPaths {
+            base: solc_path,
+            zk: zksolc_path,
+        };
+        Ok(Box::new(ZkSolc::new(
+            compiler_paths,
+            zksolc_version.to_owned(),
+        )))
+    }
+
+    async fn resolve_vyper(
+        &self,
+        version: &str,
+    ) -> Result<Box<dyn Compiler<VyperInput>>, ContractVerifierError> {
+        let vyper_path = CompilerType::Vyper
+            .bin_path(&self.home_dir, version)
+            .await?;
+        Ok(Box::new(Vyper::new(vyper_path)))
+    }
+
+    async fn resolve_zkvyper(
+        &self,
+        version: &ZkCompilerVersions,
+    ) -> Result<Box<dyn Compiler<VyperInput>>, ContractVerifierError> {
+        let zkvyper_path = CompilerType::ZkVyper
+            .bin_path(&self.home_dir, &version.zk)
+            .await?;
+        let vyper_path = CompilerType::Vyper
+            .bin_path(&self.home_dir, &version.base)
+            .await?;
+        let compiler_paths = CompilerPaths {
+            base: vyper_path,
+            zk: zkvyper_path,
+        };
+        Ok(Box::new(ZkVyper::new(compiler_paths)))
+    }
+}
diff --git a/core/lib/contract_verifier/src/tests/mod.rs b/core/lib/contract_verifier/src/tests/mod.rs
new file mode 100644
index 000000000000..395d467542dc
--- /dev/null
+++ b/core/lib/contract_verifier/src/tests/mod.rs
@@ -0,0 +1,808 @@
+//! Tests for the contract verifier.
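+//! Most tests stub out real toolchains via `MockCompilerResolver` below; see the `real`
+//! submodule for tests that exercise actual `solc` / `zksolc` / `vyper` / `zkvyper` binaries.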
+
+use std::{collections::HashMap, iter};
+
+use test_casing::{test_casing, Product};
+use tokio::sync::watch;
+use zksync_dal::Connection;
+use zksync_node_test_utils::{create_l1_batch, create_l2_block};
+use zksync_types::{
+    address_to_h256,
+    bytecode::BytecodeHash,
+    contract_verification_api::{CompilerVersions, SourceCodeData, VerificationIncomingRequest},
+    get_code_key, get_known_code_key,
+    l2::L2Tx,
+    tx::IncludedTxLocation,
+    Execute, L1BatchNumber, L2BlockNumber, ProtocolVersion, StorageLog, CONTRACT_DEPLOYER_ADDRESS,
+    H256, U256,
+};
+use zksync_vm_interface::{tracer::ValidationTraces, TransactionExecutionMetrics, VmEvent};
+
+use super::*;
+use crate::{
+    compilers::{SolcInput, VyperInput, ZkSolcInput},
+    resolver::{Compiler, SupportedCompilerVersions},
+};
+
+mod real;
+
+const SOLC_VERSION: &str = "0.8.27";
+const ZKSOLC_VERSION: &str = "1.5.4";
+
+const BYTECODE_KINDS: [BytecodeMarker; 2] = [BytecodeMarker::EraVm, BytecodeMarker::Evm];
+
+const COUNTER_CONTRACT: &str = r#"
+    contract Counter {
+        uint256 value;
+
+        function increment(uint256 x) external {
+            value += x;
+        }
+    }
+"#;
+const COUNTER_CONTRACT_WITH_CONSTRUCTOR: &str = r#"
+    contract Counter {
+        uint256 value;
+
+        constructor(uint256 _value) {
+            value = _value;
+        }
+
+        function increment(uint256 x) external {
+            value += x;
+        }
+    }
+"#;
+const COUNTER_CONTRACT_WITH_INTERFACE: &str = r#"
+    interface ICounter {
+        function increment(uint256 x) external;
+    }
+
+    contract Counter is ICounter {
+        uint256 value;
+
+        function increment(uint256 x) external override {
+            value += x;
+        }
+    }
+"#;
+const COUNTER_VYPER_CONTRACT: &str = r#"
+#pragma version ^0.3.10
+
+value: uint256
+
+@external
+def increment(x: uint256):
+    self.value += x
+"#;
+const EMPTY_YUL_CONTRACT: &str = r#"
+object "Empty" {
+    code {
+        mstore(0, 0)
+        return(0, 32)
+    }
+    object "Empty_deployed" {
+        code { }
+    }
+}
+"#;
+
+#[derive(Debug, Clone, Copy)]
+enum TestContract {
+    Counter,
+    CounterWithConstructor,
+}
+
+impl TestContract {
+    const ALL: [Self; 2] = [Self::Counter, Self::CounterWithConstructor];
+
+    fn source(self) -> &'static str {
+        match self {
+            Self::Counter => COUNTER_CONTRACT,
+            Self::CounterWithConstructor => COUNTER_CONTRACT_WITH_CONSTRUCTOR,
+        }
+    }
+
+    fn constructor_args(self) -> &'static [Token] {
+        match self {
+            Self::Counter => &[],
+            Self::CounterWithConstructor => &[Token::Uint(U256([42, 0, 0, 0]))],
+        }
+    }
+}
+
+/// Pads an EVM bytecode in the same way it's done by system contracts.
+fn pad_evm_bytecode(deployed_bytecode: &[u8]) -> Vec<u8> {
+    let mut padded = Vec::with_capacity(deployed_bytecode.len() + 32);
+    let len = U256::from(deployed_bytecode.len());
+    padded.extend_from_slice(&[0; 32]);
+    len.to_big_endian(&mut padded);
+    padded.extend_from_slice(deployed_bytecode);
+
+    // Pad to the 32-byte word boundary.
+    if padded.len() % 32 != 0 {
+        padded.extend(iter::repeat(0).take(32 - padded.len() % 32));
+    }
+    assert_eq!(padded.len() % 32, 0);
+
+    // Pad to contain an odd number of words.
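+    // (EraVM-style bytecode hashes require the bytecode to span an odd number of 32-byte words,
+    // which is presumably why an extra zero word is appended below when the count is even.)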
+    if (padded.len() / 32) % 2 != 1 {
+        padded.extend_from_slice(&[0; 32]);
+    }
+    assert_eq!((padded.len() / 32) % 2, 1);
+    padded
+}
+
+async fn mock_deployment(
+    storage: &mut Connection<'_, Core>,
+    address: Address,
+    bytecode: Vec<u8>,
+    constructor_args: &[Token],
+) {
+    let bytecode_hash = BytecodeHash::for_bytecode(&bytecode).value();
+    let deployment = Execute::for_deploy(H256::zero(), bytecode.clone(), constructor_args);
+    mock_deployment_inner(storage, address, bytecode_hash, bytecode, deployment).await;
+}
+
+async fn mock_evm_deployment(
+    storage: &mut Connection<'_, Core>,
+    address: Address,
+    creation_bytecode: Vec<u8>,
+    deployed_bytecode: &[u8],
+    constructor_args: &[Token],
+) {
+    let mut calldata = creation_bytecode;
+    calldata.extend_from_slice(&ethabi::encode(constructor_args));
+    let deployment = Execute {
+        contract_address: None,
+        calldata,
+        value: 0.into(),
+        factory_deps: vec![],
+    };
+    let bytecode = pad_evm_bytecode(deployed_bytecode);
+    let bytecode_hash = BytecodeHash::for_evm_bytecode(&bytecode).value();
+    mock_deployment_inner(storage, address, bytecode_hash, bytecode, deployment).await;
+}
+
+async fn mock_deployment_inner(
+    storage: &mut Connection<'_, Core>,
+    address: Address,
+    bytecode_hash: H256,
+    bytecode: Vec<u8>,
+    execute: Execute,
+) {
+    let logs = [
+        StorageLog::new_write_log(get_code_key(&address), bytecode_hash),
+        StorageLog::new_write_log(get_known_code_key(&bytecode_hash), H256::from_low_u64_be(1)),
+    ];
+    storage
+        .storage_logs_dal()
+        .append_storage_logs(L2BlockNumber(0), &logs)
+        .await
+        .unwrap();
+    storage
+        .factory_deps_dal()
+        .insert_factory_deps(
+            L2BlockNumber(0),
+            &HashMap::from([(bytecode_hash, bytecode.clone())]),
+        )
+        .await
+        .unwrap();
+
+    let mut deploy_tx = L2Tx {
+        execute,
+        common_data: Default::default(),
+        received_timestamp_ms: 0,
+        raw_bytes: Some(vec![0; 128].into()),
+    };
+    deploy_tx.set_input(vec![0; 128], H256::repeat_byte(0x23));
+    storage
+        .transactions_dal()
+        .insert_transaction_l2(
+            &deploy_tx,
+            TransactionExecutionMetrics::default(),
+            ValidationTraces::default(),
+        )
+        .await
+        .unwrap();
+
+    let deployer_address = Address::repeat_byte(0xff);
+    let location = IncludedTxLocation {
+        tx_hash: deploy_tx.hash(),
+        tx_index_in_l2_block: 0,
+        tx_initiator_address: deployer_address,
+    };
+    let deploy_event = VmEvent {
+        location: (L1BatchNumber(0), 0),
+        address: CONTRACT_DEPLOYER_ADDRESS,
+        indexed_topics: vec![
+            VmEvent::DEPLOY_EVENT_SIGNATURE,
+            address_to_h256(&deployer_address),
+            bytecode_hash,
+            address_to_h256(&address),
+        ],
+        value: vec![],
+    };
+    storage
+        .events_dal()
+        .save_events(L2BlockNumber(0), &[(location, vec![&deploy_event])])
+        .await
+        .unwrap();
+}
+
+type SharedMockFn<In> =
+    Arc<dyn Fn(In) -> Result<CompilationArtifacts, ContractVerifierError> + Send + Sync>;
+
+#[derive(Clone)]
+struct MockCompilerResolver {
+    zksolc: SharedMockFn<ZkSolcInput>,
+    solc: SharedMockFn<SolcInput>,
+}
+
+impl fmt::Debug for MockCompilerResolver {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        formatter
+            .debug_struct("MockCompilerResolver")
+            .finish_non_exhaustive()
+    }
+}
+
+impl MockCompilerResolver {
+    fn zksolc(
+        zksolc: impl Fn(ZkSolcInput) -> CompilationArtifacts + 'static + Send + Sync,
+    ) -> Self {
+        Self {
+            zksolc: Arc::new(move |input| Ok(zksolc(input))),
+            solc: Arc::new(|input| panic!("unexpected solc call: {input:?}")),
+        }
+    }
+
+    fn solc(solc: impl Fn(SolcInput) -> CompilationArtifacts + 'static + Send + Sync) -> Self {
+        Self {
+            solc: Arc::new(move |input| Ok(solc(input))),
+            zksolc: Arc::new(|input| panic!("unexpected zksolc call: {input:?}")),
{input:?}")), + } + } +} + +#[async_trait] +impl Compiler for MockCompilerResolver { + async fn compile( + self: Box, + input: ZkSolcInput, + ) -> Result { + (self.zksolc)(input) + } +} + +#[async_trait] +impl Compiler for MockCompilerResolver { + async fn compile( + self: Box, + input: SolcInput, + ) -> Result { + (self.solc)(input) + } +} + +#[async_trait] +impl CompilerResolver for MockCompilerResolver { + async fn supported_versions(&self) -> anyhow::Result { + Ok(SupportedCompilerVersions { + solc: vec![SOLC_VERSION.to_owned()], + zksolc: vec![ZKSOLC_VERSION.to_owned()], + vyper: vec![], + zkvyper: vec![], + }) + } + + async fn resolve_solc( + &self, + version: &str, + ) -> Result>, ContractVerifierError> { + if version != SOLC_VERSION { + return Err(ContractVerifierError::UnknownCompilerVersion( + "solc", + version.to_owned(), + )); + } + Ok(Box::new(self.clone())) + } + + async fn resolve_zksolc( + &self, + version: &ZkCompilerVersions, + ) -> Result>, ContractVerifierError> { + if version.base != SOLC_VERSION { + return Err(ContractVerifierError::UnknownCompilerVersion( + "solc", + version.base.clone(), + )); + } + if version.zk != ZKSOLC_VERSION { + return Err(ContractVerifierError::UnknownCompilerVersion( + "zksolc", + version.zk.clone(), + )); + } + Ok(Box::new(self.clone())) + } + + async fn resolve_vyper( + &self, + _version: &str, + ) -> Result>, ContractVerifierError> { + unreachable!("not tested") + } + + async fn resolve_zkvyper( + &self, + _version: &ZkCompilerVersions, + ) -> Result>, ContractVerifierError> { + unreachable!("not tested") + } +} + +fn test_request(address: Address, source: &str) -> VerificationIncomingRequest { + VerificationIncomingRequest { + contract_address: address, + source_code_data: SourceCodeData::SolSingleFile(source.into()), + contract_name: "Counter".to_owned(), + compiler_versions: CompilerVersions::Solc { + compiler_zksolc_version: Some(ZKSOLC_VERSION.to_owned()), + compiler_solc_version: SOLC_VERSION.to_owned(), + }, + optimization_used: true, + optimizer_mode: None, + constructor_arguments: Default::default(), + is_system: false, + force_evmla: false, + } +} + +fn counter_contract_abi() -> serde_json::Value { + serde_json::json!([{ + "inputs": [{ + "internalType": "uint256", + "name": "x", + "type": "uint256", + }], + "name": "increment", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function", + }]) +} + +async fn prepare_storage(storage: &mut Connection<'_, Core>) { + // Storage must contain at least 1 block / batch for verifier-related queries to work correctly. 
+ storage + .protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + storage + .blocks_dal() + .insert_l2_block(&create_l2_block(0)) + .await + .unwrap(); + storage + .blocks_dal() + .insert_mock_l1_batch(&create_l1_batch(0)) + .await + .unwrap(); +} + +#[test_casing(2, TestContract::ALL)] +#[tokio::test] +async fn contract_verifier_basics(contract: TestContract) { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + let expected_bytecode = vec![0_u8; 32]; + + prepare_storage(&mut storage).await; + let address = Address::repeat_byte(1); + mock_deployment( + &mut storage, + address, + expected_bytecode.clone(), + contract.constructor_args(), + ) + .await; + let mut req = test_request(address, contract.source()); + req.constructor_arguments = ethabi::encode(contract.constructor_args()).into(); + let request_id = storage + .contract_verification_dal() + .add_contract_verification_request(&req) + .await + .unwrap(); + + let mock_resolver = MockCompilerResolver::zksolc(|input| { + let ZkSolcInput::StandardJson { input, .. } = &input else { + panic!("unexpected input"); + }; + assert_eq!(input.language, "Solidity"); + assert_eq!(input.sources.len(), 1); + let source = input.sources.values().next().unwrap(); + assert!(source.content.contains("contract Counter"), "{source:?}"); + + CompilationArtifacts { + bytecode: vec![0; 32], + deployed_bytecode: None, + abi: counter_contract_abi(), + } + }); + let verifier = ContractVerifier::with_resolver( + Duration::from_secs(60), + pool.clone(), + Arc::new(mock_resolver), + ) + .await + .unwrap(); + + // Check that the compiler versions are synced. + let solc_versions = storage + .contract_verification_dal() + .get_solc_versions() + .await + .unwrap(); + assert_eq!(solc_versions, [SOLC_VERSION]); + let zksolc_versions = storage + .contract_verification_dal() + .get_zksolc_versions() + .await + .unwrap(); + assert_eq!(zksolc_versions, [ZKSOLC_VERSION]); + + let (_stop_sender, stop_receiver) = watch::channel(false); + verifier.run(stop_receiver, Some(1)).await.unwrap(); + + assert_request_success(&mut storage, request_id, address, &expected_bytecode).await; +} + +async fn assert_request_success( + storage: &mut Connection<'_, Core>, + request_id: usize, + address: Address, + expected_bytecode: &[u8], +) -> VerificationInfo { + let status = storage + .contract_verification_dal() + .get_verification_request_status(request_id) + .await + .unwrap() + .expect("no status"); + assert_eq!(status.error, None); + assert_eq!(status.compilation_errors, None); + assert_eq!(status.status, "successful"); + + let verification_info = storage + .contract_verification_dal() + .get_contract_verification_info(address) + .await + .unwrap() + .expect("no verification info"); + assert_eq!(verification_info.artifacts.bytecode, *expected_bytecode); + assert_eq!( + without_internal_types(verification_info.artifacts.abi.clone()), + without_internal_types(counter_contract_abi()) + ); + verification_info +} + +fn without_internal_types(mut abi: serde_json::Value) -> serde_json::Value { + let items = abi.as_array_mut().unwrap(); + for item in items { + if let Some(inputs) = item.get_mut("inputs") { + let inputs = inputs.as_array_mut().unwrap(); + for input in inputs { + input.as_object_mut().unwrap().remove("internalType"); + } + } + if let Some(outputs) = item.get_mut("outputs") { + let outputs = outputs.as_array_mut().unwrap(); + for output in outputs { + 
output.as_object_mut().unwrap().remove("internalType"); + } + } + } + abi +} + +#[test_casing(2, TestContract::ALL)] +#[tokio::test] +async fn verifying_evm_bytecode(contract: TestContract) { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + let creation_bytecode = vec![3_u8; 20]; + let deployed_bytecode = vec![5_u8; 10]; + + prepare_storage(&mut storage).await; + let address = Address::repeat_byte(1); + mock_evm_deployment( + &mut storage, + address, + creation_bytecode.clone(), + &deployed_bytecode, + contract.constructor_args(), + ) + .await; + let mut req = test_request(address, contract.source()); + req.compiler_versions = CompilerVersions::Solc { + compiler_solc_version: SOLC_VERSION.to_owned(), + compiler_zksolc_version: None, + }; + req.constructor_arguments = ethabi::encode(contract.constructor_args()).into(); + let request_id = storage + .contract_verification_dal() + .add_contract_verification_request(&req) + .await + .unwrap(); + + let artifacts = CompilationArtifacts { + bytecode: creation_bytecode.clone(), + deployed_bytecode: Some(deployed_bytecode), + abi: counter_contract_abi(), + }; + let mock_resolver = MockCompilerResolver::solc(move |input| { + assert_eq!(input.standard_json.language, "Solidity"); + assert_eq!(input.standard_json.sources.len(), 1); + let source = input.standard_json.sources.values().next().unwrap(); + assert!(source.content.contains("contract Counter"), "{source:?}"); + + artifacts.clone() + }); + let verifier = ContractVerifier::with_resolver( + Duration::from_secs(60), + pool.clone(), + Arc::new(mock_resolver), + ) + .await + .unwrap(); + + let (_stop_sender, stop_receiver) = watch::channel(false); + verifier.run(stop_receiver, Some(1)).await.unwrap(); + + assert_request_success(&mut storage, request_id, address, &creation_bytecode).await; +} + +#[tokio::test] +async fn bytecode_mismatch_error() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + prepare_storage(&mut storage).await; + + let address = Address::repeat_byte(1); + mock_deployment(&mut storage, address, vec![0xff; 32], &[]).await; + let req = test_request(address, COUNTER_CONTRACT); + let request_id = storage + .contract_verification_dal() + .add_contract_verification_request(&req) + .await + .unwrap(); + + let mock_resolver = MockCompilerResolver::zksolc(|_| CompilationArtifacts { + bytecode: vec![0; 32], + deployed_bytecode: None, + abi: counter_contract_abi(), + }); + let verifier = ContractVerifier::with_resolver( + Duration::from_secs(60), + pool.clone(), + Arc::new(mock_resolver), + ) + .await + .unwrap(); + + let (_stop_sender, stop_receiver) = watch::channel(false); + verifier.run(stop_receiver, Some(1)).await.unwrap(); + + let status = storage + .contract_verification_dal() + .get_verification_request_status(request_id) + .await + .unwrap() + .expect("no status"); + assert_eq!(status.status, "failed"); + assert!(status.compilation_errors.is_none(), "{status:?}"); + let err = status.error.unwrap(); + assert_eq!(err, ContractVerifierError::BytecodeMismatch.to_string()); +} + +#[test_casing(4, Product((TestContract::ALL, BYTECODE_KINDS)))] +#[tokio::test] +async fn args_mismatch_error(contract: TestContract, bytecode_kind: BytecodeMarker) { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + + prepare_storage(&mut storage).await; + let address = Address::repeat_byte(1); + let bytecode = vec![0_u8; 32]; + match bytecode_kind { 
+ BytecodeMarker::EraVm => { + mock_deployment( + &mut storage, + address, + bytecode.clone(), + contract.constructor_args(), + ) + .await; + } + BytecodeMarker::Evm => { + let creation_bytecode = vec![3_u8; 48]; + mock_evm_deployment( + &mut storage, + address, + creation_bytecode, + &bytecode, + contract.constructor_args(), + ) + .await; + } + } + + let mut req = test_request(address, contract.source()); + if matches!(bytecode_kind, BytecodeMarker::Evm) { + req.compiler_versions = CompilerVersions::Solc { + compiler_zksolc_version: None, + compiler_solc_version: SOLC_VERSION.to_owned(), + }; + } + + // Intentionally encode incorrect constructor args + req.constructor_arguments = match contract { + TestContract::Counter => ethabi::encode(&[Token::Bool(true)]).into(), + TestContract::CounterWithConstructor => ethabi::encode(&[]).into(), + }; + let request_id = storage + .contract_verification_dal() + .add_contract_verification_request(&req) + .await + .unwrap(); + + let mock_resolver = match bytecode_kind { + BytecodeMarker::EraVm => MockCompilerResolver::zksolc(move |_| CompilationArtifacts { + bytecode: bytecode.clone(), + deployed_bytecode: None, + abi: counter_contract_abi(), + }), + BytecodeMarker::Evm => MockCompilerResolver::solc(move |_| CompilationArtifacts { + bytecode: vec![3_u8; 48], + deployed_bytecode: Some(bytecode.clone()), + abi: counter_contract_abi(), + }), + }; + let verifier = ContractVerifier::with_resolver( + Duration::from_secs(60), + pool.clone(), + Arc::new(mock_resolver), + ) + .await + .unwrap(); + + let (_stop_sender, stop_receiver) = watch::channel(false); + verifier.run(stop_receiver, Some(1)).await.unwrap(); + + assert_constructor_args_mismatch(&mut storage, request_id).await; +} + +async fn assert_constructor_args_mismatch(storage: &mut Connection<'_, Core>, request_id: usize) { + let status = storage + .contract_verification_dal() + .get_verification_request_status(request_id) + .await + .unwrap() + .expect("no status"); + assert_eq!(status.status, "failed"); + assert_eq!(status.compilation_errors, None); + let err = status.error.unwrap(); + assert_eq!( + err, + ContractVerifierError::IncorrectConstructorArguments.to_string() + ); +} + +#[tokio::test] +async fn creation_bytecode_mismatch() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + prepare_storage(&mut storage).await; + + let address = Address::repeat_byte(1); + let creation_bytecode = vec![3; 20]; + let deployed_bytecode = vec![5; 10]; + mock_evm_deployment( + &mut storage, + address, + creation_bytecode, + &deployed_bytecode, + &[], + ) + .await; + let mut req = test_request(address, COUNTER_CONTRACT); + req.compiler_versions = CompilerVersions::Solc { + compiler_zksolc_version: None, + compiler_solc_version: SOLC_VERSION.to_owned(), + }; + let request_id = storage + .contract_verification_dal() + .add_contract_verification_request(&req) + .await + .unwrap(); + + let mock_resolver = MockCompilerResolver::solc(move |_| CompilationArtifacts { + bytecode: vec![4; 20], // differs from `creation_bytecode` + deployed_bytecode: Some(deployed_bytecode.clone()), + abi: counter_contract_abi(), + }); + let verifier = ContractVerifier::with_resolver( + Duration::from_secs(60), + pool.clone(), + Arc::new(mock_resolver), + ) + .await + .unwrap(); + + let (_stop_sender, stop_receiver) = watch::channel(false); + verifier.run(stop_receiver, Some(1)).await.unwrap(); + + let status = storage + .contract_verification_dal() + 
.get_verification_request_status(request_id) + .await + .unwrap() + .expect("no status"); + assert_eq!(status.status, "failed"); + assert!(status.compilation_errors.is_none(), "{status:?}"); + let err = status.error.unwrap(); + assert_eq!( + err, + ContractVerifierError::CreationBytecodeMismatch.to_string() + ); +} + +#[tokio::test] +async fn no_compiler_version() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + prepare_storage(&mut storage).await; + + let address = Address::repeat_byte(1); + mock_deployment(&mut storage, address, vec![0xff; 32], &[]).await; + let req = VerificationIncomingRequest { + compiler_versions: CompilerVersions::Solc { + compiler_zksolc_version: Some(ZKSOLC_VERSION.to_owned()), + compiler_solc_version: "1.0.0".to_owned(), // a man can dream + }, + ..test_request(address, COUNTER_CONTRACT) + }; + let request_id = storage + .contract_verification_dal() + .add_contract_verification_request(&req) + .await + .unwrap(); + + let mock_resolver = + MockCompilerResolver::zksolc(|_| unreachable!("should reject unknown solc version")); + let verifier = ContractVerifier::with_resolver( + Duration::from_secs(60), + pool.clone(), + Arc::new(mock_resolver), + ) + .await + .unwrap(); + + let (_stop_sender, stop_receiver) = watch::channel(false); + verifier.run(stop_receiver, Some(1)).await.unwrap(); + + let status = storage + .contract_verification_dal() + .get_verification_request_status(request_id) + .await + .unwrap() + .expect("no status"); + assert_eq!(status.status, "failed"); + assert!(status.compilation_errors.is_none(), "{status:?}"); + let error = status.error.unwrap(); + assert!(error.contains("solc version"), "{error}"); +} diff --git a/core/lib/contract_verifier/src/tests/real.rs b/core/lib/contract_verifier/src/tests/real.rs new file mode 100644 index 000000000000..4dbcf8860272 --- /dev/null +++ b/core/lib/contract_verifier/src/tests/real.rs @@ -0,0 +1,565 @@ +//! Tests using real compiler toolchains. Should be prepared by calling `zkstack contract-verifier init` +//! with at least one `solc` and `zksolc` version. If there are no compilers, the tests will be ignored +//! unless the `RUN_CONTRACT_VERIFICATION_TEST` env var is set to `true`, in which case the tests will fail. 
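+//! (With the env var set, a missing toolchain becomes a hard failure rather than a silent skip,
+//! which is presumably what CI relies on to catch broken compiler setups.)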
+ +use std::{env, sync::Arc, time::Duration}; + +use assert_matches::assert_matches; +use zksync_types::bytecode::validate_bytecode; + +use super::*; + +#[derive(Debug, Clone, Copy)] +enum Toolchain { + Solidity, + Vyper, +} + +impl Toolchain { + const ALL: [Self; 2] = [Self::Solidity, Self::Vyper]; +} + +#[derive(Debug, Clone)] +struct TestCompilerVersions { + solc: String, + zksolc: String, + vyper: String, + zkvyper: String, +} + +impl TestCompilerVersions { + fn new(mut versions: SupportedCompilerVersions) -> Option { + let solc = versions + .solc + .into_iter() + .find(|ver| !ver.starts_with("zkVM"))?; + Some(Self { + solc, + zksolc: versions.zksolc.pop()?, + vyper: versions.vyper.pop()?, + zkvyper: versions.zkvyper.pop()?, + }) + } + + fn zksolc(self) -> ZkCompilerVersions { + ZkCompilerVersions { + base: self.solc, + zk: self.zksolc, + } + } + + fn solc_for_api(self, bytecode_kind: BytecodeMarker) -> CompilerVersions { + CompilerVersions::Solc { + compiler_solc_version: self.solc, + compiler_zksolc_version: match bytecode_kind { + BytecodeMarker::Evm => None, + BytecodeMarker::EraVm => Some(self.zksolc), + }, + } + } + + fn zkvyper(self) -> ZkCompilerVersions { + ZkCompilerVersions { + base: self.vyper, + zk: self.zkvyper, + } + } + + fn vyper_for_api(self, bytecode_kind: BytecodeMarker) -> CompilerVersions { + CompilerVersions::Vyper { + compiler_vyper_version: self.vyper, + compiler_zkvyper_version: match bytecode_kind { + BytecodeMarker::Evm => None, + BytecodeMarker::EraVm => Some(self.zkvyper), + }, + } + } +} + +async fn checked_env_resolver() -> Option<(EnvCompilerResolver, TestCompilerVersions)> { + let compiler_resolver = EnvCompilerResolver::default(); + let supported_compilers = compiler_resolver.supported_versions().await.ok()?; + Some(( + compiler_resolver, + TestCompilerVersions::new(supported_compilers)?, + )) +} + +fn assert_no_compilers_expected() { + assert_ne!( + env::var("RUN_CONTRACT_VERIFICATION_TEST").ok().as_deref(), + Some("true"), + "Expected pre-installed compilers since `RUN_CONTRACT_VERIFICATION_TEST=true`, but they are not installed. \ + Use `zkstack contract-verifier init` to install compilers" + ); + println!("No compilers found, skipping the test"); +} + +/// Simplifies initializing real compiler resolver in tests. +macro_rules! 
real_resolver { + () => { + match checked_env_resolver().await { + Some(resolver_and_versions) => resolver_and_versions, + None => { + assert_no_compilers_expected(); + return; + } + } + }; +} + +#[test_casing(2, [false, true])] +#[tokio::test] +async fn using_real_zksolc(specify_contract_file: bool) { + let (compiler_resolver, supported_compilers) = real_resolver!(); + + let compiler = compiler_resolver + .resolve_zksolc(&supported_compilers.clone().zksolc()) + .await + .unwrap(); + let mut req = VerificationIncomingRequest { + compiler_versions: supported_compilers.solc_for_api(BytecodeMarker::EraVm), + ..test_request(Address::repeat_byte(1), COUNTER_CONTRACT) + }; + if specify_contract_file { + set_multi_file_solc_input(&mut req); + } + + let input = ZkSolc::build_input(req).unwrap(); + let output = compiler.compile(input).await.unwrap(); + + validate_bytecode(&output.bytecode).unwrap(); + assert_eq!(output.abi, counter_contract_abi()); +} + +fn set_multi_file_solc_input(req: &mut VerificationIncomingRequest) { + let input = serde_json::json!({ + "language": "Solidity", + "sources": { + "contracts/test.sol": { + "content": COUNTER_CONTRACT, + }, + }, + "settings": { + "optimizer": { "enabled": true }, + }, + }); + let serde_json::Value::Object(input) = input else { + unreachable!(); + }; + req.source_code_data = SourceCodeData::StandardJsonInput(input); + req.contract_name = "contracts/test.sol:Counter".to_owned(); +} + +#[test_casing(2, [false, true])] +#[tokio::test] +async fn using_standalone_solc(specify_contract_file: bool) { + let (compiler_resolver, supported_compilers) = real_resolver!(); + + let version = &supported_compilers.solc; + let compiler = compiler_resolver.resolve_solc(version).await.unwrap(); + let mut req = VerificationIncomingRequest { + compiler_versions: CompilerVersions::Solc { + compiler_solc_version: version.clone(), + compiler_zksolc_version: None, + }, + ..test_request(Address::repeat_byte(1), COUNTER_CONTRACT) + }; + if specify_contract_file { + set_multi_file_solc_input(&mut req); + } + + let input = Solc::build_input(req).unwrap(); + let output = compiler.compile(input).await.unwrap(); + + assert!(output.deployed_bytecode.is_some()); + assert_eq!(output.abi, counter_contract_abi()); +} + +#[test_casing(2, [false, true])] +#[tokio::test] +async fn using_zksolc_with_abstract_contract(specify_contract_file: bool) { + let (compiler_resolver, supported_compilers) = real_resolver!(); + + let compiler = compiler_resolver + .resolve_zksolc(&supported_compilers.clone().zksolc()) + .await + .unwrap(); + let (source_code_data, contract_name) = if specify_contract_file { + let input = serde_json::json!({ + "language": "Solidity", + "sources": { + "contracts/test.sol": { + "content": COUNTER_CONTRACT_WITH_INTERFACE, + }, + }, + "settings": { + "optimizer": { "enabled": true }, + }, + }); + let serde_json::Value::Object(input) = input else { + unreachable!(); + }; + ( + SourceCodeData::StandardJsonInput(input), + "contracts/test.sol:ICounter", + ) + } else { + ( + SourceCodeData::SolSingleFile(COUNTER_CONTRACT_WITH_INTERFACE.to_owned()), + "ICounter", + ) + }; + + let req = VerificationIncomingRequest { + contract_address: Address::repeat_byte(1), + compiler_versions: supported_compilers.solc_for_api(BytecodeMarker::EraVm), + optimization_used: true, + optimizer_mode: None, + constructor_arguments: Default::default(), + is_system: false, + source_code_data, + contract_name: contract_name.to_owned(), + force_evmla: false, + }; + + let input = 
ZkSolc::build_input(req).unwrap(); + let err = compiler.compile(input).await.unwrap_err(); + assert_matches!( + err, + ContractVerifierError::AbstractContract(name) if name == "ICounter" + ); +} + +fn test_yul_request(compiler_versions: CompilerVersions) -> VerificationIncomingRequest { + VerificationIncomingRequest { + contract_address: Default::default(), + source_code_data: SourceCodeData::YulSingleFile(EMPTY_YUL_CONTRACT.to_owned()), + contract_name: "Empty".to_owned(), + compiler_versions, + optimization_used: true, + optimizer_mode: None, + constructor_arguments: Default::default(), + is_system: false, + force_evmla: false, + } +} + +#[tokio::test] +async fn compiling_yul_with_zksolc() { + let (compiler_resolver, supported_compilers) = real_resolver!(); + + let version = supported_compilers.clone().zksolc(); + let compiler = compiler_resolver.resolve_zksolc(&version).await.unwrap(); + let req = test_yul_request(supported_compilers.solc_for_api(BytecodeMarker::EraVm)); + let input = ZkSolc::build_input(req).unwrap(); + let output = compiler.compile(input).await.unwrap(); + + assert!(!output.bytecode.is_empty()); + assert!(output.deployed_bytecode.is_none()); + assert_eq!(output.abi, serde_json::json!([])); +} + +#[tokio::test] +async fn compiling_standalone_yul() { + let (compiler_resolver, supported_compilers) = real_resolver!(); + + let version = &supported_compilers.solc; + let compiler = compiler_resolver.resolve_solc(version).await.unwrap(); + let req = test_yul_request(CompilerVersions::Solc { + compiler_solc_version: version.clone(), + compiler_zksolc_version: None, + }); + let input = Solc::build_input(req).unwrap(); + let output = compiler.compile(input).await.unwrap(); + + assert!(!output.bytecode.is_empty()); + assert_ne!(output.deployed_bytecode.unwrap(), output.bytecode); + assert_eq!(output.abi, serde_json::json!([])); +} + +fn test_vyper_request( + filename: &str, + contract_name: &str, + supported_compilers: TestCompilerVersions, + bytecode_kind: BytecodeMarker, +) -> VerificationIncomingRequest { + VerificationIncomingRequest { + contract_address: Address::repeat_byte(1), + source_code_data: SourceCodeData::VyperMultiFile(HashMap::from([( + filename.to_owned(), + COUNTER_VYPER_CONTRACT.to_owned(), + )])), + contract_name: contract_name.to_owned(), + compiler_versions: supported_compilers.vyper_for_api(bytecode_kind), + optimization_used: true, + optimizer_mode: None, + constructor_arguments: Default::default(), + is_system: false, + force_evmla: false, + } +} + +#[test_casing(2, [false, true])] +#[tokio::test] +async fn using_real_zkvyper(specify_contract_file: bool) { + let (compiler_resolver, supported_compilers) = real_resolver!(); + + let compiler = compiler_resolver + .resolve_zkvyper(&supported_compilers.clone().zkvyper()) + .await + .unwrap(); + let (filename, contract_name) = if specify_contract_file { + ("contracts/Counter.vy", "contracts/Counter.vy:Counter") + } else { + ("Counter", "Counter") + }; + let req = test_vyper_request( + filename, + contract_name, + supported_compilers, + BytecodeMarker::EraVm, + ); + let input = VyperInput::new(req).unwrap(); + let output = compiler.compile(input).await.unwrap(); + + validate_bytecode(&output.bytecode).unwrap(); + assert_eq!(output.abi, without_internal_types(counter_contract_abi())); +} + +#[test_casing(2, [false, true])] +#[tokio::test] +async fn using_standalone_vyper(specify_contract_file: bool) { + let (compiler_resolver, supported_compilers) = real_resolver!(); + + let version = &supported_compilers.vyper; 
+ let compiler = compiler_resolver.resolve_vyper(version).await.unwrap(); + let (filename, contract_name) = if specify_contract_file { + ("contracts/Counter.vy", "contracts/Counter.vy:Counter") + } else { + ("Counter.vy", "Counter") + }; + let req = test_vyper_request( + filename, + contract_name, + supported_compilers, + BytecodeMarker::Evm, + ); + let input = VyperInput::new(req).unwrap(); + let output = compiler.compile(input).await.unwrap(); + + assert!(output.deployed_bytecode.is_some()); + assert_eq!(output.abi, without_internal_types(counter_contract_abi())); +} + +#[tokio::test] +async fn using_standalone_vyper_without_optimization() { + let (compiler_resolver, supported_compilers) = real_resolver!(); + + let version = &supported_compilers.vyper; + let compiler = compiler_resolver.resolve_vyper(version).await.unwrap(); + let mut req = test_vyper_request( + "counter.vy", + "counter", + supported_compilers, + BytecodeMarker::Evm, + ); + req.optimization_used = false; + let input = VyperInput::new(req).unwrap(); + let output = compiler.compile(input).await.unwrap(); + + assert!(output.deployed_bytecode.is_some()); + assert_eq!(output.abi, without_internal_types(counter_contract_abi())); +} + +#[tokio::test] +async fn using_standalone_vyper_with_code_size_optimization() { + let (compiler_resolver, supported_compilers) = real_resolver!(); + + let version = &supported_compilers.vyper; + let compiler = compiler_resolver.resolve_vyper(version).await.unwrap(); + let mut req = test_vyper_request( + "counter.vy", + "counter", + supported_compilers, + BytecodeMarker::Evm, + ); + req.optimization_used = true; + req.optimizer_mode = Some("codesize".to_owned()); + let input = VyperInput::new(req).unwrap(); + let output = compiler.compile(input).await.unwrap(); + + assert!(output.deployed_bytecode.is_some()); + assert_eq!(output.abi, without_internal_types(counter_contract_abi())); +} + +#[tokio::test] +async fn using_standalone_vyper_with_bogus_optimization() { + let (compiler_resolver, supported_compilers) = real_resolver!(); + + let version = &supported_compilers.vyper; + let compiler = compiler_resolver.resolve_vyper(version).await.unwrap(); + let mut req = test_vyper_request( + "counter.vy", + "counter", + supported_compilers, + BytecodeMarker::Evm, + ); + req.optimization_used = true; + req.optimizer_mode = Some("???".to_owned()); + let input = VyperInput::new(req).unwrap(); + let err = compiler.compile(input).await.unwrap_err(); + + let ContractVerifierError::CompilationError(serde_json::Value::Array(errors)) = err else { + panic!("unexpected error: {err:?}"); + }; + let has_opt_level_error = errors + .iter() + .any(|err| err.as_str().unwrap().contains("optimization level")); + assert!(has_opt_level_error, "{errors:?}"); +} + +#[test_casing(4, Product((BYTECODE_KINDS, Toolchain::ALL)))] +#[tokio::test] +async fn using_real_compiler_in_verifier(bytecode_kind: BytecodeMarker, toolchain: Toolchain) { + let (compiler_resolver, supported_compilers) = real_resolver!(); + + let req = match toolchain { + Toolchain::Solidity => VerificationIncomingRequest { + compiler_versions: supported_compilers.clone().solc_for_api(bytecode_kind), + ..test_request(Address::repeat_byte(1), COUNTER_CONTRACT) + }, + Toolchain::Vyper => VerificationIncomingRequest { + compiler_versions: supported_compilers.clone().vyper_for_api(bytecode_kind), + source_code_data: SourceCodeData::VyperMultiFile(HashMap::from([( + "Counter.vy".to_owned(), + COUNTER_VYPER_CONTRACT.to_owned(), + )])), + 
..test_request(Address::repeat_byte(1), COUNTER_CONTRACT) + }, + }; + let address = Address::repeat_byte(1); + let output = match (bytecode_kind, toolchain) { + (BytecodeMarker::EraVm, Toolchain::Solidity) => { + let compiler = compiler_resolver + .resolve_zksolc(&supported_compilers.zksolc()) + .await + .unwrap(); + let input = ZkSolc::build_input(req.clone()).unwrap(); + compiler.compile(input).await.unwrap() + } + (BytecodeMarker::Evm, Toolchain::Solidity) => { + let solc_version = &supported_compilers.solc; + let compiler = compiler_resolver.resolve_solc(solc_version).await.unwrap(); + let input = Solc::build_input(req.clone()).unwrap(); + compiler.compile(input).await.unwrap() + } + (_, Toolchain::Vyper) => { + let compiler = match bytecode_kind { + BytecodeMarker::EraVm => compiler_resolver + .resolve_zkvyper(&supported_compilers.zkvyper()) + .await + .unwrap(), + BytecodeMarker::Evm => compiler_resolver + .resolve_vyper(&supported_compilers.vyper) + .await + .unwrap(), + }; + let input = VyperInput::new(req.clone()).unwrap(); + compiler.compile(input).await.unwrap() + } + }; + + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + prepare_storage(&mut storage).await; + match bytecode_kind { + BytecodeMarker::EraVm => { + mock_deployment(&mut storage, address, output.bytecode.clone(), &[]).await; + } + BytecodeMarker::Evm => { + mock_evm_deployment( + &mut storage, + address, + output.bytecode.clone(), + output.deployed_bytecode(), + &[], + ) + .await; + } + } + let request_id = storage + .contract_verification_dal() + .add_contract_verification_request(&req) + .await + .unwrap(); + + let verifier = ContractVerifier::with_resolver( + Duration::from_secs(60), + pool.clone(), + Arc::new(compiler_resolver), + ) + .await + .unwrap(); + + let (_stop_sender, stop_receiver) = watch::channel(false); + verifier.run(stop_receiver, Some(1)).await.unwrap(); + + assert_request_success(&mut storage, request_id, address, &output.bytecode).await; +} + +#[test_casing(2, BYTECODE_KINDS)] +#[tokio::test] +async fn compilation_errors(bytecode_kind: BytecodeMarker) { + let (compiler_resolver, supported_compilers) = real_resolver!(); + + let address = Address::repeat_byte(1); + let req = VerificationIncomingRequest { + compiler_versions: supported_compilers.solc_for_api(bytecode_kind), + source_code_data: SourceCodeData::SolSingleFile("contract ???".to_owned()), + ..test_request(Address::repeat_byte(1), COUNTER_CONTRACT) + }; + + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + prepare_storage(&mut storage).await; + match bytecode_kind { + BytecodeMarker::EraVm => { + mock_deployment(&mut storage, address, vec![0; 32], &[]).await; + } + BytecodeMarker::Evm => { + mock_evm_deployment(&mut storage, address, vec![3; 20], &[5; 10], &[]).await; + } + } + + let request_id = storage + .contract_verification_dal() + .add_contract_verification_request(&req) + .await + .unwrap(); + + let verifier = ContractVerifier::with_resolver( + Duration::from_secs(60), + pool.clone(), + Arc::new(compiler_resolver), + ) + .await + .unwrap(); + + let (_stop_sender, stop_receiver) = watch::channel(false); + verifier.run(stop_receiver, Some(1)).await.unwrap(); + + let status = storage + .contract_verification_dal() + .get_verification_request_status(request_id) + .await + .unwrap() + .expect("no status"); + assert_eq!(status.status, "failed"); + let compilation_errors = status.compilation_errors.unwrap(); + 
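+    // Single-file Solidity sources are presumably compiled under a virtual `Counter.sol` file
+    // (named after the contract), which is why the assertions below expect it in the error.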
assert!(!compilation_errors.is_empty()); + let has_parser_error = compilation_errors + .iter() + .any(|err| err.contains("ParserError") && err.contains("Counter.sol")); + assert!(has_parser_error, "{compilation_errors:?}"); +} diff --git a/core/lib/contract_verifier/src/zksolc_utils.rs b/core/lib/contract_verifier/src/zksolc_utils.rs deleted file mode 100644 index 08004632bcec..000000000000 --- a/core/lib/contract_verifier/src/zksolc_utils.rs +++ /dev/null @@ -1,250 +0,0 @@ -use std::{collections::HashMap, io::Write, path::PathBuf, process::Stdio}; - -use semver::Version; -use serde::{Deserialize, Serialize}; - -use crate::error::ContractVerifierError; - -#[derive(Debug)] -pub enum ZkSolcInput { - StandardJson(StandardJson), - YulSingleFile { - source_code: String, - is_system: bool, - }, -} - -#[derive(Debug)] -pub enum ZkSolcOutput { - StandardJson(serde_json::Value), - YulSingleFile(String), -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct StandardJson { - /// The input language. - pub language: String, - /// The input source code files hashmap. - pub sources: HashMap, - /// The compiler settings. - pub settings: Settings, -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Source { - /// The source code file content. - pub content: String, -} - -/// Compiler settings. -/// There are fields like `output_selection`, `is_system`, `force_evmla` which are accessed by contract verifier explicitly. -/// Other fields are accumulated in `other`, this way every field that was in the original request will be passed to a compiler. -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Settings { - /// The output selection filters. - pub output_selection: Option, - /// Flag for system compilation mode. - #[serde(default)] - pub is_system: bool, - /// Flag to force `evmla` IR. - #[serde(default)] - pub force_evmla: bool, - /// Other fields. - #[serde(flatten)] - pub other: serde_json::Value, -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Optimizer { - /// Whether the optimizer is enabled. - pub enabled: bool, - /// The optimization mode string. - pub mode: Option, -} - -impl Default for Optimizer { - fn default() -> Self { - Self { - enabled: true, - mode: None, - } - } -} - -pub struct ZkSolc { - zksolc_path: PathBuf, - solc_path: PathBuf, - zksolc_version: String, -} - -impl ZkSolc { - pub fn new( - zksolc_path: impl Into, - solc_path: impl Into, - zksolc_version: String, - ) -> Self { - ZkSolc { - zksolc_path: zksolc_path.into(), - solc_path: solc_path.into(), - zksolc_version, - } - } - - pub async fn async_compile( - &self, - input: ZkSolcInput, - ) -> Result { - use tokio::io::AsyncWriteExt; - let mut command = tokio::process::Command::new(&self.zksolc_path); - command.stdout(Stdio::piped()).stderr(Stdio::piped()); - - match &input { - ZkSolcInput::StandardJson(input) => { - if !self.is_post_1_5_0() { - if input.settings.is_system { - command.arg("--system-mode"); - } - if input.settings.force_evmla { - command.arg("--force-evmla"); - } - } - - command.arg("--solc").arg(self.solc_path.to_str().unwrap()); - } - ZkSolcInput::YulSingleFile { is_system, .. 
} => { - if self.is_post_1_5_0() { - if *is_system { - command.arg("--enable-eravm-extensions"); - } else { - command.arg("--solc").arg(self.solc_path.to_str().unwrap()); - } - } else { - if *is_system { - command.arg("--system-mode"); - } - command.arg("--solc").arg(self.solc_path.to_str().unwrap()); - } - } - } - match input { - ZkSolcInput::StandardJson(input) => { - let mut child = command - .arg("--standard-json") - .stdin(Stdio::piped()) - .spawn() - .map_err(|_err| ContractVerifierError::InternalError)?; - let stdin = child.stdin.as_mut().unwrap(); - let content = serde_json::to_vec(&input).unwrap(); - stdin - .write_all(&content) - .await - .map_err(|_err| ContractVerifierError::InternalError)?; - stdin - .flush() - .await - .map_err(|_err| ContractVerifierError::InternalError)?; - - let output = child - .wait_with_output() - .await - .map_err(|_err| ContractVerifierError::InternalError)?; - if output.status.success() { - Ok(ZkSolcOutput::StandardJson( - serde_json::from_slice(&output.stdout) - .expect("Compiler output must be valid JSON"), - )) - } else { - Err(ContractVerifierError::CompilerError( - "zksolc".to_string(), - String::from_utf8_lossy(&output.stderr).to_string(), - )) - } - } - ZkSolcInput::YulSingleFile { source_code, .. } => { - let mut file = tempfile::Builder::new() - .prefix("input") - .suffix(".yul") - .rand_bytes(0) - .tempfile() - .map_err(|_err| ContractVerifierError::InternalError)?; - file.write_all(source_code.as_bytes()) - .map_err(|_err| ContractVerifierError::InternalError)?; - let child = command - .arg(file.path().to_str().unwrap()) - .arg("--optimization") - .arg("3") - .arg("--yul") - .arg("--bin") - .spawn() - .map_err(|_err| ContractVerifierError::InternalError)?; - let output = child - .wait_with_output() - .await - .map_err(|_err| ContractVerifierError::InternalError)?; - if output.status.success() { - Ok(ZkSolcOutput::YulSingleFile( - String::from_utf8(output.stdout).expect("Couldn't parse string"), - )) - } else { - Err(ContractVerifierError::CompilerError( - "zksolc".to_string(), - String::from_utf8_lossy(&output.stderr).to_string(), - )) - } - } - } - } - - pub fn is_post_1_5_0(&self) -> bool { - // Special case - if &self.zksolc_version == "vm-1.5.0-a167aa3" { - false - } else if let Some(version) = self.zksolc_version.strip_prefix("v") { - if let Ok(semver) = Version::parse(version) { - let target = Version::new(1, 5, 0); - semver >= target - } else { - true - } - } else { - true - } - } -} - -#[cfg(test)] -mod tests { - use crate::zksolc_utils::ZkSolc; - - #[test] - fn check_is_post_1_5_0() { - // Special case. 
- let mut zksolc = ZkSolc::new(".", ".", "vm-1.5.0-a167aa3".to_string()); - assert!(!zksolc.is_post_1_5_0(), "vm-1.5.0-a167aa3"); - - zksolc.zksolc_version = "v1.5.0".to_string(); - assert!(zksolc.is_post_1_5_0(), "v1.5.0"); - - zksolc.zksolc_version = "v1.5.1".to_string(); - assert!(zksolc.is_post_1_5_0(), "v1.5.1"); - - zksolc.zksolc_version = "v1.10.1".to_string(); - assert!(zksolc.is_post_1_5_0(), "v1.10.1"); - - zksolc.zksolc_version = "v2.0.0".to_string(); - assert!(zksolc.is_post_1_5_0(), "v2.0.0"); - - zksolc.zksolc_version = "v1.4.15".to_string(); - assert!(!zksolc.is_post_1_5_0(), "v1.4.15"); - - zksolc.zksolc_version = "v1.3.21".to_string(); - assert!(!zksolc.is_post_1_5_0(), "v1.3.21"); - - zksolc.zksolc_version = "v0.5.1".to_string(); - assert!(!zksolc.is_post_1_5_0(), "v0.5.1"); - } -} diff --git a/core/lib/contract_verifier/src/zkvyper_utils.rs b/core/lib/contract_verifier/src/zkvyper_utils.rs deleted file mode 100644 index c597f78d4588..000000000000 --- a/core/lib/contract_verifier/src/zkvyper_utils.rs +++ /dev/null @@ -1,73 +0,0 @@ -use std::{collections::HashMap, fs::File, io::Write, path::PathBuf, process::Stdio}; - -use crate::error::ContractVerifierError; - -#[derive(Debug)] -pub struct ZkVyperInput { - pub sources: HashMap, - pub optimizer_mode: Option, -} - -pub struct ZkVyper { - zkvyper_path: PathBuf, - vyper_path: PathBuf, -} - -impl ZkVyper { - pub fn new(zkvyper_path: impl Into, vyper_path: impl Into) -> Self { - ZkVyper { - zkvyper_path: zkvyper_path.into(), - vyper_path: vyper_path.into(), - } - } - - pub async fn async_compile( - &self, - input: ZkVyperInput, - ) -> Result { - let mut command = tokio::process::Command::new(&self.zkvyper_path); - if let Some(o) = input.optimizer_mode.as_ref() { - command.arg("-O").arg(o); - } - command - .arg("--vyper") - .arg(self.vyper_path.to_str().unwrap()) - .arg("-f") - .arg("combined_json") - .stdout(Stdio::piped()) - .stderr(Stdio::piped()); - - let temp_dir = tempfile::tempdir().map_err(|_err| ContractVerifierError::InternalError)?; - for (mut name, content) in input.sources { - if !name.ends_with(".vy") { - name += ".vy"; - } - let path = temp_dir.path().join(name); - if let Some(prefix) = path.parent() { - std::fs::create_dir_all(prefix) - .map_err(|_err| ContractVerifierError::InternalError)?; - } - let mut file = - File::create(&path).map_err(|_err| ContractVerifierError::InternalError)?; - file.write_all(content.as_bytes()) - .map_err(|_err| ContractVerifierError::InternalError)?; - command.arg(path.into_os_string()); - } - - let child = command - .spawn() - .map_err(|_err| ContractVerifierError::InternalError)?; - let output = child - .wait_with_output() - .await - .map_err(|_err| ContractVerifierError::InternalError)?; - if output.status.success() { - Ok(serde_json::from_slice(&output.stdout).expect("Compiler output must be valid JSON")) - } else { - Err(ContractVerifierError::CompilerError( - "zkvyper".to_string(), - String::from_utf8_lossy(&output.stderr).to_string(), - )) - } - } -} diff --git a/core/lib/contracts/Cargo.toml b/core/lib/contracts/Cargo.toml index 2b80295cf440..0a24012f1ba6 100644 --- a/core/lib/contracts/Cargo.toml +++ b/core/lib/contracts/Cargo.toml @@ -11,11 +11,14 @@ keywords.workspace = true categories.workspace = true [dependencies] +zksync_basic_types.workspace = true zksync_utils.workspace = true -ethabi.workspace = true serde_json.workspace = true serde.workspace = true once_cell.workspace = true hex.workspace = true envy.workspace = true + +[dev-dependencies] +bincode.workspace = 
true diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index a9e7324d5af6..74efa72793aa 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -10,15 +10,16 @@ use std::{ path::{Path, PathBuf}, }; -use ethabi::{ - ethereum_types::{H256, U256}, - Contract, Event, Function, -}; use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, env::Workspace}; +use zksync_basic_types::{ + bytecode::BytecodeHash, + ethabi::{Contract, Event, Function}, + H256, +}; +use zksync_utils::env::Workspace; -pub mod test_contracts; +mod serde_bytecode; #[derive(Debug, Clone)] pub enum ContractLanguage { @@ -60,10 +61,6 @@ const _IERC20_CONTRACT_FILE: &str = "contracts/l1-contracts/artifacts/contracts/common/interfaces/IERC20.sol/IERC20.json"; const _FAIL_ON_RECEIVE_CONTRACT_FILE: &str = "contracts/l1-contracts/artifacts/contracts/zksync/dev-contracts/FailOnReceive.sol/FailOnReceive.json"; -const LOADNEXT_CONTRACT_FILE: &str = - "etc/contracts-test-data/artifacts-zk/contracts/loadnext/loadnext_contract.sol/LoadnextContract.json"; -const LOADNEXT_SIMPLE_CONTRACT_FILE: &str = - "etc/contracts-test-data/artifacts-zk/contracts/loadnext/loadnext_contract.sol/Foo.json"; fn home_path() -> PathBuf { Workspace::locate().core() @@ -173,33 +170,6 @@ pub fn verifier_contract() -> Contract { load_contract_for_both_compilers(VERIFIER_CONTRACT_FILE) } -#[derive(Debug, Clone)] -pub struct TestContract { - /// Contract bytecode to be used for sending deploy transaction. - pub bytecode: Vec, - /// Contract ABI. - pub contract: Contract, - - pub factory_deps: Vec>, -} - -/// Reads test contract bytecode and its ABI. -pub fn get_loadnext_contract() -> TestContract { - let bytecode = read_bytecode(LOADNEXT_CONTRACT_FILE); - let dep = read_bytecode(LOADNEXT_SIMPLE_CONTRACT_FILE); - - TestContract { - bytecode, - contract: loadnext_contract(), - factory_deps: vec![dep], - } -} - -// Returns loadnext contract and its factory dependencies -fn loadnext_contract() -> Contract { - load_contract("etc/contracts-test-data/artifacts-zk/contracts/loadnext/loadnext_contract.sol/LoadnextContract.json") -} - pub fn deployer_contract() -> Contract { load_sys_contract("ContractDeployer") } @@ -295,10 +265,11 @@ impl SystemContractsRepo { ))) { contract } else { - read_zbin_bytecode_from_path(self.root.join(format!( - "contracts-preprocessed/{0}artifacts/{1}.yul.zbin", - directory, name - ))) + read_yul_bytecode_by_path( + self.root + .join(format!("contracts-preprocessed/{directory}artifacts")), + name, + ) } } } @@ -313,10 +284,10 @@ pub fn read_bootloader_code(bootloader_type: &str) -> Vec { { return contract; }; - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/build/artifacts/{}.yul.zbin", - bootloader_type - )) + read_yul_bytecode( + "contracts/system-contracts/bootloader/build/artifacts", + bootloader_type, + ) } fn read_proved_batch_bootloader_bytecode() -> Vec { @@ -378,7 +349,8 @@ fn read_zbin_bytecode_from_hex_file(bytecode_path: PathBuf) -> Vec { /// Hash of code and code which consists of 32 bytes words #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SystemContractCode { - pub code: Vec, + #[serde(with = "serde_bytecode")] + pub code: Vec, pub hash: H256, } @@ -408,18 +380,16 @@ impl PartialEq for BaseSystemContracts { impl BaseSystemContracts { fn load_with_bootloader(bootloader_bytecode: Vec) -> Self { - let hash = hash_bytecode(&bootloader_bytecode); - + let hash = 
BytecodeHash::for_bytecode(&bootloader_bytecode).value(); let bootloader = SystemContractCode { - code: bytes_to_be_words(bootloader_bytecode), + code: bootloader_bytecode, hash, }; let bytecode = read_sys_contract_bytecode("", "DefaultAccount", ContractLanguage::Sol); - let hash = hash_bytecode(&bytecode); - + let hash = BytecodeHash::for_bytecode(&bytecode).value(); let default_aa = SystemContractCode { - code: bytes_to_be_words(bytecode), + code: bytecode, hash, }; @@ -438,10 +408,10 @@ impl BaseSystemContracts { /// Loads the latest EVM emulator for these base system contracts. Logically, it only makes sense to do for the latest protocol version. pub fn with_latest_evm_emulator(mut self) -> Self { - let bytecode = read_sys_contract_bytecode("", "EvmInterpreter", ContractLanguage::Yul); - let hash = hash_bytecode(&bytecode); + let bytecode = read_sys_contract_bytecode("", "EvmEmulator", ContractLanguage::Yul); + let hash = BytecodeHash::for_bytecode(&bytecode).value(); self.evm_emulator = Some(SystemContractCode { - code: bytes_to_be_words(bytecode), + code: bytecode, hash, }); self } @@ -515,6 +485,13 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + pub fn playground_gateway() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_gateway/playground_batch.yul/playground_batch.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + pub fn estimate_gas_pre_virtual_blocks() -> Self { let bootloader_bytecode = read_zbin_bytecode( "etc/multivm_bootloaders/vm_1_3_2/fee_estimate.yul/fee_estimate.yul.zbin", @@ -585,6 +562,13 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + pub fn estimate_gas_gateway() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_gateway/fee_estimate.yul/fee_estimate.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + pub fn hashes(&self) -> BaseSystemContractsHashes { BaseSystemContractsHashes { bootloader: self.bootloader.hash,
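The lib.rs changes above swap the word-based representation (`Vec<U256>` built via `bytes_to_be_words`, hashed with `hash_bytecode`) for raw bytes hashed through `BytecodeHash`. A minimal sketch of the new construction path (illustrative, not from the PR; it assumes the bytecode is already padded to a whole number of 32-byte words, as zk bytecode must be):

    use zksync_basic_types::bytecode::BytecodeHash;

    fn system_contract_code_from_bytes(bytecode: Vec<u8>) -> SystemContractCode {
        // zk bytecode is always a whole number of 32-byte words.
        assert_eq!(bytecode.len() % 32, 0, "bytecode must be padded to 32-byte words");
        let hash = BytecodeHash::for_bytecode(&bytecode).value();
        SystemContractCode { code: bytecode, hash }
    }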
diff --git a/core/lib/contracts/src/serde_bytecode.rs b/core/lib/contracts/src/serde_bytecode.rs new file mode 100644 index 000000000000..8f250fe4672a --- /dev/null +++ b/core/lib/contracts/src/serde_bytecode.rs @@ -0,0 +1,112 @@ +use std::fmt; + +use serde::{de, de::SeqAccess, ser, ser::SerializeSeq, Deserializer, Serializer}; +use zksync_basic_types::U256; + +pub(super) fn serialize<S: Serializer>(bytes: &[u8], serializer: S) -> Result<S::Ok, S::Error> { + if bytes.len() % 32 != 0 { + return Err(ser::Error::custom("bytecode length is not divisible by 32")); + } + let mut seq = serializer.serialize_seq(Some(bytes.len() / 32))?; + for chunk in bytes.chunks(32) { + let word = U256::from_big_endian(chunk); + seq.serialize_element(&word)?; + } + seq.end() +} + +#[derive(Debug)] +struct SeqVisitor; + +impl<'de> de::Visitor<'de> for SeqVisitor { + type Value = Vec<u8>; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(formatter, "sequence of `U256` words") + } + + fn visit_seq<A: SeqAccess<'de>>(self, mut seq: A) -> Result<Self::Value, A::Error> { + let len = seq.size_hint().unwrap_or(0) * 32; + let mut bytes = Vec::with_capacity(len); + while let Some(value) = seq.next_element::<U256>()? { + let prev_len = bytes.len(); + bytes.resize(prev_len + 32, 0); + value.to_big_endian(&mut bytes[prev_len..]); + } + Ok(bytes) + } +} + +pub(super) fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<Vec<u8>, D::Error> { + deserializer.deserialize_seq(SeqVisitor) +} + +#[cfg(test)] +mod tests { + use serde::{Deserialize, Serialize}; + use zksync_basic_types::{H256, U256}; + + use crate::SystemContractCode; + + /// Code with legacy serialization logic. + #[derive(Debug, Serialize, Deserialize)] + struct LegacySystemContractCode { + code: Vec<U256>, + hash: H256, + } + + impl From<&SystemContractCode> for LegacySystemContractCode { + fn from(value: &SystemContractCode) -> Self { + Self { + code: value.code.chunks(32).map(U256::from_big_endian).collect(), + hash: value.hash, + } + } + } + + fn test_code() -> SystemContractCode { + let mut code = vec![0; 32]; + code.extend_from_slice(&[0; 30]); + code.extend_from_slice(&[0xab, 0xcd]); + code.extend_from_slice(&[0x23; 32]); + + SystemContractCode { + hash: H256::repeat_byte(0x42), + code, + } + } + + #[test] + fn serializing_system_contract_code() { + let system_contract_code = test_code(); + let json = serde_json::to_value(&system_contract_code).unwrap(); + assert_eq!( + json, + serde_json::json!({ + "code": ["0x0", "0xabcd", "0x2323232323232323232323232323232323232323232323232323232323232323"], + "hash": "0x4242424242424242424242424242424242424242424242424242424242424242", + }) + ); + + let legacy_code = LegacySystemContractCode::from(&system_contract_code); + let legacy_json = serde_json::to_value(&legacy_code).unwrap(); + assert_eq!(legacy_json, json); + + let restored: SystemContractCode = serde_json::from_value(json).unwrap(); + assert_eq!(restored.code, system_contract_code.code); + assert_eq!(restored.hash, system_contract_code.hash); + } + + #[test] + fn serializing_system_contract_code_using_bincode() { + let system_contract_code = test_code(); + let bytes = bincode::serialize(&system_contract_code).unwrap(); + let restored: SystemContractCode = bincode::deserialize(&bytes).unwrap(); + assert_eq!(restored.code, system_contract_code.code); + assert_eq!(restored.hash, system_contract_code.hash); + + let legacy_code = LegacySystemContractCode::from(&system_contract_code); + let legacy_bytes = bincode::serialize(&legacy_code).unwrap(); + assert_eq!(legacy_bytes, bytes); + } +}
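Since `serde_bytecode` keeps the on-wire format identical to the legacy `Vec<U256>` encoding, any struct within the crate can opt into it with a field attribute. A usage sketch (illustrative, not from the PR; `Payload` and `round_trip` are hypothetical names):

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct Payload {
        // Serialized as a sequence of `U256` words, e.g. `["0x0", "0x0"]` in JSON.
        #[serde(with = "serde_bytecode")]
        code: Vec<u8>,
    }

    fn round_trip() {
        let payload = Payload { code: vec![0u8; 64] }; // two zero 32-byte words
        let json = serde_json::to_string(&payload).unwrap();
        assert_eq!(json, r#"{"code":["0x0","0x0"]}"#);
        let restored: Payload = serde_json::from_str(&json).unwrap();
        assert_eq!(restored.code, vec![0u8; 64]);
    }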
diff --git a/core/lib/contracts/src/test_contracts.rs b/core/lib/contracts/src/test_contracts.rs deleted file mode 100644 index eab1587f8335..000000000000 --- a/core/lib/contracts/src/test_contracts.rs +++ /dev/null @@ -1,64 +0,0 @@ -use ethabi::{ethereum_types::U256, Bytes, Token}; -use serde::Deserialize; - -use crate::get_loadnext_contract; - -#[derive(Debug, Clone, Deserialize)] -pub struct LoadnextContractExecutionParams { - pub reads: usize, - pub writes: usize, - pub events: usize, - pub hashes: usize, - pub recursive_calls: usize, - pub deploys: usize, -} - -impl LoadnextContractExecutionParams { - pub fn from_env() -> Option<Self> { - envy::prefixed("CONTRACT_EXECUTION_PARAMS_").from_env().ok() - } - - pub fn empty() -> Self { - Self { - reads: 0, - writes: 0, - events: 0, - hashes: 0, - recursive_calls: 0, - deploys: 0, - } - } -} - -impl Default for LoadnextContractExecutionParams { - fn default() -> Self { - Self { - reads: 10, - writes: 10, - events: 10, - hashes: 10, - recursive_calls: 1, - deploys: 1, - } - } -} - -impl LoadnextContractExecutionParams { - pub fn to_bytes(&self) -> Bytes { - let loadnext_contract = get_loadnext_contract(); - let contract_function = loadnext_contract.contract.function("execute").unwrap(); - - let params = vec![ - Token::Uint(U256::from(self.reads)), - Token::Uint(U256::from(self.writes)), - Token::Uint(U256::from(self.hashes)), - Token::Uint(U256::from(self.events)), - Token::Uint(U256::from(self.recursive_calls)), - Token::Uint(U256::from(self.deploys)), - ]; - - contract_function - .encode_input(&params) - .expect("failed to encode parameters") - } -} diff --git a/core/lib/crypto_primitives/Cargo.toml b/core/lib/crypto_primitives/Cargo.toml index 7efe5279b598..651609ec7949 100644 --- a/core/lib/crypto_primitives/Cargo.toml +++ b/core/lib/crypto_primitives/Cargo.toml @@ -15,7 +15,6 @@ categories.workspace = true secp256k1 = { workspace = true, features = ["global-context"] } sha2.workspace = true blake2.workspace = true -zksync_utils.workspace = true zksync_basic_types.workspace = true thiserror.workspace = true serde_json.workspace = true diff --git a/core/lib/crypto_primitives/src/packed_eth_signature.rs b/core/lib/crypto_primitives/src/packed_eth_signature.rs index 3d76de73560e..c4a26bf351b4 100644 --- a/core/lib/crypto_primitives/src/packed_eth_signature.rs +++ b/core/lib/crypto_primitives/src/packed_eth_signature.rs @@ -1,7 +1,6 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; use thiserror::Error; -use zksync_basic_types::{web3::keccak256, Address, H256}; -use zksync_utils::ZeroPrefixHexSerde; +use zksync_basic_types::{serde_wrappers::ZeroPrefixHexSerde, web3::keccak256, Address, H256}; use crate::{ ecdsa_signature::{ diff --git a/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json b/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json deleted file mode 100644 index 05b94ad249ac..000000000000 --- a/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n updated_at = NOW(),\n time_taken = $3,\n input_blob_url = $4\n WHERE\n l1_batch_number = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Int8", - "Time", - "Text" - ] - }, - "nullable": [] - }, - "hash": "0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04" -} diff --git a/core/lib/dal/.sqlx/query-0429f2fa683bdff6fc1ff5069de69d57dbfda4be1f70232afffca82a895d43e0.json b/core/lib/dal/.sqlx/query-12c062c6a5078ebcbde378126a3773e86be9876cd198610e0792322e2a0797af.json similarity index 50% rename from core/lib/dal/.sqlx/query-0429f2fa683bdff6fc1ff5069de69d57dbfda4be1f70232afffca82a895d43e0.json rename to core/lib/dal/.sqlx/query-12c062c6a5078ebcbde378126a3773e86be9876cd198610e0792322e2a0797af.json index 5693bdf987e5..0027377ae596 100644 --- a/core/lib/dal/.sqlx/query-0429f2fa683bdff6fc1ff5069de69d57dbfda4be1f70232afffca82a895d43e0.json +++ b/core/lib/dal/.sqlx/query-12c062c6a5078ebcbde378126a3773e86be9876cd198610e0792322e2a0797af.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n sl AS (\n SELECT\n (\n SELECT\n ARRAY[hashed_key, value] AS kv\n FROM\n storage_logs\n WHERE\n storage_logs.miniblock_number = $1\n AND storage_logs.hashed_key >= u.start_key\n AND storage_logs.hashed_key <= u.end_key\n ORDER BY\n storage_logs.hashed_key\n LIMIT\n
1\n )\n FROM\n UNNEST($2::bytea [], $3::bytea []) AS u (start_key, end_key)\n )\n \n SELECT\n sl.kv[1] AS \"hashed_key?\",\n sl.kv[2] AS \"value?\",\n initial_writes.index\n FROM\n sl\n LEFT OUTER JOIN initial_writes ON initial_writes.hashed_key = sl.kv[1]\n ", + "query": "\n WITH\n sl AS (\n SELECT\n (\n SELECT\n ARRAY[hashed_key, value] AS kv\n FROM\n storage_logs\n WHERE\n storage_logs.miniblock_number <= $1\n AND storage_logs.hashed_key >= u.start_key\n AND storage_logs.hashed_key <= u.end_key\n ORDER BY\n storage_logs.hashed_key\n LIMIT\n 1\n )\n FROM\n UNNEST($2::bytea [], $3::bytea []) AS u (start_key, end_key)\n )\n \n SELECT\n sl.kv[1] AS \"hashed_key?\",\n sl.kv[2] AS \"value?\",\n initial_writes.index\n FROM\n sl\n LEFT OUTER JOIN initial_writes ON initial_writes.hashed_key = sl.kv[1]\n ", "describe": { "columns": [ { @@ -32,5 +32,5 @@ true ] }, - "hash": "0429f2fa683bdff6fc1ff5069de69d57dbfda4be1f70232afffca82a895d43e0" + "hash": "12c062c6a5078ebcbde378126a3773e86be9876cd198610e0792322e2a0797af" } diff --git a/core/lib/dal/.sqlx/query-1689c212d411ebd99a22210519ea2d505a1aabf52ff4136d2ed1b39c70dd1632.json b/core/lib/dal/.sqlx/query-1689c212d411ebd99a22210519ea2d505a1aabf52ff4136d2ed1b39c70dd1632.json index 7b939d137db9..b84cd1bcba89 100644 --- a/core/lib/dal/.sqlx/query-1689c212d411ebd99a22210519ea2d505a1aabf52ff4136d2ed1b39c70dd1632.json +++ b/core/lib/dal/.sqlx/query-1689c212d411ebd99a22210519ea2d505a1aabf52ff4136d2ed1b39c70dd1632.json @@ -182,6 +182,16 @@ "ordinal": 35, "name": "upgrade_id", "type_info": "Int4" + }, + { + "ordinal": 36, + "name": "timestamp_asserter_range_start", + "type_info": "Timestamp" + }, + { + "ordinal": 37, + "name": "timestamp_asserter_range_end", + "type_info": "Timestamp" } ], "parameters": { @@ -223,6 +233,8 @@ false, true, true, + true, + true, true ] }, diff --git a/core/lib/dal/.sqlx/query-180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78.json b/core/lib/dal/.sqlx/query-180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78.json new file mode 100644 index 000000000000..b40bdca666b8 --- /dev/null +++ b/core/lib/dal/.sqlx/query-180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM l1_batches\n WHERE\n number > $1\n AND NOT is_sealed\n RETURNING number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78" +} diff --git a/core/lib/dal/.sqlx/query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json b/core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json similarity index 78% rename from core/lib/dal/.sqlx/query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json rename to core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json index dffd3ed8f9d2..48adcd412676 100644 --- a/core/lib/dal/.sqlx/query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json +++ b/core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n 
compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -175,8 +195,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8" + "hash": "1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7" } diff --git a/core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json b/core/lib/dal/.sqlx/query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json similarity index 83% rename from core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json rename to core/lib/dal/.sqlx/query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json index c8c438295e49..5c4ce3d6a4e3 100644 --- a/core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json +++ b/core/lib/dal/.sqlx/query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n 
l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n l2_da_validator_address,\n pubdata_type\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -92,6 +92,16 @@ "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" + }, + { + "ordinal": 18, + "name": "l2_da_validator_address", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "pubdata_type", + "type_info": "Text" } ], "parameters": { @@ -115,8 +125,10 @@ false, true, true, - true + true, + false, + false ] }, - "hash": "a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756" + "hash": "250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8" } diff --git a/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json b/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json deleted file mode 100644 index 9d8cc36189fc..000000000000 --- a/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN\n tee_verifier_input_producer_jobs AS inputs\n ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $1\n AND proofs.status = $2\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Text" - ] - }, - "nullable": [ - false - ] - }, - "hash": "2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d" -} diff --git a/core/lib/dal/.sqlx/query-442212bb5f28f234cd624f2acc27944b2acedce201da4454aadb79f3545713ae.json b/core/lib/dal/.sqlx/query-2ae0541e9af1a9966585a25dfe772cb2ea9f2209fe2c12dda6c72c96bdb496d3.json similarity index 72% rename from core/lib/dal/.sqlx/query-442212bb5f28f234cd624f2acc27944b2acedce201da4454aadb79f3545713ae.json rename to core/lib/dal/.sqlx/query-2ae0541e9af1a9966585a25dfe772cb2ea9f2209fe2c12dda6c72c96bdb496d3.json index 621295d4ab81..b706a9df4373 100644 --- a/core/lib/dal/.sqlx/query-442212bb5f28f234cd624f2acc27944b2acedce201da4454aadb79f3545713ae.json +++ b/core/lib/dal/.sqlx/query-2ae0541e9af1a9966585a25dfe772cb2ea9f2209fe2c12dda6c72c96bdb496d3.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n storage_logs.hashed_key,\n storage_logs.value,\n initial_writes.index\n FROM\n storage_logs\n INNER JOIN initial_writes ON storage_logs.hashed_key = initial_writes.hashed_key\n WHERE\n storage_logs.miniblock_number = $1\n AND storage_logs.hashed_key >= $2::bytea\n AND storage_logs.hashed_key <= $3::bytea\n ORDER BY\n storage_logs.hashed_key\n ", + "query": "\n SELECT\n storage_logs.hashed_key,\n storage_logs.value,\n initial_writes.index\n FROM\n storage_logs\n INNER JOIN initial_writes ON storage_logs.hashed_key = initial_writes.hashed_key\n WHERE\n storage_logs.miniblock_number <= $1\n AND storage_logs.hashed_key >= $2::bytea\n AND storage_logs.hashed_key <= $3::bytea\n 
ORDER BY\n storage_logs.hashed_key\n ", "describe": { "columns": [ { @@ -32,5 +32,5 @@ false ] }, - "hash": "442212bb5f28f234cd624f2acc27944b2acedce201da4454aadb79f3545713ae" + "hash": "2ae0541e9af1a9966585a25dfe772cb2ea9f2209fe2c12dda6c72c96bdb496d3" } diff --git a/core/lib/dal/.sqlx/query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json b/core/lib/dal/.sqlx/query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json index 1d515edba819..0db6ba6f51b6 100644 --- a/core/lib/dal/.sqlx/query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json +++ b/core/lib/dal/.sqlx/query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json @@ -69,7 +69,7 @@ false, false, false, - false, + true, false, false, true, diff --git a/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json b/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json deleted file mode 100644 index a273eb249a4e..000000000000 --- a/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n DELETE FROM tee_verifier_input_producer_jobs\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [] - }, - "nullable": [] - }, - "hash": "2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d" -} diff --git a/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json b/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json deleted file mode 100644 index 6012c6326515..000000000000 --- a/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n updated_at = NOW(),\n time_taken = $3,\n error = $4\n WHERE\n l1_batch_number = $2\n AND status != $5\n RETURNING\n tee_verifier_input_producer_jobs.attempts\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Int8", - "Time", - "Text", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - } - ] - }, - "nullable": [ - false - ] - }, - "hash": "3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79" -} diff --git a/core/lib/dal/.sqlx/query-55f4585be3d0f1a147cb10f6e59325fad494a512ba92df95439d2d7fe0f3a285.json b/core/lib/dal/.sqlx/query-398598e20f1892b47bf749b220f611345ef888824e0ca3c5f39befbbc5bd0388.json similarity index 65% rename from core/lib/dal/.sqlx/query-55f4585be3d0f1a147cb10f6e59325fad494a512ba92df95439d2d7fe0f3a285.json rename to core/lib/dal/.sqlx/query-398598e20f1892b47bf749b220f611345ef888824e0ca3c5f39befbbc5bd0388.json index ecf54f0417b8..ffe785d754ca 100644 --- a/core/lib/dal/.sqlx/query-55f4585be3d0f1a147cb10f6e59325fad494a512ba92df95439d2d7fe0f3a285.json +++ b/core/lib/dal/.sqlx/query-398598e20f1892b47bf749b220f611345ef888824e0ca3c5f39befbbc5bd0388.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE l1_batches\n SET\n commitment = $1,\n 
aux_data_hash = $2,\n pass_through_data_hash = $3,\n meta_parameters_hash = $4,\n l2_l1_merkle_root = $5,\n zkporter_is_available = $6,\n compressed_state_diffs = $7,\n compressed_initial_writes = $8,\n compressed_repeated_writes = $9,\n updated_at = NOW()\n WHERE\n number = $10\n AND commitment IS NULL\n ", + "query": "\n UPDATE l1_batches\n SET\n commitment = $1,\n aux_data_hash = $2,\n pass_through_data_hash = $3,\n meta_parameters_hash = $4,\n l2_l1_merkle_root = $5,\n zkporter_is_available = $6,\n compressed_state_diffs = $7,\n compressed_initial_writes = $8,\n compressed_repeated_writes = $9,\n state_diff_hash = $10,\n aggregation_root = $11,\n local_root = $12,\n updated_at = NOW()\n WHERE\n number = $13\n AND commitment IS NULL\n ", "describe": { "columns": [], "parameters": { @@ -14,10 +14,13 @@ "Bytea", "Bytea", "Bytea", + "Bytea", + "Bytea", + "Bytea", "Int8" ] }, "nullable": [] }, - "hash": "55f4585be3d0f1a147cb10f6e59325fad494a512ba92df95439d2d7fe0f3a285" + "hash": "398598e20f1892b47bf749b220f611345ef888824e0ca3c5f39befbbc5bd0388" } diff --git a/core/lib/dal/.sqlx/query-40a7a619ed490eb3848a45fdde787f3b5d4ba906ec9a38e10912e3c7832fc93e.json b/core/lib/dal/.sqlx/query-40a7a619ed490eb3848a45fdde787f3b5d4ba906ec9a38e10912e3c7832fc93e.json new file mode 100644 index 000000000000..1b68326b8962 --- /dev/null +++ b/core/lib/dal/.sqlx/query-40a7a619ed490eb3848a45fdde787f3b5d4ba906ec9a38e10912e3c7832fc93e.json @@ -0,0 +1,42 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n factory_deps.bytecode_hash,\n factory_deps.bytecode,\n transactions.data -> 'calldata' AS \"calldata?\",\n transactions.contract_address AS \"contract_address?\"\n FROM\n (\n SELECT\n miniblock_number,\n tx_hash,\n topic3\n FROM\n events\n WHERE\n address = $1\n AND topic1 = $2\n AND topic4 = $3\n LIMIT\n 1\n ) deploy_event\n JOIN factory_deps ON factory_deps.bytecode_hash = deploy_event.topic3\n LEFT JOIN transactions ON transactions.hash = deploy_event.tx_hash\n WHERE\n deploy_event.miniblock_number <= (\n SELECT\n MAX(number)\n FROM\n miniblocks\n )\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "bytecode_hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "bytecode", + "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "calldata?", + "type_info": "Jsonb" + }, + { + "ordinal": 3, + "name": "contract_address?", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Bytea" + ] + }, + "nullable": [ + false, + false, + null, + true + ] + }, + "hash": "40a7a619ed490eb3848a45fdde787f3b5d4ba906ec9a38e10912e3c7832fc93e" +} diff --git a/core/lib/dal/.sqlx/query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json b/core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json similarity index 70% rename from core/lib/dal/.sqlx/query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json rename to core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json index 8c22b4f92c4e..11bff1102932 100644 --- a/core/lib/dal/.sqlx/query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json +++ b/core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n 
compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -181,8 +201,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf" + "hash": "45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746" } diff --git a/core/lib/dal/.sqlx/query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json 
b/core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json similarity index 79% rename from core/lib/dal/.sqlx/query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json rename to core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json index e55d10d6f9a8..66d3e18075bf 100644 --- a/core/lib/dal/.sqlx/query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json +++ b/core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -179,8 +199,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7" + "hash": "4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970" } diff --git a/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json b/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json deleted file mode 100644 index f34c4a548cb3..000000000000 --- a/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n tee_verifier_input_producer_jobs\n WHERE\n 
l1_batch_number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78" -} diff --git a/core/lib/dal/.sqlx/query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json b/core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json similarity index 79% rename from core/lib/dal/.sqlx/query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json rename to core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json index 84f677a36c86..dfdb4b6c82e7 100644 --- a/core/lib/dal/.sqlx/query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json +++ b/core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -177,8 +197,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e" + "hash": "62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37" } diff --git 
a/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json b/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json deleted file mode 100644 index 01ede1d8643a..000000000000 --- a/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n tee_verifier_input_producer_jobs (\n l1_batch_number, status, created_at, updated_at\n )\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - } - ] - }, - "nullable": [] - }, - "hash": "6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199" -} diff --git a/core/lib/dal/.sqlx/query-72a4f50355324cce85ebaef9fa32826095e9290f0c1157094bd0c44e06012e42.json b/core/lib/dal/.sqlx/query-72a4f50355324cce85ebaef9fa32826095e9290f0c1157094bd0c44e06012e42.json index 707b7ce9e75c..2f4203aaa326 100644 --- a/core/lib/dal/.sqlx/query-72a4f50355324cce85ebaef9fa32826095e9290f0c1157094bd0c44e06012e42.json +++ b/core/lib/dal/.sqlx/query-72a4f50355324cce85ebaef9fa32826095e9290f0c1157094bd0c44e06012e42.json @@ -182,6 +182,16 @@ "ordinal": 35, "name": "upgrade_id", "type_info": "Int4" + }, + { + "ordinal": 36, + "name": "timestamp_asserter_range_start", + "type_info": "Timestamp" + }, + { + "ordinal": 37, + "name": "timestamp_asserter_range_end", + "type_info": "Timestamp" } ], "parameters": { @@ -225,6 +235,8 @@ false, true, true, + true, + true, true ] }, diff --git a/core/lib/dal/.sqlx/query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json b/core/lib/dal/.sqlx/query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json similarity index 83% rename from core/lib/dal/.sqlx/query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json rename to core/lib/dal/.sqlx/query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json index b8f8db874b63..6cc2e22382dd 100644 --- a/core/lib/dal/.sqlx/query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json +++ b/core/lib/dal/.sqlx/query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.evm_emulator_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS 
\"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.evm_emulator_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\",\n miniblocks.l2_da_validator_address AS \"l2_da_validator_address!\",\n miniblocks.pubdata_type AS \"pubdata_type!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", "describe": { "columns": [ { @@ -72,6 +72,16 @@ "ordinal": 13, "name": "fee_account_address!", "type_info": "Bytea" + }, + { + "ordinal": 14, + "name": "l2_da_validator_address!", + "type_info": "Bytea" + }, + { + "ordinal": 15, + "name": "pubdata_type!", + "type_info": "Text" } ], "parameters": { @@ -94,8 +104,10 @@ false, false, true, + false, + false, false ] }, - "hash": "2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f" + "hash": "7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2" } diff --git a/core/lib/dal/.sqlx/query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json b/core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json similarity index 80% rename from core/lib/dal/.sqlx/query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json rename to core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json index 80a6946026b0..f4e08abe31c5 100644 --- a/core/lib/dal/.sqlx/query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json +++ b/core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n is_sealed\n AND number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n 
WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -177,8 +197,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960" + "hash": "77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9" } diff --git a/core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json b/core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json similarity index 55% rename from core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json rename to core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json index 35c606bf22bb..f89f531c4463 100644 --- a/core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json +++ b/core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n NOW(),\n NOW()\n )\n ", + "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n l2_da_validator_address,\n pubdata_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n NOW(),\n NOW()\n )\n ", "describe": { "columns": [], "parameters": { @@ -22,10 +22,12 @@ "Int8", "Int8", "Int8", - "Bytea" + "Bytea", + "Bytea", + "Text" ] }, "nullable": [] }, - "hash": "34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04" + "hash": "7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba" } diff --git a/core/lib/dal/.sqlx/query-8ab1634beba74aaef952562a3bcc84b0dd496700a61569929dcc7602ec678b09.json b/core/lib/dal/.sqlx/query-8ab1634beba74aaef952562a3bcc84b0dd496700a61569929dcc7602ec678b09.json deleted file mode 100644 index 5869c1d37a04..000000000000 --- a/core/lib/dal/.sqlx/query-8ab1634beba74aaef952562a3bcc84b0dd496700a61569929dcc7602ec678b09.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n factory_deps.bytecode,\n transactions.data AS \"data?\",\n transactions.contract_address AS \"contract_address?\"\n FROM\n (\n SELECT\n miniblock_number,\n tx_hash,\n topic3\n 
FROM\n events\n WHERE\n address = $1\n AND topic1 = $2\n AND topic4 = $3\n LIMIT\n 1\n ) deploy_event\n JOIN factory_deps ON factory_deps.bytecode_hash = deploy_event.topic3\n LEFT JOIN transactions ON transactions.hash = deploy_event.tx_hash\n WHERE\n deploy_event.miniblock_number <= (\n SELECT\n MAX(number)\n FROM\n miniblocks\n )\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "bytecode", - "type_info": "Bytea" - }, - { - "ordinal": 1, - "name": "data?", - "type_info": "Jsonb" - }, - { - "ordinal": 2, - "name": "contract_address?", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Bytea" - ] - }, - "nullable": [ - false, - true, - true - ] - }, - "hash": "8ab1634beba74aaef952562a3bcc84b0dd496700a61569929dcc7602ec678b09" -} diff --git a/core/lib/dal/.sqlx/query-a115f795672787fe25bfaa8fd345094de508af93f4085be7cf3b54b1e8ecdadd.json b/core/lib/dal/.sqlx/query-a115f795672787fe25bfaa8fd345094de508af93f4085be7cf3b54b1e8ecdadd.json index ebe8ce232cfb..ac7989a5be77 100644 --- a/core/lib/dal/.sqlx/query-a115f795672787fe25bfaa8fd345094de508af93f4085be7cf3b54b1e8ecdadd.json +++ b/core/lib/dal/.sqlx/query-a115f795672787fe25bfaa8fd345094de508af93f4085be7cf3b54b1e8ecdadd.json @@ -67,7 +67,7 @@ false, false, false, - false, + true, false, false, true, diff --git a/core/lib/dal/.sqlx/query-a36135b5908992324c4308f549ea77a428820fdcea9969aff3b29ca16727357b.json b/core/lib/dal/.sqlx/query-a36135b5908992324c4308f549ea77a428820fdcea9969aff3b29ca16727357b.json index 1d27af2bbc1a..50d3ce5188df 100644 --- a/core/lib/dal/.sqlx/query-a36135b5908992324c4308f549ea77a428820fdcea9969aff3b29ca16727357b.json +++ b/core/lib/dal/.sqlx/query-a36135b5908992324c4308f549ea77a428820fdcea9969aff3b29ca16727357b.json @@ -182,6 +182,16 @@ "ordinal": 35, "name": "upgrade_id", "type_info": "Int4" + }, + { + "ordinal": 36, + "name": "timestamp_asserter_range_start", + "type_info": "Timestamp" + }, + { + "ordinal": 37, + "name": "timestamp_asserter_range_end", + "type_info": "Timestamp" } ], "parameters": { @@ -228,6 +238,8 @@ false, true, true, + true, + true, true ] }, diff --git a/core/lib/dal/.sqlx/query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json b/core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json similarity index 73% rename from core/lib/dal/.sqlx/query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json rename to core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json index 4f138822ad1b..9a93ba45978e 100644 --- a/core/lib/dal/.sqlx/query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json +++ b/core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n 
FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -178,8 +198,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8" + "hash": "a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789" } diff --git a/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json b/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json deleted file mode 100644 index b17b58282110..000000000000 --- a/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_proof_generation_details\n SET\n status = $1,\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n tee_type = $2\n AND l1_batch_number = (\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN\n tee_verifier_input_producer_jobs AS inputs\n ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $3\n AND (\n proofs.status = $4\n OR (\n proofs.status = $1\n AND proofs.prover_taken_at < NOW() - $5::INTERVAL\n )\n )\n AND proofs.l1_batch_number >= $6\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Text", - "Text", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } 
- }, - "Text", - "Interval", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0" -} diff --git a/core/lib/dal/.sqlx/query-ab3f97cf96ef769346703e0c132802690bdd139505ba22be3655e306773abc77.json b/core/lib/dal/.sqlx/query-ab3f97cf96ef769346703e0c132802690bdd139505ba22be3655e306773abc77.json new file mode 100644 index 000000000000..2edb0822ac69 --- /dev/null +++ b/core/lib/dal/.sqlx/query-ab3f97cf96ef769346703e0c132802690bdd139505ba22be3655e306773abc77.json @@ -0,0 +1,42 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n nonce,\n signature,\n gas_limit,\n max_fee_per_gas,\n max_priority_fee_per_gas,\n gas_per_pubdata_limit,\n input,\n data,\n tx_format,\n contract_address,\n value,\n paymaster,\n paymaster_input,\n execution_info,\n received_at,\n timestamp_asserter_range_start,\n timestamp_asserter_range_end,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n FALSE,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n JSONB_BUILD_OBJECT(\n 'gas_used',\n $16::BIGINT,\n 'storage_writes',\n $17::INT,\n 'contracts_used',\n $18::INT\n ),\n $19,\n $20,\n $21,\n NOW(),\n NOW()\n )\n ON CONFLICT (initiator_address, nonce) DO\n UPDATE\n SET\n hash = $1,\n signature = $4,\n gas_limit = $5,\n max_fee_per_gas = $6,\n max_priority_fee_per_gas = $7,\n gas_per_pubdata_limit = $8,\n input = $9,\n data = $10,\n tx_format = $11,\n contract_address = $12,\n value = $13,\n paymaster = $14,\n paymaster_input = $15,\n execution_info\n = JSONB_BUILD_OBJECT(\n 'gas_used',\n $16::BIGINT,\n 'storage_writes',\n $17::INT,\n 'contracts_used',\n $18::INT\n ),\n in_mempool = FALSE,\n received_at = $19,\n timestamp_asserter_range_start = $20,\n timestamp_asserter_range_end = $21,\n created_at = NOW(),\n updated_at = NOW(),\n error = NULL\n WHERE\n transactions.is_priority = FALSE\n AND transactions.miniblock_number IS NULL\n RETURNING\n (\n SELECT\n hash\n FROM\n transactions\n WHERE\n transactions.initiator_address = $2\n AND transactions.nonce = $3\n ) IS NOT NULL AS \"is_replaced!\"\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "is_replaced!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Int8", + "Bytea", + "Numeric", + "Numeric", + "Numeric", + "Numeric", + "Bytea", + "Jsonb", + "Int4", + "Bytea", + "Numeric", + "Bytea", + "Bytea", + "Int8", + "Int4", + "Int4", + "Timestamp", + "Timestamp", + "Timestamp" + ] + }, + "nullable": [ + null + ] + }, + "hash": "ab3f97cf96ef769346703e0c132802690bdd139505ba22be3655e306773abc77" +} diff --git a/core/lib/dal/.sqlx/query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json b/core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json similarity index 73% rename from core/lib/dal/.sqlx/query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json rename to core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json index afac14e6d5cd..8a68b1a9b9bd 100644 --- a/core/lib/dal/.sqlx/query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json +++ b/core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n 
commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -180,8 +200,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b" + "hash": "b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd" } diff --git a/core/lib/dal/.sqlx/query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json b/core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json similarity index 77% rename from core/lib/dal/.sqlx/query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json rename to core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json index 4eae4f778cee..f97ea8a6ccd5 100644 --- 
a/core/lib/dal/.sqlx/query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json +++ b/core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -142,6 +142,26 @@ "ordinal": 27, "name": "fee_address", "type_info": "Bytea" + }, + { + "ordinal": 28, + "name": "aggregation_root", + "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -177,8 +197,12 @@ true, true, true, - false + false, + true, + true, + true, + true ] }, - "hash": "5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2" + "hash": "c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b" } diff --git a/core/lib/dal/.sqlx/query-ca428423f278feea2942fd2c78fc5223c9d5e2e42d89bb456d24c601edc06a05.json b/core/lib/dal/.sqlx/query-ca428423f278feea2942fd2c78fc5223c9d5e2e42d89bb456d24c601edc06a05.json deleted file mode 100644 index c234cbe42356..000000000000 --- a/core/lib/dal/.sqlx/query-ca428423f278feea2942fd2c78fc5223c9d5e2e42d89bb456d24c601edc06a05.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n nonce,\n signature,\n gas_limit,\n max_fee_per_gas,\n max_priority_fee_per_gas,\n gas_per_pubdata_limit,\n input,\n data,\n tx_format,\n contract_address,\n value,\n paymaster,\n paymaster_input,\n execution_info,\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n FALSE,\n $2,\n $3,\n 
$4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n JSONB_BUILD_OBJECT(\n 'gas_used',\n $16::BIGINT,\n 'storage_writes',\n $17::INT,\n 'contracts_used',\n $18::INT\n ),\n $19,\n NOW(),\n NOW()\n )\n ON CONFLICT (initiator_address, nonce) DO\n UPDATE\n SET\n hash = $1,\n signature = $4,\n gas_limit = $5,\n max_fee_per_gas = $6,\n max_priority_fee_per_gas = $7,\n gas_per_pubdata_limit = $8,\n input = $9,\n data = $10,\n tx_format = $11,\n contract_address = $12,\n value = $13,\n paymaster = $14,\n paymaster_input = $15,\n execution_info\n = JSONB_BUILD_OBJECT(\n 'gas_used',\n $16::BIGINT,\n 'storage_writes',\n $17::INT,\n 'contracts_used',\n $18::INT\n ),\n in_mempool = FALSE,\n received_at = $19,\n created_at = NOW(),\n updated_at = NOW(),\n error = NULL\n WHERE\n transactions.is_priority = FALSE\n AND transactions.miniblock_number IS NULL\n RETURNING\n (\n SELECT\n hash\n FROM\n transactions\n WHERE\n transactions.initiator_address = $2\n AND transactions.nonce = $3\n ) IS NOT NULL AS \"is_replaced!\"\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "is_replaced!", - "type_info": "Bool" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Int8", - "Bytea", - "Numeric", - "Numeric", - "Numeric", - "Numeric", - "Bytea", - "Jsonb", - "Int4", - "Bytea", - "Numeric", - "Bytea", - "Bytea", - "Int8", - "Int4", - "Int4", - "Timestamp" - ] - }, - "nullable": [ - null - ] - }, - "hash": "ca428423f278feea2942fd2c78fc5223c9d5e2e42d89bb456d24c601edc06a05" -} diff --git a/core/lib/dal/.sqlx/query-cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320.json b/core/lib/dal/.sqlx/query-cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320.json new file mode 100644 index 000000000000..4b219bfee0a5 --- /dev/null +++ b/core/lib/dal/.sqlx/query-cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH upsert AS (\n SELECT\n p.l1_batch_number\n FROM\n proof_generation_details p\n LEFT JOIN\n tee_proof_generation_details tee\n ON\n p.l1_batch_number = tee.l1_batch_number\n AND tee.tee_type = $1\n WHERE\n (\n p.l1_batch_number >= $5\n AND p.vm_run_data_blob_url IS NOT NULL\n AND p.proof_gen_data_blob_url IS NOT NULL\n )\n AND (\n tee.l1_batch_number IS NULL\n OR (\n tee.status = $3\n OR (\n tee.status = $2\n AND tee.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n )\n FETCH FIRST ROW ONLY\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Interval", + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320" +} diff --git a/core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json b/core/lib/dal/.sqlx/query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json similarity index 84% rename from core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json rename to 
core/lib/dal/.sqlx/query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json index 700352c1a8bf..111234e02b75 100644 --- a/core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json +++ b/core/lib/dal/.sqlx/query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n l2_da_validator_address,\n pubdata_type\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -92,6 +92,16 @@ "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" + }, + { + "ordinal": 18, + "name": "l2_da_validator_address", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "pubdata_type", + "type_info": "Text" } ], "parameters": { @@ -117,8 +127,10 @@ false, true, true, - true + true, + false, + false ] }, - "hash": "f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8" + "hash": "d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9" } diff --git a/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json b/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json deleted file mode 100644 index fa1a5d6741ad..000000000000 --- a/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n tee_verifier_input_producer_jobs\n WHERE\n status = $2\n OR (\n status = $1\n AND processing_started_at < NOW() - $4::INTERVAL\n )\n OR (\n status = $3\n AND attempts < $5\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_verifier_input_producer_jobs.l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Interval", - "Int2" - ] - }, - "nullable": [ - false - ] - }, - "hash": 
"d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860" -} diff --git a/core/lib/dal/.sqlx/query-eb27e1b82b8ecbb9711c417888564a8e245ecee4866264d38146938595b07f37.json b/core/lib/dal/.sqlx/query-eb27e1b82b8ecbb9711c417888564a8e245ecee4866264d38146938595b07f37.json index 2419082dcc23..079ce55bd569 100644 --- a/core/lib/dal/.sqlx/query-eb27e1b82b8ecbb9711c417888564a8e245ecee4866264d38146938595b07f37.json +++ b/core/lib/dal/.sqlx/query-eb27e1b82b8ecbb9711c417888564a8e245ecee4866264d38146938595b07f37.json @@ -182,6 +182,16 @@ "ordinal": 35, "name": "upgrade_id", "type_info": "Int4" + }, + { + "ordinal": 36, + "name": "timestamp_asserter_range_start", + "type_info": "Timestamp" + }, + { + "ordinal": 37, + "name": "timestamp_asserter_range_end", + "type_info": "Timestamp" } ], "parameters": { @@ -226,6 +236,8 @@ false, true, true, + true, + true, true ] }, diff --git a/core/lib/dal/.sqlx/query-f023e5fa599b279acd6ac02dffb7a33a8fea8ab7fdefb7d9210673245a2a6f6c.json b/core/lib/dal/.sqlx/query-f023e5fa599b279acd6ac02dffb7a33a8fea8ab7fdefb7d9210673245a2a6f6c.json index 2cd001b274da..8c43f8865ac7 100644 --- a/core/lib/dal/.sqlx/query-f023e5fa599b279acd6ac02dffb7a33a8fea8ab7fdefb7d9210673245a2a6f6c.json +++ b/core/lib/dal/.sqlx/query-f023e5fa599b279acd6ac02dffb7a33a8fea8ab7fdefb7d9210673245a2a6f6c.json @@ -182,6 +182,16 @@ "ordinal": 35, "name": "upgrade_id", "type_info": "Int4" + }, + { + "ordinal": 36, + "name": "timestamp_asserter_range_start", + "type_info": "Timestamp" + }, + { + "ordinal": 37, + "name": "timestamp_asserter_range_end", + "type_info": "Timestamp" } ], "parameters": { @@ -225,6 +235,8 @@ false, true, true, + true, + true, true ] }, diff --git a/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json b/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json new file mode 100644 index 000000000000..12e28266fbcc --- /dev/null +++ b/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n WHERE\n proofs.status = $1\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662" +} diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index db03b8de9825..4b093dd181bb 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -15,7 +15,6 @@ links = "zksync_dal_proto" [dependencies] vise.workspace = true zksync_vm_interface.workspace = true -zksync_utils.workspace = true zksync_system_constants.workspace = true zksync_contracts.workspace = true zksync_types.workspace = true @@ -56,7 +55,7 @@ tracing.workspace = true chrono = { workspace = true, features = ["serde"] } [dev-dependencies] -zksync_test_account.workspace = true +zksync_test_contracts.workspace = true zksync_concurrency.workspace = true [build-dependencies] diff --git a/core/lib/dal/migrations/20240925103531_gateway_upgrade.down.sql b/core/lib/dal/migrations/20240925103531_gateway_upgrade.down.sql new file mode 100644 index 000000000000..9af34d7dc8ee --- /dev/null +++ b/core/lib/dal/migrations/20240925103531_gateway_upgrade.down.sql @@ -0,0 +1,8 @@ +ALTER TABLE l1_batches DROP COLUMN IF 
EXISTS state_diff_hash;
+
+ALTER TABLE l1_batches DROP COLUMN IF EXISTS aggregation_root;
+ALTER TABLE l1_batches DROP COLUMN IF EXISTS local_root;
+
+ALTER TABLE miniblocks
+    DROP COLUMN IF EXISTS l2_da_validator_address,
+    DROP COLUMN IF EXISTS pubdata_type;
diff --git a/core/lib/dal/migrations/20240925103531_gateway_upgrade.up.sql b/core/lib/dal/migrations/20240925103531_gateway_upgrade.up.sql
new file mode 100644
index 000000000000..a58464f6ebb3
--- /dev/null
+++ b/core/lib/dal/migrations/20240925103531_gateway_upgrade.up.sql
@@ -0,0 +1,11 @@
+ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS state_diff_hash BYTEA;
+
+ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS aggregation_root BYTEA;
+ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS local_root BYTEA;
+
+ALTER TABLE miniblocks
+    ADD COLUMN IF NOT EXISTS l2_da_validator_address BYTEA NOT NULL DEFAULT '\x0000000000000000000000000000000000000000'::bytea,
+    -- There are miniblocks that used the 'Rollup' type but were actually produced on a Validium chain.
+    -- This is okay, since this field represents how the VM works with the DA, rather than what is committed on L1.
+    ADD COLUMN IF NOT EXISTS pubdata_type TEXT NOT NULL DEFAULT 'Rollup';
+-- ^ Add a default value so that DB queries don't fail even if the DB migration is not completed.
diff --git a/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql
new file mode 100644
index 000000000000..707ce306365c
--- /dev/null
+++ b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql
@@ -0,0 +1,20 @@
+CREATE TABLE tee_verifier_input_producer_jobs (
+    l1_batch_number BIGINT NOT NULL,
+    status TEXT NOT NULL,
+    signature BYTEA,
+    pubkey BYTEA,
+    proof BYTEA,
+    tee_type TEXT NOT NULL,
+    created_at TIMESTAMP NOT NULL,
+    updated_at TIMESTAMP NOT NULL,
+    prover_taken_at TIMESTAMP,
+    PRIMARY KEY (l1_batch_number, tee_type),
+    CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey FOREIGN KEY (l1_batch_number) REFERENCES tee_verifier_input_producer_jobs(l1_batch_number) ON DELETE CASCADE,
+    CONSTRAINT tee_proof_generation_details_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES tee_attestations(pubkey) ON DELETE SET NULL
+);
+
+ALTER TABLE tee_proof_generation_details
+    ADD CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey
+    FOREIGN KEY (l1_batch_number)
+    REFERENCES tee_verifier_input_producer_jobs(l1_batch_number)
+    ON DELETE CASCADE;
diff --git a/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql
new file mode 100644
index 000000000000..c2417ba86b3b
--- /dev/null
+++ b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql
@@ -0,0 +1,3 @@
+ALTER TABLE tee_proof_generation_details DROP CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey;
+
+DROP TABLE IF EXISTS tee_verifier_input_producer_jobs;
diff --git a/core/lib/dal/migrations/20241008073940_add_block_timestamp_asserter.down.sql b/core/lib/dal/migrations/20241008073940_add_block_timestamp_asserter.down.sql
new file mode 100644
index 000000000000..87f6a8cb75a0
--- /dev/null
+++ b/core/lib/dal/migrations/20241008073940_add_block_timestamp_asserter.down.sql
@@ -0,0 +1,3 @@
+ALTER TABLE transactions
+DROP COLUMN timestamp_asserter_range_start,
+DROP COLUMN timestamp_asserter_range_end;
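Both new `miniblocks` columns in the gateway migration above are added with `NOT NULL` defaults instead of a backfill, so reads keep working while the migration is in flight. A minimal sketch of the resulting read path (the helper function is hypothetical and uses raw `sqlx` rather than the repo's compile-time-checked `sqlx::query!` macros):

```rust
use sqlx::PgPool;

/// Hypothetical helper: rows written before the gateway migration carry no
/// explicit values for the new columns, but thanks to the NOT NULL defaults
/// they read back as the zero address and 'Rollup' respectively.
async fn read_pubdata_params(pool: &PgPool, number: i64) -> sqlx::Result<(Vec<u8>, String)> {
    sqlx::query_as(
        "SELECT l2_da_validator_address, pubdata_type FROM miniblocks WHERE number = $1",
    )
    .bind(number)
    .fetch_one(pool)
    .await
}
```

diff --git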
a/core/lib/dal/migrations/20241008073940_add_block_timestamp_asserter.up.sql b/core/lib/dal/migrations/20241008073940_add_block_timestamp_asserter.up.sql new file mode 100644 index 000000000000..103a22cb8e3c --- /dev/null +++ b/core/lib/dal/migrations/20241008073940_add_block_timestamp_asserter.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE transactions +ADD COLUMN timestamp_asserter_range_start TIMESTAMP DEFAULT NULL, +ADD COLUMN timestamp_asserter_range_end TIMESTAMP DEFAULT NULL; diff --git a/core/lib/dal/migrations/20241106093512_make_zk_compiler_version_nullable.down.sql b/core/lib/dal/migrations/20241106093512_make_zk_compiler_version_nullable.down.sql new file mode 100644 index 000000000000..2693a565fd02 --- /dev/null +++ b/core/lib/dal/migrations/20241106093512_make_zk_compiler_version_nullable.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE contract_verification_requests + ALTER COLUMN zk_compiler_version SET NOT NULL; diff --git a/core/lib/dal/migrations/20241106093512_make_zk_compiler_version_nullable.up.sql b/core/lib/dal/migrations/20241106093512_make_zk_compiler_version_nullable.up.sql new file mode 100644 index 000000000000..92a689956f55 --- /dev/null +++ b/core/lib/dal/migrations/20241106093512_make_zk_compiler_version_nullable.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE contract_verification_requests + ALTER COLUMN zk_compiler_version DROP NOT NULL; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index bf1b48130c40..943aa12caf75 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -344,10 +344,17 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE is_sealed AND number = $1 @@ -841,6 +848,8 @@ impl BlocksDal<'_, '_> { fair_pubdata_price, gas_limit, logs_bloom, + l2_da_validator_address, + pubdata_type, created_at, updated_at ) @@ -864,6 +873,8 @@ impl BlocksDal<'_, '_> { $16, $17, $18, + $19, + $20, NOW(), NOW() ) @@ -896,6 +907,11 @@ impl BlocksDal<'_, '_> { l2_block_header.batch_fee_input.fair_pubdata_price() as i64, l2_block_header.gas_limit as i64, l2_block_header.logs_bloom.as_bytes(), + l2_block_header + .pubdata_params + .l2_da_validator_address + .as_bytes(), + l2_block_header.pubdata_params.pubdata_type.to_string(), ); instrumentation.with(query).execute(self.storage).await?; @@ -924,7 +940,9 @@ impl BlocksDal<'_, '_> { virtual_blocks, fair_pubdata_price, gas_limit, - logs_bloom + logs_bloom, + l2_da_validator_address, + pubdata_type FROM miniblocks ORDER BY @@ -965,7 +983,9 @@ impl BlocksDal<'_, '_> { virtual_blocks, fair_pubdata_price, gas_limit, - logs_bloom + logs_bloom, + l2_da_validator_address, + pubdata_type FROM miniblocks WHERE @@ -1062,9 +1082,12 @@ impl BlocksDal<'_, '_> { compressed_state_diffs = $7, compressed_initial_writes = $8, compressed_repeated_writes = $9, + state_diff_hash = $10, + aggregation_root = $11, + local_root = $12, updated_at = NOW() WHERE - number = $10 + number = $13 AND commitment IS NULL "#, commitment_artifacts.commitment_hash.commitment.as_bytes(), @@ -1082,6 +1105,9 @@ impl BlocksDal<'_, '_> { commitment_artifacts.compressed_state_diffs, commitment_artifacts.compressed_initial_writes, commitment_artifacts.compressed_repeated_writes, + 
commitment_artifacts.state_diff_hash.as_bytes(), + commitment_artifacts.aggregation_root.as_bytes(), + commitment_artifacts.local_root.as_bytes(), i64::from(number.0), ) .instrument("save_l1_batch_commitment_artifacts") @@ -1189,10 +1215,17 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE number = 0 OR eth_commit_tx_id IS NOT NULL @@ -1377,10 +1410,17 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE eth_commit_tx_id IS NOT NULL AND eth_prove_tx_id IS NULL @@ -1459,7 +1499,11 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM ( SELECT @@ -1480,6 +1524,7 @@ impl BlocksDal<'_, '_> { $2 ) inn LEFT JOIN commitments ON commitments.l1_batch_number = inn.number + LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number WHERE number - row_number = $1 "#, @@ -1534,10 +1579,17 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL @@ -1663,10 +1715,17 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE number BETWEEN $1 AND $2 ORDER BY @@ -1729,11 +1788,18 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE eth_commit_tx_id IS NULL AND number != 0 @@ -1809,7 +1875,11 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, - fee_address + fee_address, + aggregation_root, + local_root, + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -2058,6 +2128,37 @@ impl BlocksDal<'_, 
'_> {
         Ok(())
     }
 
+    /// Deletes the unsealed L1 batch from the storage. Expects the caller to make sure there are no
+    /// associated L2 blocks.
+    ///
+    /// Accepts `batch_to_keep` as a safety mechanism.
+    pub async fn delete_unsealed_l1_batch(
+        &mut self,
+        batch_to_keep: L1BatchNumber,
+    ) -> DalResult<()> {
+        let deleted_row = sqlx::query!(
+            r#"
+            DELETE FROM l1_batches
+            WHERE
+                number > $1
+                AND NOT is_sealed
+            RETURNING number
+            "#,
+            i64::from(batch_to_keep.0)
+        )
+        .instrument("delete_unsealed_l1_batch")
+        .with_arg("batch_to_keep", &batch_to_keep)
+        .fetch_optional(self.storage)
+        .await?;
+        if let Some(deleted_row) = deleted_row {
+            tracing::info!(
+                l1_batch_number = %deleted_row.number,
+                "Deleted unsealed batch"
+            );
+        }
+        Ok(())
+    }
+
     /// Deletes all L1 batches from the storage so that the specified batch number is the last one left.
     pub async fn delete_l1_batches(&mut self, last_batch_to_keep: L1BatchNumber) -> DalResult<()> {
         self.delete_l1_batches_inner(Some(last_batch_to_keep)).await
@@ -2184,6 +2285,20 @@
         Ok(Some((L2BlockNumber(min as u32), L2BlockNumber(max as u32))))
     }
 
+    /// Returns `true` if there exists a non-sealed batch (i.e., there is at least one stored L2 block
+    /// that isn't assigned to any batch yet).
+    pub async fn pending_batch_exists(&mut self) -> DalResult<bool> {
+        let count = sqlx::query_scalar!(
+            "SELECT COUNT(miniblocks.number) FROM miniblocks WHERE l1_batch_number IS NULL"
+        )
+        .instrument("pending_batch_exists")
+        .fetch_one(self.storage)
+        .await?
+        .unwrap_or(0);
+
+        Ok(count != 0)
+    }
+
     // methods used for measuring Eth tx stage transition latencies
     // and emitting metrics based on these measured data
     pub async fn oldest_uncommitted_batch_timestamp(&mut self) -> DalResult<Option<u64>> {
diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs
index 4cb577986380..4699eac4e5eb 100644
--- a/core/lib/dal/src/blocks_web3_dal.rs
+++ b/core/lib/dal/src/blocks_web3_dal.rs
@@ -11,12 +11,11 @@ use zksync_types::{
     web3::{BlockHeader, Bytes},
     Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H160, H256, U256, U64,
 };
-use zksync_utils::bigdecimal_to_u256;
 use zksync_vm_interface::Call;
 
 use crate::{
     models::{
-        parse_protocol_version,
+        bigdecimal_to_u256, parse_protocol_version,
         storage_block::{
             ResolvedL1BatchForL2Block, StorageBlockDetails, StorageL1BatchDetails,
             LEGACY_BLOCK_GAS_LIMIT,
@@ -803,7 +802,7 @@ mod tests {
         block::{L2BlockHasher, L2BlockHeader},
         Address, L2BlockNumber, ProtocolVersion, ProtocolVersionId,
     };
-    use zksync_vm_interface::TransactionExecutionMetrics;
+    use zksync_vm_interface::{tracer::ValidationTraces, TransactionExecutionMetrics};
 
     use super::*;
     use crate::{
@@ -1090,7 +1089,11 @@ mod tests {
         let mut tx_results = vec![];
         for (i, tx) in transactions.into_iter().enumerate() {
             conn.transactions_dal()
-                .insert_transaction_l2(&tx, TransactionExecutionMetrics::default())
+                .insert_transaction_l2(
+                    &tx,
+                    TransactionExecutionMetrics::default(),
+                    ValidationTraces::default(),
+                )
                 .await
                 .unwrap();
             let mut tx_result = mock_execution_result(tx);
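The two `BlocksDal` helpers added above are small, but their interplay matters during node restarts: an unsealed batch can be rolled back precisely because no L2 blocks are attached to it yet, while `pending_batch_exists` tells the state keeper whether orphaned L2 blocks still need a batch. A hypothetical caller, sketched under the assumption that it runs inside startup recovery (everything except the two DAL methods is illustrative):

```rust
use zksync_dal::{Connection, Core, CoreDal, DalResult};
use zksync_types::L1BatchNumber;

/// Illustrative sketch (not part of the PR): roll back any batch opened after
/// the last sealed one, then check whether L2 blocks still await a batch.
async fn recover_batch_state(
    storage: &mut Connection<'_, Core>,
    last_sealed_batch: L1BatchNumber,
) -> DalResult<()> {
    // Safe only because unsealed batches have no associated L2 blocks;
    // `batch_to_keep` guards against deleting sealed history.
    storage
        .blocks_dal()
        .delete_unsealed_l1_batch(last_sealed_batch)
        .await?;

    if storage.blocks_dal().pending_batch_exists().await? {
        tracing::info!("unassigned L2 blocks found; a new pending batch will be opened");
    }
    Ok(())
}
```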
diff --git a/core/lib/dal/src/consensus/conv.rs b/core/lib/dal/src/consensus/conv.rs
index 269c47fa2dd1..3153343d6014 100644
--- a/core/lib/dal/src/consensus/conv.rs
+++ b/core/lib/dal/src/consensus/conv.rs
@@ -2,19 +2,21 @@
 use anyhow::{anyhow, Context as _};
 use zksync_concurrency::net;
 use zksync_consensus_roles::{attester, node};
-use zksync_protobuf::{read_required, required, ProtoFmt, ProtoRepr};
+use zksync_protobuf::{read_optional_repr, read_required, required, ProtoFmt, ProtoRepr};
 use zksync_types::{
-    abi, ethabi,
+    abi,
+    commitment::{L1BatchCommitmentMode, PubdataParams},
+    ethabi,
     fee::Fee,
+    h256_to_u256,
     l1::{OpProcessingType, PriorityQueueType},
     l2::TransactionType,
     parse_h160, parse_h256,
     protocol_upgrade::ProtocolUpgradeTxCommonData,
     transaction_request::PaymasterParams,
-    Execute, ExecuteTransactionCommon, InputData, L1BatchNumber, L1TxCommonData, L2TxCommonData,
-    Nonce, PriorityOpId, ProtocolVersionId, Transaction, H256,
+    u256_to_h256, Execute, ExecuteTransactionCommon, InputData, L1BatchNumber, L1TxCommonData,
+    L2TxCommonData, Nonce, PriorityOpId, ProtocolVersionId, Transaction, H256,
 };
-use zksync_utils::{h256_to_u256, u256_to_h256};
 
 use super::*;
 
@@ -102,6 +104,31 @@ impl ProtoFmt for AttestationStatus {
     }
 }
 
+impl ProtoRepr for proto::PubdataParams {
+    type Type = PubdataParams;
+
+    fn read(&self) -> anyhow::Result<Self::Type> {
+        Ok(Self::Type {
+            l2_da_validator_address: required(&self.l2_da_validator_address)
+                .and_then(|a| parse_h160(a))
+                .context("l2_da_validator_address")?,
+            pubdata_type: required(&self.pubdata_type)
+                .and_then(|x| Ok(proto::L1BatchCommitDataGeneratorMode::try_from(*x)?))
+                .context("pubdata_type")?
+                .parse(),
+        })
+    }
+
+    fn build(this: &Self::Type) -> Self {
+        Self {
+            l2_da_validator_address: Some(this.l2_da_validator_address.as_bytes().into()),
+            pubdata_type: Some(
+                proto::L1BatchCommitDataGeneratorMode::new(&this.pubdata_type) as i32,
+            ),
+        }
+    }
+}
+
 impl ProtoFmt for Payload {
     type Proto = proto::Payload;
 
@@ -135,7 +162,7 @@ impl ProtoFmt for Payload {
             }
         }
 
-        Ok(Self {
+        let this = Self {
             protocol_version,
             hash: required(&r.hash)
                 .and_then(|h| parse_h256(h))
                .context("hash")?,
@@ -153,10 +180,32 @@
                 .context("operator_address")?,
             transactions,
             last_in_batch: *required(&r.last_in_batch).context("last_in_batch")?,
-        })
+            pubdata_params: read_optional_repr(&r.pubdata_params)
+                .context("pubdata_params")?
+ .unwrap_or_default(), + }; + if this.protocol_version.is_pre_gateway() { + anyhow::ensure!( + this.pubdata_params == PubdataParams::default(), + "pubdata_params should have the default value in pre-gateway protocol_version" + ); + } + if this.pubdata_params == PubdataParams::default() { + anyhow::ensure!( + r.pubdata_params.is_none(), + "default pubdata_params should be encoded as None" + ); + } + Ok(this) } fn build(&self) -> Self::Proto { + if self.protocol_version.is_pre_gateway() { + assert_eq!( + self.pubdata_params, PubdataParams::default(), + "BUG DETECTED: pubdata_params should have the default value in pre-gateway protocol_version" + ); + } let mut x = Self::Proto { protocol_version: Some((self.protocol_version as u16).into()), hash: Some(self.hash.as_bytes().into()), @@ -171,6 +220,11 @@ impl ProtoFmt for Payload { transactions: vec![], transactions_v25: vec![], last_in_batch: Some(self.last_in_batch), + pubdata_params: if self.pubdata_params == PubdataParams::default() { + None + } else { + Some(ProtoRepr::build(&self.pubdata_params)) + }, }; match self.protocol_version { v if v >= ProtocolVersionId::Version25 => { @@ -517,3 +571,19 @@ impl ProtoRepr for proto::AttesterCommittee { } } } + +impl proto::L1BatchCommitDataGeneratorMode { + pub(crate) fn new(n: &L1BatchCommitmentMode) -> Self { + match n { + L1BatchCommitmentMode::Rollup => Self::Rollup, + L1BatchCommitmentMode::Validium => Self::Validium, + } + } + + pub(crate) fn parse(&self) -> L1BatchCommitmentMode { + match self { + Self::Rollup => L1BatchCommitmentMode::Rollup, + Self::Validium => L1BatchCommitmentMode::Validium, + } + } +} diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index 8e88265730e9..96efc6348350 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -2,7 +2,9 @@ use std::collections::BTreeMap; use zksync_concurrency::net; use zksync_consensus_roles::{attester, node, validator}; -use zksync_types::{ethabi, Address, L1BatchNumber, ProtocolVersionId, Transaction, H256}; +use zksync_types::{ + commitment::PubdataParams, ethabi, Address, L1BatchNumber, ProtocolVersionId, Transaction, H256, +}; mod conv; pub mod proto; @@ -46,6 +48,7 @@ pub struct Payload { pub operator_address: Address, pub transactions: Vec, pub last_in_batch: bool, + pub pubdata_params: PubdataParams, } impl Payload { diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index 421904bf966b..49a69e8a36ec 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -26,6 +26,12 @@ message Payload { // Set for protocol_version >= 25. 
repeated TransactionV25 transactions_v25 = 12; optional bool last_in_batch = 10; // required + optional PubdataParams pubdata_params = 13; // optional +} + +message PubdataParams { + optional bytes l2_da_validator_address = 1; // required; H160 + optional L1BatchCommitDataGeneratorMode pubdata_type = 2; // required } message L1Transaction { @@ -142,3 +148,8 @@ message AttestationStatus { optional roles.validator.GenesisHash genesis = 1; // required optional uint64 next_batch_to_attest = 2; // required } + +enum L1BatchCommitDataGeneratorMode { + Rollup = 0; + Validium = 1; +} diff --git a/core/lib/dal/src/consensus/tests.rs b/core/lib/dal/src/consensus/tests.rs index e8342b7446cc..465148dc7b5c 100644 --- a/core/lib/dal/src/consensus/tests.rs +++ b/core/lib/dal/src/consensus/tests.rs @@ -1,15 +1,17 @@ use std::fmt::Debug; use rand::Rng; -use zksync_concurrency::ctx; +use zksync_concurrency::{ctx, testonly::abort_on_panic}; use zksync_protobuf::{ repr::{decode, encode}, testonly::{test_encode, test_encode_all_formats, FmtConv}, ProtoRepr, }; -use zksync_test_account::Account; +use zksync_test_contracts::Account; use zksync_types::{ - web3::Bytes, Execute, ExecuteTransactionCommon, L1BatchNumber, ProtocolVersionId, Transaction, + commitment::{L1BatchCommitmentMode, PubdataParams}, + web3::Bytes, + Execute, ExecuteTransactionCommon, L1BatchNumber, ProtocolVersionId, Transaction, }; use super::*; @@ -51,12 +53,24 @@ fn payload(rng: &mut impl Rng, protocol_version: ProtocolVersionId) -> Payload { }) .collect(), last_in_batch: rng.gen(), + pubdata_params: if protocol_version.is_pre_gateway() { + PubdataParams::default() + } else { + PubdataParams { + pubdata_type: match rng.gen_range(0..2) { + 0 => L1BatchCommitmentMode::Rollup, + _ => L1BatchCommitmentMode::Validium, + }, + l2_da_validator_address: rng.gen(), + } + }, } } /// Tests struct <-> proto struct conversions. #[test] fn test_encoding() { + abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); test_encode_all_formats::>(rng); @@ -69,10 +83,15 @@ fn test_encoding() { encode_decode::( mock_protocol_upgrade_transaction().into(), ); - let p = payload(rng, ProtocolVersionId::Version24); - test_encode(rng, &p); - let p = payload(rng, ProtocolVersionId::Version25); - test_encode(rng, &p); + // Test encoding in the current and all the future versions. + for v in ProtocolVersionId::latest() as u16.. { + let Ok(v) = ProtocolVersionId::try_from(v) else { + break; + }; + tracing::info!("version {v}"); + let p = payload(rng, v); + test_encode(rng, &p); + } } fn encode_decode(msg: P::Type) diff --git a/core/lib/dal/src/consensus_dal/mod.rs b/core/lib/dal/src/consensus_dal/mod.rs index 9515e93f2b3c..a091421d857c 100644 --- a/core/lib/dal/src/consensus_dal/mod.rs +++ b/core/lib/dal/src/consensus_dal/mod.rs @@ -16,10 +16,48 @@ use crate::{Core, CoreDal}; #[cfg(test)] mod tests; +/// Hash of the batch. pub fn batch_hash(info: &StoredBatchInfo) -> attester::BatchHash { attester::BatchHash(Keccak256::from_bytes(info.hash().0)) } +/// Verifies that the transition from `old` to `new` is admissible. +pub fn verify_config_transition(old: &GlobalConfig, new: &GlobalConfig) -> anyhow::Result<()> { + anyhow::ensure!( + old.genesis.chain_id == new.genesis.chain_id, + "changing chain_id is not allowed: old = {:?}, new = {:?}", + old.genesis.chain_id, + new.genesis.chain_id, + ); + // Note that it may happen that the fork number didn't change, + // in case the binary was updated to support more fields in genesis struct. 
+ // In such a case, the old binary was not able to connect to the consensus network, + // because of the genesis hash mismatch. + // TODO: Perhaps it would be better to deny unknown fields in the genesis instead. + // It would require embedding the genesis either as a json string or protobuf bytes within + // the global config, so that the global config can be parsed with + // `deny_unknown_fields:false` while genesis would be parsed with + // `deny_unknown_fields:true`. + anyhow::ensure!( + old.genesis.fork_number <= new.genesis.fork_number, + "transition to a past fork is not allowed: old = {:?}, new = {:?}", + old.genesis.fork_number, + new.genesis.fork_number, + ); + new.genesis.verify().context("genesis.verify()")?; + // This is a temporary hack until the `consensus_genesis()` RPC is disabled. + if new + == (&GlobalConfig { + genesis: old.genesis.clone(), + registry_address: None, + seed_peers: [].into(), + }) + { + anyhow::bail!("new config is equal to truncated old config, which means that it was sourced from the wrong endpoint"); + } + Ok(()) +} + /// Storage access methods for `zksync_core::consensus` module. #[derive(Debug)] pub struct ConsensusDal<'a, 'c> { @@ -31,8 +69,8 @@ pub struct ConsensusDal<'a, 'c> { pub enum InsertCertificateError { #[error("corresponding payload is missing")] MissingPayload, - #[error("certificate doesn't match the payload")] - PayloadMismatch, + #[error("certificate doesn't match the payload, payload = {0:?}")] + PayloadMismatch(Payload), #[error(transparent)] Dal(#[from] DalError), #[error(transparent)] @@ -94,6 +132,8 @@ impl ConsensusDal<'_, '_> { if got == want { return Ok(()); } + verify_config_transition(got, want)?; + // If genesis didn't change, just update the config. if got.genesis == want.genesis { let s = zksync_protobuf::serde::Serialize; @@ -112,30 +152,6 @@ impl ConsensusDal<'_, '_> { txn.commit().await?; return Ok(()); } - - // Verify the genesis change. - anyhow::ensure!( - got.genesis.chain_id == want.genesis.chain_id, - "changing chain_id is not allowed: old = {:?}, new = {:?}", - got.genesis.chain_id, - want.genesis.chain_id, - ); - // Note that it may happen that the fork number didn't change, - // in case the binary was updated to support more fields in genesis struct. - // In such a case, the old binary was not able to connect to the consensus network, - // because of the genesis hash mismatch. - // TODO: Perhaps it would be better to deny unknown fields in the genesis instead. - // It would require embedding the genesis either as a json string or protobuf bytes within - // the global config, so that the global config can be parsed with - // `deny_unknown_fields:false` while genesis would be parsed with - // `deny_unknown_fields:true`. - anyhow::ensure!( - got.genesis.fork_number <= want.genesis.fork_number, - "transition to a past fork is not allowed: old = {:?}, new = {:?}", - got.genesis.fork_number, - want.genesis.fork_number, - ); - want.genesis.verify().context("genesis.verify()")?; } // Reset the consensus state. @@ -512,7 +528,7 @@ impl ConsensusDal<'_, '_> { .await? 
.ok_or(E::MissingPayload)?;
         if header.payload != want_payload.encode().hash() {
-            return Err(E::PayloadMismatch);
+            return Err(E::PayloadMismatch(want_payload));
         }
         sqlx::query!(
             r#"
@@ -618,7 +634,7 @@ impl ConsensusDal<'_, '_> {
     pub async fn insert_batch_certificate(
         &mut self,
         cert: &attester::BatchQC,
-    ) -> Result<(), InsertCertificateError> {
+    ) -> anyhow::Result<()> {
         let cfg = self
             .global_config()
             .await
@@ -636,9 +652,7 @@
                 .context("batch()")?
                 .context("batch is missing")?,
         );
-        if cert.message.hash != hash {
-            return Err(InsertCertificateError::PayloadMismatch);
-        }
+        anyhow::ensure!(cert.message.hash == hash, "hash mismatch");
         cert.verify(cfg.genesis.hash(), &committee)
             .context("cert.verify()")?;
         sqlx::query!(
diff --git a/core/lib/dal/src/consensus_dal/tests.rs b/core/lib/dal/src/consensus_dal/tests.rs
index 772e7b2bf5e7..694abc8508b6 100644
--- a/core/lib/dal/src/consensus_dal/tests.rs
+++ b/core/lib/dal/src/consensus_dal/tests.rs
@@ -131,6 +131,9 @@ async fn test_batch_certificate() {
             compressed_repeated_writes: None,
             zkporter_is_available: false,
             aux_commitments: None,
+            aggregation_root: rng.gen(),
+            local_root: rng.gen(),
+            state_diff_hash: rng.gen(),
         },
     )
     .await
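For context on the new `verify_config_transition` helper introduced above: it centralizes the checks that were previously inlined in the config-update path, so a caller can validate a candidate config before touching storage. A hedged usage sketch (the surrounding function is hypothetical; `GlobalConfig` and the check itself are assumed to be exported from `consensus_dal` as shown above):

```rust
use anyhow::Context as _;
use zksync_dal::consensus_dal::{verify_config_transition, GlobalConfig};

/// Hypothetical pre-flight check before persisting a new consensus config.
fn validate_incoming_config(
    current: &GlobalConfig,
    incoming: &GlobalConfig,
) -> anyhow::Result<()> {
    // Rejects chain-id changes, fork-number rollbacks, invalid genesis data,
    // and configs that equal a truncated copy of the current one (i.e., ones
    // sourced from the deprecated `consensus_genesis()` endpoint).
    verify_config_transition(current, incoming).context("inadmissible config transition")
}
```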
diff --git a/core/lib/dal/src/contract_verification_dal.rs b/core/lib/dal/src/contract_verification_dal.rs
index 291e60a50d90..57bea5392cf8 100644
--- a/core/lib/dal/src/contract_verification_dal.rs
+++ b/core/lib/dal/src/contract_verification_dal.rs
@@ -1,28 +1,25 @@
 #![doc = include_str!("../doc/ContractVerificationDal.md")]
+
 use std::{
     fmt::{Display, Formatter},
     time::Duration,
 };
 
-use anyhow::Context as _;
 use sqlx::postgres::types::PgInterval;
-use zksync_db_connection::connection::Connection;
+use zksync_db_connection::{error::SqlxContext, instrument::InstrumentExt};
 use zksync_types::{
+    address_to_h256,
     contract_verification_api::{
-        DeployContractCalldata, VerificationIncomingRequest, VerificationInfo, VerificationRequest,
+        VerificationIncomingRequest, VerificationInfo, VerificationRequest,
         VerificationRequestStatus,
     },
-    Address, CONTRACT_DEPLOYER_ADDRESS,
+    web3, Address, CONTRACT_DEPLOYER_ADDRESS, H256,
 };
-use zksync_utils::address_to_h256;
 use zksync_vm_interface::VmEvent;
 
-use crate::{models::storage_verification_request::StorageVerificationRequest, Core};
-
-#[derive(Debug)]
-pub struct ContractVerificationDal<'a, 'c> {
-    pub(crate) storage: &'a mut Connection<'c, Core>,
-}
+use crate::{
+    models::storage_verification_request::StorageVerificationRequest, Connection, Core, DalResult,
+};
 
 #[derive(Debug)]
 enum Compiler {
@@ -43,8 +40,24 @@ impl Display for Compiler {
     }
 }
 
+#[derive(Debug)]
+pub struct DeployedContractData {
+    pub bytecode_hash: H256,
+    /// Bytecode as persisted in Postgres (i.e., with additional padding for EVM bytecodes).
+    pub bytecode: Vec<u8>,
+    /// Recipient of the deployment transaction.
+    pub contract_address: Option<Address>,
+    /// Call data for the deployment transaction.
+    pub calldata: Option<Vec<u8>>,
+}
+
+#[derive(Debug)]
+pub struct ContractVerificationDal<'a, 'c> {
+    pub(crate) storage: &'a mut Connection<'c, Core>,
+}
+
 impl ContractVerificationDal<'_, '_> {
-    pub async fn get_count_of_queued_verification_requests(&mut self) -> sqlx::Result<usize> {
+    pub async fn get_count_of_queued_verification_requests(&mut self) -> DalResult<usize> {
         sqlx::query!(
             r#"
             SELECT
@@ -55,15 +68,16 @@
                 status = 'queued'
             "#
         )
-        .fetch_one(self.storage.conn())
+        .instrument("get_count_of_queued_verification_requests")
+        .fetch_one(self.storage)
         .await
         .map(|row| row.count as usize)
     }
 
     pub async fn add_contract_verification_request(
         &mut self,
-        query: VerificationIncomingRequest,
-    ) -> sqlx::Result<usize> {
+        query: &VerificationIncomingRequest,
+    ) -> DalResult<usize> {
         sqlx::query!(
             r#"
             INSERT INTO
@@ -90,16 +104,18 @@
             query.contract_address.as_bytes(),
             // Serialization should always succeed.
             serde_json::to_string(&query.source_code_data).unwrap(),
-            query.contract_name,
+            &query.contract_name,
             query.compiler_versions.zk_compiler_version(),
             query.compiler_versions.compiler_version(),
             query.optimization_used,
-            query.optimizer_mode,
-            query.constructor_arguments.0,
+            query.optimizer_mode.as_deref(),
+            query.constructor_arguments.0.as_slice(),
             query.is_system,
             query.force_evmla,
         )
-        .fetch_one(self.storage.conn())
+        .instrument("add_contract_verification_request")
+        .with_arg("address", &query.contract_address)
+        .fetch_one(self.storage)
         .await
         .map(|row| row.id as usize)
     }
@@ -111,7 +127,7 @@
     pub async fn get_next_queued_verification_request(
         &mut self,
         processing_timeout: Duration,
-    ) -> sqlx::Result<Option<VerificationRequest>> {
+    ) -> DalResult<Option<VerificationRequest>> {
         let processing_timeout = PgInterval {
             months: 0,
             days: 0,
@@ -160,7 +176,9 @@
             "#,
             &processing_timeout
         )
-        .fetch_optional(self.storage.conn())
+        .instrument("get_next_queued_verification_request")
+        .with_arg("processing_timeout", &processing_timeout)
+        .fetch_optional(self.storage)
         .await?
         .map(Into::into);
         Ok(result)
     }
 
@@ -170,12 +188,10 @@
     pub async fn save_verification_info(
         &mut self,
         verification_info: VerificationInfo,
-    ) -> anyhow::Result<()> {
-        let mut transaction = self
-            .storage
-            .start_transaction()
-            .await
-            .context("start_transaction()")?;
+    ) -> DalResult<()> {
+        let mut transaction = self.storage.start_transaction().await?;
+        let id = verification_info.request.id;
+        let address = verification_info.request.req.contract_address;
 
         sqlx::query!(
             r#"
@@ -188,10 +204,12 @@
             "#,
             verification_info.request.id as i64,
         )
-        .execute(transaction.conn())
+        .instrument("save_verification_info#set_status")
+        .with_arg("id", &id)
+        .with_arg("address", &address)
+        .execute(&mut transaction)
         .await?;
 
-        let address = verification_info.request.req.contract_address;
         // Serialization should always succeed.
        let verification_info_json = serde_json::to_value(verification_info)
            .expect("Failed to serialize verification info into serde_json");
@@ -209,20 +227,22 @@
             address.as_bytes(),
             &verification_info_json
         )
-        .execute(transaction.conn())
+        .instrument("save_verification_info#insert")
+        .with_arg("id", &id)
+        .with_arg("address", &address)
+        .execute(&mut transaction)
         .await?;
 
-        transaction.commit().await.context("commit()")?;
-        Ok(())
+        transaction.commit().await
     }
 
     pub async fn save_verification_error(
         &mut self,
         id: usize,
-        error: String,
-        compilation_errors: serde_json::Value,
-        panic_message: Option<String>,
-    ) -> sqlx::Result<()> {
+        error: &str,
+        compilation_errors: &serde_json::Value,
+        panic_message: Option<&str>,
+    ) -> DalResult<()> {
         sqlx::query!(
             r#"
             UPDATE contract_verification_requests
@@ -236,11 +256,14 @@
                 id = $1
             "#,
             id as i64,
-            error.as_str(),
-            &compilation_errors,
+            error,
+            compilation_errors,
             panic_message
         )
-        .execute(self.storage.conn())
+        .instrument("save_verification_error")
+        .with_arg("id", &id)
+        .with_arg("error", &error)
+        .execute(self.storage)
         .await?;
         Ok(())
     }
 
     pub async fn get_verification_request_status(
         &mut self,
         id: usize,
-    ) -> anyhow::Result<Option<VerificationRequestStatus>> {
-        let Some(row) = sqlx::query!(
+    ) -> DalResult<Option<VerificationRequestStatus>> {
+        sqlx::query!(
             r#"
             SELECT
                 status,
@@ -262,41 +285,46 @@
             "#,
             id as i64,
         )
-        .fetch_optional(self.storage.conn())
-        .await?
-        else {
-            return Ok(None);
-        };
-
-        let mut compilation_errors = vec![];
-        if let Some(errors) = row.compilation_errors {
-            for value in errors.as_array().context("expected an array")? {
-                compilation_errors.push(value.as_str().context("expected string")?.to_string());
+        .try_map(|row| {
+            let mut compilation_errors = vec![];
+            if let Some(errors) = row.compilation_errors {
+                let serde_json::Value::Array(errors) = errors else {
+                    return Err(anyhow::anyhow!("errors are not an array"))
+                        .decode_column("compilation_errors")?;
+                };
+                for value in errors {
+                    let serde_json::Value::String(err) = value else {
+                        return Err(anyhow::anyhow!("error is not a string"))
+                            .decode_column("compilation_errors")?;
+                    };
+                    compilation_errors.push(err.to_owned());
+                }
             }
-        }
-        Ok(Some(VerificationRequestStatus {
-            status: row.status,
-            error: row.error,
-            compilation_errors: if compilation_errors.is_empty() {
-                None
-            } else {
-                Some(compilation_errors)
-            },
-        }))
+
+            Ok(VerificationRequestStatus {
+                status: row.status,
+                error: row.error,
+                compilation_errors: (!compilation_errors.is_empty()).then_some(compilation_errors),
+            })
+        })
+        .instrument("get_verification_request_status")
+        .with_arg("id", &id)
+        .fetch_optional(self.storage)
+        .await
     }
 
     /// Returns bytecode and calldata from the contract and the transaction that created it.
     pub async fn get_contract_info_for_verification(
         &mut self,
         address: Address,
-    ) -> anyhow::Result<Option<(Vec<u8>, DeployContractCalldata)>> {
+    ) -> DalResult<Option<DeployedContractData>> {
         let address_h256 = address_to_h256(&address);
-
-        let Some(row) = sqlx::query!(
+        sqlx::query!(
             r#"
             SELECT
+                factory_deps.bytecode_hash,
                 factory_deps.bytecode,
-                transactions.data AS "data?",
+                transactions.data -> 'calldata' AS "calldata?",
                 transactions.contract_address AS "contract_address?"
             FROM
                 (
@@ -327,30 +355,29 @@
             VmEvent::DEPLOY_EVENT_SIGNATURE.as_bytes(),
             address_h256.as_bytes(),
         )
-        .fetch_optional(self.storage.conn())
-        .await?
-        else {
-            return Ok(None);
-        };
-        let calldata = match row.contract_address {
-            Some(contract_address) if contract_address == CONTRACT_DEPLOYER_ADDRESS.0.to_vec() => {
-                // `row.contract_address` and `row.data` are either both `None` or both `Some(_)`.
-                // In this arm it's checked that `row.contract_address` is `Some(_)`, so it's safe to unwrap `row.data`.
-                let data: serde_json::Value = row.data.context("data missing")?;
-                let calldata_str: String = serde_json::from_value(
-                    data.get("calldata").context("calldata missing")?.clone(),
-                )
-                .context("failed parsing calldata")?;
-                let calldata = hex::decode(&calldata_str[2..]).context("invalid calldata")?;
-                DeployContractCalldata::Deploy(calldata)
-            }
-            _ => DeployContractCalldata::Ignore,
-        };
-        Ok(Some((row.bytecode, calldata)))
+        .try_map(|row| {
+            Ok(DeployedContractData {
+                bytecode_hash: H256::from_slice(&row.bytecode_hash),
+                bytecode: row.bytecode,
+                contract_address: row.contract_address.as_deref().map(Address::from_slice),
+                calldata: row
+                    .calldata
+                    .map(|calldata| {
+                        serde_json::from_value::<web3::Bytes>(calldata)
+                            .decode_column("calldata")
+                            .map(|bytes| bytes.0)
+                    })
+                    .transpose()?,
+            })
+        })
+        .instrument("get_contract_info_for_verification")
+        .with_arg("address", &address)
+        .fetch_optional(self.storage)
+        .await
     }
 
     /// Returns true if the contract has a stored contracts_verification_info.
-    pub async fn is_contract_verified(&mut self, address: Address) -> sqlx::Result<bool> {
+    pub async fn is_contract_verified(&mut self, address: Address) -> DalResult<bool> {
         let count = sqlx::query!(
             r#"
             SELECT
@@ -362,13 +389,15 @@
             "#,
             address.as_bytes()
         )
-        .fetch_one(self.storage.conn())
+        .instrument("is_contract_verified")
+        .with_arg("address", &address)
+        .fetch_one(self.storage)
         .await?
         .count;
         Ok(count > 0)
     }
 
-    async fn get_compiler_versions(&mut self, compiler: Compiler) -> sqlx::Result<Vec<String>> {
+    async fn get_compiler_versions(&mut self, compiler: Compiler) -> DalResult<Vec<String>> {
         let compiler = format!("{compiler}");
         let versions: Vec<_> = sqlx::query!(
             r#"
@@ -383,7 +412,9 @@
             "#,
             &compiler
        )
-        .fetch_all(self.storage.conn())
+        .instrument("get_compiler_versions")
+        .with_arg("compiler", &compiler)
+        .fetch_all(self.storage)
        .await?
        .into_iter()
        .map(|row| row.version)
        .collect();
         Ok(versions)
     }
 
-    pub async fn get_zksolc_versions(&mut self) -> sqlx::Result<Vec<String>> {
+    pub async fn get_zksolc_versions(&mut self) -> DalResult<Vec<String>> {
         self.get_compiler_versions(Compiler::ZkSolc).await
     }
 
-    pub async fn get_solc_versions(&mut self) -> sqlx::Result<Vec<String>> {
+    pub async fn get_solc_versions(&mut self) -> DalResult<Vec<String>> {
         self.get_compiler_versions(Compiler::Solc).await
     }
 
-    pub async fn get_zkvyper_versions(&mut self) -> sqlx::Result<Vec<String>> {
+    pub async fn get_zkvyper_versions(&mut self) -> DalResult<Vec<String>> {
         self.get_compiler_versions(Compiler::ZkVyper).await
     }
 
-    pub async fn get_vyper_versions(&mut self) -> sqlx::Result<Vec<String>> {
+    pub async fn get_vyper_versions(&mut self) -> DalResult<Vec<String>> {
         self.get_compiler_versions(Compiler::Vyper).await
     }
 
     async fn set_compiler_versions(
         &mut self,
         compiler: Compiler,
-        versions: Vec<String>,
-    ) -> anyhow::Result<()> {
-        let mut transaction = self
-            .storage
-            .start_transaction()
-            .await
-            .context("start_transaction")?;
+        versions: &[String],
+    ) -> DalResult<()> {
+        let mut transaction = self.storage.start_transaction().await?;
 
         let compiler = format!("{compiler}");
         sqlx::query!(
@@ -427,7 +454,9 @@
             "#,
             &compiler
         )
-        .execute(transaction.conn())
+        .instrument("set_compiler_versions#delete")
+        .with_arg("compiler", &compiler)
+        .execute(&mut transaction)
         .await?;
 
         sqlx::query!(
@@ -443,34 +472,36 @@
                 UNNEST($1::TEXT []) AS u (version)
             ON CONFLICT (version, compiler) DO NOTHING
             "#,
-            &versions,
+            versions,
             &compiler,
         )
-        .execute(transaction.conn())
+        .instrument("set_compiler_versions#insert")
+        .with_arg("compiler", &compiler)
+        .with_arg("versions.len", &versions.len())
+        .execute(&mut transaction)
         .await?;
 
-        transaction.commit().await.context("commit()")?;
-        Ok(())
+        transaction.commit().await
     }
 
-    pub async fn set_zksolc_versions(&mut self, versions: Vec<String>) -> anyhow::Result<()> {
+    pub async fn set_zksolc_versions(&mut self, versions: &[String]) -> DalResult<()> {
         self.set_compiler_versions(Compiler::ZkSolc, versions).await
     }
 
-    pub async fn set_solc_versions(&mut self, versions: Vec<String>) -> anyhow::Result<()> {
+    pub async fn set_solc_versions(&mut self, versions: &[String]) -> DalResult<()> {
         self.set_compiler_versions(Compiler::Solc, versions).await
     }
 
-    pub async fn set_zkvyper_versions(&mut self, versions: Vec<String>) -> anyhow::Result<()> {
+    pub async fn set_zkvyper_versions(&mut self, versions: &[String]) -> DalResult<()> {
         self.set_compiler_versions(Compiler::ZkVyper, versions)
             .await
     }
 
-    pub async fn set_vyper_versions(&mut self, versions: Vec<String>) -> anyhow::Result<()> {
+    pub async fn set_vyper_versions(&mut self, versions: &[String]) -> DalResult<()> {
         self.set_compiler_versions(Compiler::Vyper, versions).await
     }
 
-    pub async fn get_all_successful_requests(&mut self) -> sqlx::Result<Vec<VerificationRequest>> {
+    pub async fn get_all_successful_requests(&mut self) -> DalResult<Vec<VerificationRequest>> {
         let result = sqlx::query_as!(
             StorageVerificationRequest,
             r#"
@@ -494,7 +525,8 @@
                 id
             "#,
         )
-        .fetch_all(self.storage.conn())
+        .instrument("get_all_successful_requests")
+        .fetch_all(self.storage)
         .await?
.into_iter() .map(Into::into) @@ -505,8 +537,8 @@ impl ContractVerificationDal<'_, '_> { pub async fn get_contract_verification_info( &mut self, address: Address, - ) -> anyhow::Result<Option<VerificationInfo>> { - let Some(row) = sqlx::query!( + ) -> DalResult<Option<VerificationInfo>> { + Ok(sqlx::query!( r#" SELECT verification_info FROM contracts_verification_info WHERE address = $1 "#, address.as_bytes(), ) - .fetch_optional(self.storage.conn()) + .try_map(|row| { + row.verification_info + .map(|info| serde_json::from_value(info).decode_column("verification_info")) + .transpose() + }) + .instrument("get_contract_verification_info") + .with_arg("address", &address) + .fetch_optional(self.storage) .await? - else { - return Ok(None); + .flatten()) + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use zksync_types::{ + bytecode::BytecodeHash, + contract_verification_api::{CompilerVersions, SourceCodeData}, + tx::IncludedTxLocation, + Execute, L1BatchNumber, L2BlockNumber, ProtocolVersion, + }; + use zksync_vm_interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; + + use super::*; + use crate::{ + tests::{create_l2_block_header, mock_l2_transaction}, + ConnectionPool, CoreDal, + }; + + #[tokio::test] + async fn getting_contract_info_for_verification() { + let pool = ConnectionPool::<Core>::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + conn.blocks_dal() + .insert_l2_block(&create_l2_block_header(0)) + .await + .unwrap(); + + // Add a transaction, its bytecode and the bytecode deployment event. + let deployed_address = Address::repeat_byte(12); + let mut tx = mock_l2_transaction(); + let bytecode = vec![1; 32]; + let bytecode_hash = BytecodeHash::for_bytecode(&bytecode).value(); + tx.execute = Execute::for_deploy(H256::zero(), bytecode.clone(), &[]); + conn.transactions_dal() + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) + .await + .unwrap(); + conn.factory_deps_dal() + .insert_factory_deps( + L2BlockNumber(0), + &HashMap::from([(bytecode_hash, bytecode.clone())]), + ) + .await + .unwrap(); + let location = IncludedTxLocation { + tx_hash: tx.hash(), + tx_index_in_l2_block: 0, + tx_initiator_address: tx.initiator_account(), }; - let Some(info) = row.verification_info else { - return Ok(None); + let deploy_event = VmEvent { + location: (L1BatchNumber(0), 0), + address: CONTRACT_DEPLOYER_ADDRESS, + indexed_topics: vec![ + VmEvent::DEPLOY_EVENT_SIGNATURE, + address_to_h256(&tx.initiator_account()), + bytecode_hash, + address_to_h256(&deployed_address), + ], + value: vec![], }; - Ok(Some(serde_json::from_value(info).context("invalid info")?)) + conn.events_dal() + .save_events(L2BlockNumber(0), &[(location, vec![&deploy_event])]) + .await + .unwrap(); + + let contract = conn + .contract_verification_dal() + .get_contract_info_for_verification(deployed_address) + .await + .unwrap() + .expect("no info"); + assert_eq!(contract.bytecode_hash, bytecode_hash); + assert_eq!(contract.bytecode, bytecode); + assert_eq!(contract.contract_address, Some(CONTRACT_DEPLOYER_ADDRESS)); + assert_eq!(contract.calldata.unwrap(), tx.execute.calldata); + } + + async fn test_working_with_verification_requests(zksolc: Option<&str>) { + let request = VerificationIncomingRequest { + contract_address: Address::repeat_byte(11), + source_code_data: SourceCodeData::SolSingleFile("contract Test {}".to_owned()), + contract_name:
"Test".to_string(), + compiler_versions: CompilerVersions::Solc { + compiler_zksolc_version: zksolc.map(str::to_owned), + compiler_solc_version: "0.8.27".to_owned(), + }, + optimization_used: true, + optimizer_mode: Some("z".to_owned()), + constructor_arguments: web3::Bytes(b"test".to_vec()), + is_system: false, + force_evmla: true, + }; + + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + let id = conn + .contract_verification_dal() + .add_contract_verification_request(&request) + .await + .unwrap(); + + let status = conn + .contract_verification_dal() + .get_verification_request_status(id) + .await + .unwrap() + .expect("request not persisted"); + assert_eq!(status.status, "queued"); + + let req = conn + .contract_verification_dal() + .get_next_queued_verification_request(Duration::from_secs(600)) + .await + .unwrap() + .expect("request not queued"); + assert_eq!(req.id, id); + assert_eq!(req.req.contract_address, request.contract_address); + assert_eq!(req.req.contract_name, request.contract_name); + assert_eq!(req.req.compiler_versions, request.compiler_versions); + assert_eq!(req.req.optimization_used, request.optimization_used); + assert_eq!(req.req.optimizer_mode, request.optimizer_mode); + assert_eq!(req.req.constructor_arguments, request.constructor_arguments); + assert_eq!(req.req.is_system, request.is_system); + assert_eq!(req.req.force_evmla, request.force_evmla); + + let maybe_req = conn + .contract_verification_dal() + .get_next_queued_verification_request(Duration::from_secs(600)) + .await + .unwrap(); + assert!(maybe_req.is_none()); + } + + #[tokio::test] + async fn working_with_verification_requests() { + test_working_with_verification_requests(None).await; + test_working_with_verification_requests(Some("1.5.7")).await; } } diff --git a/core/lib/dal/src/eth_watcher_dal.rs b/core/lib/dal/src/eth_watcher_dal.rs index bdfc7f24c7b5..062ad47219d8 100644 --- a/core/lib/dal/src/eth_watcher_dal.rs +++ b/core/lib/dal/src/eth_watcher_dal.rs @@ -107,7 +107,7 @@ mod tests { async fn test_get_or_set_next_block_to_process_with_different_event_types() { let pool = ConnectionPool::::test_pool().await; let mut conn = pool.connection().await.unwrap(); - let mut dal = conn.processed_events_dal(); + let mut dal = conn.eth_watcher_dal(); // Test with ProtocolUpgrades let next_block = dal diff --git a/core/lib/dal/src/factory_deps_dal.rs b/core/lib/dal/src/factory_deps_dal.rs index 857e2973ae33..424d708da241 100644 --- a/core/lib/dal/src/factory_deps_dal.rs +++ b/core/lib/dal/src/factory_deps_dal.rs @@ -4,7 +4,6 @@ use anyhow::Context as _; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; use zksync_types::{L2BlockNumber, H256, U256}; -use zksync_utils::{bytes_to_be_words, bytes_to_chunks}; use crate::Core; @@ -102,7 +101,7 @@ impl FactoryDepsDal<'_, '_> { .context("failed loading bootloader code")? 
.with_context(|| format!("bootloader code with hash {bootloader_hash:?} should be present in the database"))?; let bootloader_code = SystemContractCode { - code: bytes_to_be_words(bootloader_bytecode), + code: bootloader_bytecode, hash: bootloader_hash, }; @@ -113,7 +112,7 @@ impl FactoryDepsDal<'_, '_> { .with_context(|| format!("default account code with hash {default_aa_hash:?} should be present in the database"))?; let default_aa_code = SystemContractCode { - code: bytes_to_be_words(default_aa_bytecode), + code: default_aa_bytecode, hash: default_aa_hash, }; @@ -125,7 +124,7 @@ impl FactoryDepsDal<'_, '_> { .with_context(|| format!("EVM emulator code with hash {evm_emulator_hash:?} should be present in the database"))?; Some(SystemContractCode { - code: bytes_to_be_words(evm_emulator_bytecode), + code: evm_emulator_bytecode, hash: evm_emulator_hash, }) } else { @@ -140,10 +139,7 @@ impl FactoryDepsDal<'_, '_> { } /// Returns bytecodes for factory deps with the specified `hashes`. - pub async fn get_factory_deps( - &mut self, - hashes: &HashSet, - ) -> HashMap> { + pub async fn get_factory_deps(&mut self, hashes: &HashSet) -> HashMap> { let hashes_as_bytes: Vec<_> = hashes.iter().map(H256::as_bytes).collect(); sqlx::query!( @@ -162,12 +158,7 @@ impl FactoryDepsDal<'_, '_> { .await .unwrap() .into_iter() - .map(|row| { - ( - U256::from_big_endian(&row.bytecode_hash), - bytes_to_chunks(&row.bytecode), - ) - }) + .map(|row| (U256::from_big_endian(&row.bytecode_hash), row.bytecode)) .collect() } diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index f0d2f0c16711..20b428adec44 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -23,8 +23,7 @@ use crate::{ snapshots_dal::SnapshotsDal, storage_logs_dal::StorageLogsDal, storage_logs_dedup_dal::StorageLogsDedupDal, storage_web3_dal::StorageWeb3Dal, sync_dal::SyncDal, system_dal::SystemDal, tee_proof_generation_dal::TeeProofGenerationDal, - tee_verifier_input_producer_dal::TeeVerifierInputProducerDal, tokens_dal::TokensDal, - tokens_web3_dal::TokensWeb3Dal, transactions_dal::TransactionsDal, + tokens_dal::TokensDal, tokens_web3_dal::TokensWeb3Dal, transactions_dal::TransactionsDal, transactions_web3_dal::TransactionsWeb3Dal, vm_runner_dal::VmRunnerDal, }; @@ -56,7 +55,6 @@ pub mod storage_web3_dal; pub mod sync_dal; pub mod system_dal; pub mod tee_proof_generation_dal; -pub mod tee_verifier_input_producer_dal; pub mod tokens_dal; pub mod tokens_web3_dal; pub mod transactions_dal; @@ -81,8 +79,6 @@ where fn transactions_web3_dal(&mut self) -> TransactionsWeb3Dal<'_, 'a>; - fn tee_verifier_input_producer_dal(&mut self) -> TeeVerifierInputProducerDal<'_, 'a>; - fn blocks_dal(&mut self) -> BlocksDal<'_, 'a>; fn blocks_web3_dal(&mut self) -> BlocksWeb3Dal<'_, 'a>; @@ -135,7 +131,7 @@ where fn base_token_dal(&mut self) -> BaseTokenDal<'_, 'a>; - fn processed_events_dal(&mut self) -> EthWatcherDal<'_, 'a>; + fn eth_watcher_dal(&mut self) -> EthWatcherDal<'_, 'a>; } #[derive(Clone, Debug)] @@ -155,10 +151,6 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> { TransactionsWeb3Dal { storage: self } } - fn tee_verifier_input_producer_dal(&mut self) -> TeeVerifierInputProducerDal<'_, 'a> { - TeeVerifierInputProducerDal { storage: self } - } - fn blocks_dal(&mut self) -> BlocksDal<'_, 'a> { BlocksDal { storage: self } } @@ -263,7 +255,7 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> { BaseTokenDal { storage: self } } - fn processed_events_dal(&mut self) -> EthWatcherDal<'_, 'a> { + fn eth_watcher_dal(&mut self) -> 
EthWatcherDal<'_, 'a> { EthWatcherDal { storage: self } } } diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs index 12e41ac780ad..885dcd46f41f 100644 --- a/core/lib/dal/src/models/mod.rs +++ b/core/lib/dal/src/models/mod.rs @@ -1,6 +1,8 @@ pub mod storage_block; + +use bigdecimal::{num_bigint::BigUint, BigDecimal}; use zksync_db_connection::error::SqlxContext; -use zksync_types::ProtocolVersionId; +use zksync_types::{ProtocolVersionId, U256}; mod call; pub mod storage_base_token_ratio; @@ -24,3 +26,26 @@ pub(crate) fn parse_protocol_version(raw: i32) -> sqlx::Result<ProtocolVersionId> { +/// Converts `U256` value into the corresponding `BigDecimal` value. +pub(crate) fn u256_to_big_decimal(value: U256) -> BigDecimal { + let mut u32_digits = vec![0_u32; 8]; + // `u64_digit`s from `U256` are little-endian + for (i, &u64_digit) in value.0.iter().enumerate() { + u32_digits[2 * i] = u64_digit as u32; + u32_digits[2 * i + 1] = (u64_digit >> 32) as u32; + } + let value = BigUint::new(u32_digits); + BigDecimal::new(value.into(), 0) +} + +/// Converts `BigUint` value into the corresponding `U256` value. +fn biguint_to_u256(value: BigUint) -> U256 { + let bytes = value.to_bytes_le(); + U256::from_little_endian(&bytes) +} + +/// Converts `BigDecimal` value into the corresponding `U256` value. +pub(crate) fn bigdecimal_to_u256(value: BigDecimal) -> U256 { + let bigint = value.with_scale(0).into_bigint_and_exponent().0; + biguint_to_u256(bigint.to_biguint().unwrap()) +} diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 3bb433a05cf8..159ed71cc3e9 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -7,7 +7,7 @@ use zksync_contracts::BaseSystemContractsHashes; use zksync_types::{ api, block::{L1BatchHeader, L2BlockHeader, UnsealedL1BatchHeader}, - commitment::{L1BatchMetaParameters, L1BatchMetadata}, + commitment::{L1BatchCommitmentMode, L1BatchMetaParameters, L1BatchMetadata, PubdataParams}, fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput}, l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}, Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, @@ -155,6 +155,10 @@ pub(crate) struct StorageL1Batch { pub bootloader_initial_content_commitment: Option<Vec<u8>>, pub pubdata_input: Option<Vec<u8>>, pub fee_address: Vec<u8>, + pub aggregation_root: Option<Vec<u8>>, + pub local_root: Option<Vec<u8>>, + pub state_diff_hash: Option<Vec<u8>>, + pub inclusion_data: Option<Vec<u8>>, } impl StorageL1Batch { @@ -263,6 +267,10 @@ impl TryFrom<StorageL1Batch> for L1BatchMetadata { bootloader_initial_content_commitment: batch .bootloader_initial_content_commitment .map(|v| H256::from_slice(&v)), + state_diff_hash: batch.state_diff_hash.map(|v| H256::from_slice(&v)), + local_root: batch.local_root.map(|v| H256::from_slice(&v)), + aggregation_root: batch.aggregation_root.map(|v| H256::from_slice(&v)), + da_inclusion_data: batch.inclusion_data, }) } } @@ -485,6 +493,8 @@ pub(crate) struct StorageL2BlockHeader { /// This value should bound the maximal amount of gas that can be spent by transactions in the miniblock.
pub gas_limit: Option<i64>, pub logs_bloom: Option<Vec<u8>>, + pub l2_da_validator_address: Vec<u8>, + pub pubdata_type: String, } impl From<StorageL2BlockHeader> for L2BlockHeader { @@ -532,6 +542,10 @@ impl From<StorageL2BlockHeader> for L2BlockHeader { .logs_bloom .map(|b| Bloom::from_slice(&b)) .unwrap_or_default(), + pubdata_params: PubdataParams { + l2_da_validator_address: Address::from_slice(&row.l2_da_validator_address), + pubdata_type: L1BatchCommitmentMode::from_str(&row.pubdata_type).unwrap(), + }, } } } diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index 7a4ebe074fe0..3f80f52c56eb 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -1,7 +1,11 @@ +use std::str::FromStr; + use zksync_contracts::BaseSystemContractsHashes; use zksync_db_connection::error::SqlxContext; use zksync_types::{ - api::en, parse_h160, parse_h256, parse_h256_opt, Address, L1BatchNumber, L2BlockNumber, + api::en, + commitment::{L1BatchCommitmentMode, PubdataParams}, + parse_h160, parse_h256, parse_h256_opt, Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction, H256, }; @@ -25,6 +29,8 @@ pub(crate) struct StorageSyncBlock { pub protocol_version: i32, pub virtual_blocks: i64, pub hash: Vec<u8>, + pub l2_da_validator_address: Vec<u8>, + pub pubdata_type: String, } pub(crate) struct SyncBlock { @@ -40,6 +46,7 @@ pub(crate) struct SyncBlock { pub virtual_blocks: u32, pub hash: H256, pub protocol_version: ProtocolVersionId, + pub pubdata_params: PubdataParams, } impl TryFrom<StorageSyncBlock> for SyncBlock { @@ -89,6 +96,12 @@ impl TryFrom<StorageSyncBlock> for SyncBlock { .decode_column("virtual_blocks")?, hash: parse_h256(&block.hash).decode_column("hash")?, protocol_version: parse_protocol_version(block.protocol_version)?, + pubdata_params: PubdataParams { + pubdata_type: L1BatchCommitmentMode::from_str(&block.pubdata_type) + .decode_column("Invalid pubdata type")?, + l2_da_validator_address: parse_h160(&block.l2_da_validator_address) + .decode_column("l2_da_validator_address")?, + }, }) } } @@ -109,6 +122,7 @@ impl SyncBlock { virtual_blocks: Some(self.virtual_blocks), hash: Some(self.hash), protocol_version: self.protocol_version, + pubdata_params: Some(self.pubdata_params), } } @@ -125,6 +139,7 @@ impl SyncBlock { operator_address: self.fee_account_address, transactions, last_in_batch: self.last_in_batch, + pubdata_params: self.pubdata_params, } } } diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 78daaebb335e..cceebc85cf2b 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -6,21 +6,21 @@ use sqlx::types::chrono::{DateTime, NaiveDateTime, Utc}; use zksync_types::{ api::{self, TransactionDetails, TransactionReceipt, TransactionStatus}, fee::Fee, + h256_to_address, l1::{OpProcessingType, PriorityQueueType}, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTxCommonData, transaction_request::PaymasterParams, web3::Bytes, Address, Execute, ExecuteTransactionCommon, L1TxCommonData, L2ChainId, L2TxCommonData, Nonce, - PackedEthSignature, PriorityOpId, ProtocolVersionId, Transaction, EIP_1559_TX_TYPE, - EIP_2930_TX_TYPE, EIP_712_TX_TYPE, H160, H256, PRIORITY_OPERATION_L2_TX_TYPE, - PROTOCOL_UPGRADE_TX_TYPE, U256, U64, + PackedEthSignature, PriorityOpId, ProtocolVersionId, Transaction, + TransactionTimeRangeConstraint, EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, EIP_712_TX_TYPE, H160, + H256, PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, U256, U64, }; -use
zksync_utils::{bigdecimal_to_u256, h256_to_account_address}; use zksync_vm_interface::Call; use super::call::{LegacyCall, LegacyMixedCall}; -use crate::BigDecimal; +use crate::{models::bigdecimal_to_u256, BigDecimal}; #[derive(Debug, Clone, sqlx::FromRow)] #[cfg_attr(test, derive(Default))] pub struct StorageTransaction { @@ -68,6 +68,9 @@ pub struct StorageTransaction { pub created_at: NaiveDateTime, pub updated_at: NaiveDateTime, + pub timestamp_asserter_range_start: Option<NaiveDateTime>, + pub timestamp_asserter_range_end: Option<NaiveDateTime>, + // DEPRECATED. pub l1_block_number: Option<i32>, } @@ -321,6 +324,18 @@ impl From<StorageTransaction> for Transaction { } } +impl From<&StorageTransaction> for TransactionTimeRangeConstraint { + fn from(tx: &StorageTransaction) -> Self { + Self { + timestamp_asserter_range: tx.timestamp_asserter_range_start.and_then(|start| { + tx.timestamp_asserter_range_end.map(|end| { + (start.and_utc().timestamp() as u64)..(end.and_utc().timestamp() as u64) + }) + }), + } + } +} + #[derive(sqlx::FromRow)] pub(crate) struct StorageTransactionReceipt { pub error: Option<String>, @@ -388,11 +403,10 @@ impl From<StorageTransactionReceipt> for TransactionReceipt { ), contract_address: storage_receipt .contract_address - .map(|addr| h256_to_account_address(&H256::from_slice(&addr))), + .map(|addr| h256_to_address(&H256::from_slice(&addr))), logs: vec![], l2_to_l1_logs: vec![], status, - root: block_hash, logs_bloom: Default::default(), // Even though the Rust SDK recommends us to supply "None" for legacy transactions // we always supply some number anyway to have the same behavior as most popular RPCs @@ -527,6 +541,13 @@ impl StorageApiTransaction { .or_else(|| self.max_fee_per_gas.clone()) .unwrap_or_else(BigDecimal::zero), }; + // Legacy transactions are not supposed to have `yParity` and are reliant on `v` instead. + // Other transactions are required to have `yParity` which replaces the deprecated `v` value + // (still included for backwards compatibility).
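// For reference: a legacy (EIP-155) signature folds the parity bit into `v` as
// `v = 35 + 2 * chain_id + parity`, whereas typed transactions (EIP-2930, EIP-1559)
// carry the raw 0/1 parity bit, which is exactly what `yParity` exposes. That is why
// the match below emits `y_parity` only for formats other than `None`/`Some(0)`.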
+ let y_parity = match self.tx_format { + None | Some(0) => None, + _ => signature.as_ref().map(|s| U64::from(s.v())), + }; let mut tx = api::Transaction { hash: H256::from_slice(&self.tx_hash), nonce: U256::from(self.nonce.unwrap_or(0) as u64), @@ -539,6 +560,7 @@ impl StorageApiTransaction { gas_price: Some(bigdecimal_to_u256(gas_price)), gas: bigdecimal_to_u256(self.gas_limit.unwrap_or_else(BigDecimal::zero)), input: serde_json::from_value(self.calldata).expect("incorrect calldata in Postgres"), + y_parity, v: signature.as_ref().map(|s| U64::from(s.v())), r: signature.as_ref().map(|s| U256::from(s.r())), s: signature.as_ref().map(|s| U256::from(s.s())), diff --git a/core/lib/dal/src/models/storage_verification_request.rs b/core/lib/dal/src/models/storage_verification_request.rs index 61895fab76d3..ae4718e41290 100644 --- a/core/lib/dal/src/models/storage_verification_request.rs +++ b/core/lib/dal/src/models/storage_verification_request.rs @@ -12,7 +12,7 @@ pub struct StorageVerificationRequest { pub contract_address: Vec<u8>, pub source_code: String, pub contract_name: String, - pub zk_compiler_version: String, + pub zk_compiler_version: Option<String>, pub compiler_version: String, pub optimization_used: bool, pub optimizer_mode: Option<String>, diff --git a/core/lib/dal/src/models/tests.rs b/core/lib/dal/src/models/tests.rs index b4949dc101d6..c30c84702b13 100644 --- a/core/lib/dal/src/models/tests.rs +++ b/core/lib/dal/src/models/tests.rs @@ -1,4 +1,6 @@ +use bigdecimal::num_bigint::BigInt; use chrono::Utc; +use rand::{prelude::StdRng, Rng, SeedableRng}; use zksync_types::{ fee::Fee, l1::{OpProcessingType, PriorityQueueType}, @@ -7,9 +9,9 @@ use zksync_types::{ Address, Execute, ExecuteTransactionCommon, Transaction, EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, EIP_712_TX_TYPE, H160, H256, PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, U256, }; -use zksync_utils::bigdecimal_to_u256; -use crate::{models::storage_transaction::StorageTransaction, BigDecimal}; +use super::*; +use crate::models::storage_transaction::StorageTransaction; fn default_execute() -> Execute { Execute { @@ -96,6 +98,49 @@ fn l2_storage_tx(tx_format: i32) -> StorageTransaction { } } +#[test] +fn test_u256_to_bigdecimal() { + const RNG_SEED: u64 = 123; + + let mut rng = StdRng::seed_from_u64(RNG_SEED); + // Small values.
+ for _ in 0..10_000 { + let value: u64 = rng.gen(); + let expected = BigDecimal::from(value); + assert_eq!(u256_to_big_decimal(value.into()), expected); + } + + // Arbitrary values + for _ in 0..10_000 { + let u64_digits: [u64; 4] = rng.gen(); + let value = u64_digits + .iter() + .enumerate() + .map(|(i, &digit)| U256::from(digit) << (i * 64)) + .fold(U256::zero(), |acc, x| acc + x); + let expected_value = u64_digits + .iter() + .enumerate() + .map(|(i, &digit)| BigInt::from(digit) << (i * 64)) + .fold(BigInt::from(0), |acc, x| acc + x); + assert_eq!( + u256_to_big_decimal(value), + BigDecimal::new(expected_value, 0) + ); + } +} + +#[test] +fn test_bigdecimal_to_u256() { + let value = BigDecimal::from(100u32); + let expected = U256::from(100u32); + assert_eq!(bigdecimal_to_u256(value), expected); + + let value = BigDecimal::new(BigInt::from(100), -2); + let expected = U256::from(10000u32); + assert_eq!(bigdecimal_to_u256(value), expected); +} + #[test] fn storage_tx_to_l1_tx() { let stx = l1_storage_tx(); diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index 3382d8c836e5..fcc756e30069 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -190,6 +190,43 @@ impl ProtocolVersionsDal<'_, '_> { ProtocolVersionId::try_from(row.id as u16).map_err(|err| sqlx::Error::Decode(err.into())) } + /// Returns base system contracts' hashes. Prefer `load_base_system_contracts_by_version_id` if + /// you also want to load the contracts themselves AND expect the contracts to be in the DB + /// already. + pub async fn get_base_system_contract_hashes_by_version_id( + &mut self, + version_id: u16, + ) -> anyhow::Result<Option<BaseSystemContractsHashes>> { + let row = sqlx::query!( + r#" + SELECT + bootloader_code_hash, + default_account_code_hash, + evm_emulator_code_hash + FROM + protocol_versions + WHERE + id = $1 + "#, + i32::from(version_id) + ) + .instrument("get_base_system_contract_hashes_by_version_id") + .with_arg("version_id", &version_id) + .fetch_optional(self.storage) + .await + .context("cannot fetch system contract hashes")?; + + Ok(if let Some(row) = row { + Some(BaseSystemContractsHashes { + bootloader: H256::from_slice(&row.bootloader_code_hash), + default_aa: H256::from_slice(&row.default_account_code_hash), + evm_emulator: row.evm_emulator_code_hash.as_deref().map(H256::from_slice), + }) + } else { + None + }) + } + pub async fn load_base_system_contracts_by_version_id( + &mut self, + version_id: u16, @@ -207,7 +244,9 @@ impl ProtocolVersionsDal<'_, '_> { "#, i32::from(version_id) ) - .fetch_optional(self.storage.conn()) + .instrument("load_base_system_contracts_by_version_id") + .with_arg("version_id", &version_id) + .fetch_optional(self.storage) .await .context("cannot fetch system contract hashes")?; diff --git a/core/lib/dal/src/pruning_dal/tests.rs b/core/lib/dal/src/pruning_dal/tests.rs index 4f94ff7f63d3..70dda48d8c82 100644 --- a/core/lib/dal/src/pruning_dal/tests.rs +++ b/core/lib/dal/src/pruning_dal/tests.rs @@ -5,7 +5,7 @@ use zksync_types::{ tx::IncludedTxLocation, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersion, ProtocolVersionId, StorageKey, StorageLog, H256, }; -use zksync_vm_interface::TransactionExecutionMetrics; +use zksync_vm_interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use super::*; use crate::{ @@ -457,7 +457,11 @@ async fn transactions_are_handled_correctly_after_pruning() { let tx = mock_l2_transaction(); let tx_hash = tx.hash();
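// Context for the updated call sites below: `insert_transaction_l2` now takes a third
// `ValidationTraces` argument, the traces collected while validating the account,
// including the optional timestamp-asserter range that the DAL persists. Tests that
// don't exercise validation simply pass `ValidationTraces::default()`.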
conn.transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); conn.blocks_dal() diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index adad6eb7e1db..1675d76643c2 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -225,60 +225,13 @@ impl StorageLogsDal<'_, '_> { Ok(()) } - pub async fn is_contract_deployed_at_address(&mut self, address: Address) -> bool { - let hashed_key = get_code_key(&address).hashed_key(); - let row = sqlx::query!( - r#" - SELECT - COUNT(*) AS "count!" - FROM - ( - SELECT - * - FROM - storage_logs - WHERE - hashed_key = $1 - AND miniblock_number <= COALESCE( - ( - SELECT - MAX(number) - FROM - miniblocks - ), - ( - SELECT - miniblock_number - FROM - snapshot_recovery - ) - ) - ORDER BY - miniblock_number DESC, - operation_number DESC - LIMIT - 1 - ) sl - WHERE - sl.value != $2 - "#, - hashed_key.as_bytes(), - FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes(), - ) - .fetch_one(self.storage.conn()) - .await - .unwrap(); - - row.count > 0 - } - /// Returns addresses and the corresponding deployment L2 block numbers among the specified contract /// `addresses`. `at_l2_block` allows filtering deployment by L2 blocks. pub async fn filter_deployed_contracts( &mut self, addresses: impl Iterator<Item = Address>, at_l2_block: Option<L2BlockNumber>, - ) -> DalResult<HashMap<Address, L2BlockNumber>> { + ) -> DalResult<HashMap<Address, (L2BlockNumber, H256)>> { let (bytecode_hashed_keys, address_by_hashed_key): (Vec<_>, HashMap<_, _>) = addresses .map(|address| { let hashed_key = get_code_key(&address).hashed_key().0; @@ -330,12 +283,13 @@ impl StorageLogsDal<'_, '_> { .await?; let deployment_data = rows.into_iter().filter_map(|row| { - if row.value == FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH.as_bytes() { + let bytecode_hash = H256::from_slice(&row.value); + if bytecode_hash == FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH { return None; } let l2_block_number = L2BlockNumber(row.miniblock_number as u32); let address = address_by_hashed_key[row.hashed_key.as_slice()]; - Some((address, l2_block_number)) + Some((address, (l2_block_number, bytecode_hash))) }); Ok(deployment_data.collect()) } @@ -727,7 +681,7 @@ impl StorageLogsDal<'_, '_> { FROM storage_logs WHERE - storage_logs.miniblock_number = $1 + storage_logs.miniblock_number <= $1 AND storage_logs.hashed_key >= u.start_key AND storage_logs.hashed_key <= u.end_key ORDER BY @@ -784,7 +738,7 @@ impl StorageLogsDal<'_, '_> { storage_logs INNER JOIN initial_writes ON storage_logs.hashed_key = initial_writes.hashed_key WHERE - storage_logs.miniblock_number = $1 + storage_logs.miniblock_number <= $1 AND storage_logs.hashed_key >= $2::bytea AND storage_logs.hashed_key <= $3::bytea ORDER BY @@ -1168,8 +1122,9 @@ mod tests { async fn filtering_deployed_contracts() { let contract_address = Address::repeat_byte(1); let other_contract_address = Address::repeat_byte(23); + let bytecode_hash = H256::repeat_byte(0xff); let successful_deployment = - StorageLog::new_write_log(get_code_key(&contract_address), H256::repeat_byte(0xff)); + StorageLog::new_write_log(get_code_key(&contract_address), bytecode_hash); let failed_deployment = StorageLog::new_write_log( get_code_key(&contract_address), FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, ); @@ -1233,7 +1188,7 @@ mod tests { .unwrap(); assert_eq!( deployed_map, - HashMap::from([(contract_address, L2BlockNumber(2))]) + HashMap::from([(contract_address, (L2BlockNumber(2),
bytecode_hash))]) ); } @@ -1268,7 +1223,7 @@ mod tests { .unwrap(); assert_eq!( deployed_map, - HashMap::from([(contract_address, L2BlockNumber(2))]) + HashMap::from([(contract_address, (L2BlockNumber(2), bytecode_hash))]) ); for new_l2_block in [None, Some(L2BlockNumber(3))] { @@ -1283,8 +1238,8 @@ mod tests { assert_eq!( deployed_map, HashMap::from([ - (contract_address, L2BlockNumber(2)), - (other_contract_address, L2BlockNumber(3)), + (contract_address, (L2BlockNumber(2), bytecode_hash)), + (other_contract_address, (L2BlockNumber(3), bytecode_hash)), ]) ); } diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index 10d2cfe61525..794f49c59ac4 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -6,12 +6,11 @@ use zksync_db_connection::{ instrument::{InstrumentExt, Instrumented}, }; use zksync_types::{ - get_code_key, get_nonce_key, + get_code_key, get_nonce_key, h256_to_u256, utils::{decompose_full_nonce, storage_key_for_standard_token_balance}, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, Nonce, StorageKey, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H256, U256, }; -use zksync_utils::h256_to_u256; use crate::{models::storage_block::ResolvedL1BatchForL2Block, Core, CoreDal}; diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index 265c61354887..4372a83f1feb 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -56,7 +56,9 @@ impl SyncDal<'_, '_> { miniblocks.virtual_blocks, miniblocks.hash, miniblocks.protocol_version AS "protocol_version!", - miniblocks.fee_account_address AS "fee_account_address!" + miniblocks.fee_account_address AS "fee_account_address!", + miniblocks.l2_da_validator_address AS "l2_da_validator_address!", + miniblocks.pubdata_type AS "pubdata_type!" 
FROM miniblocks WHERE @@ -111,7 +113,7 @@ mod tests { block::{L1BatchHeader, L2BlockHeader}, Address, L1BatchNumber, ProtocolVersion, ProtocolVersionId, Transaction, }; - use zksync_vm_interface::TransactionExecutionMetrics; + use zksync_vm_interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use super::*; use crate::{ @@ -166,7 +168,11 @@ mod tests { }; let tx = mock_l2_transaction(); conn.transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); conn.blocks_dal() diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index db56b9d0e3e7..755d02769101 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -10,10 +10,7 @@ use zksync_db_connection::{ }; use zksync_types::{tee_types::TeeType, L1BatchNumber}; -use crate::{ - models::storage_tee_proof::StorageTeeProof, - tee_verifier_input_producer_dal::TeeVerifierInputProducerJobStatus, Core, -}; +use crate::{models::storage_tee_proof::StorageTeeProof, Core}; #[derive(Debug)] pub struct TeeProofGenerationDal<'a, 'c> { @@ -35,65 +32,76 @@ impl TeeProofGenerationDal<'_, '_> { &mut self, tee_type: TeeType, processing_timeout: Duration, - min_batch_number: Option<L1BatchNumber>, + min_batch_number: L1BatchNumber, ) -> DalResult<Option<L1BatchNumber>> { let processing_timeout = pg_interval_from_duration(processing_timeout); - let min_batch_number = min_batch_number.map_or(0, |num| i64::from(num.0)); - let query = sqlx::query!( + let min_batch_number = i64::from(min_batch_number.0); + sqlx::query!( r#" - UPDATE tee_proof_generation_details - SET - status = $1, - updated_at = NOW(), - prover_taken_at = NOW() - WHERE - tee_type = $2 - AND l1_batch_number = ( - SELECT - proofs.l1_batch_number - FROM - tee_proof_generation_details AS proofs - JOIN - tee_verifier_input_producer_jobs AS inputs - ON proofs.l1_batch_number = inputs.l1_batch_number - WHERE - inputs.status = $3 - AND ( - proofs.status = $4 + WITH upsert AS ( + SELECT + p.l1_batch_number + FROM + proof_generation_details p + LEFT JOIN + tee_proof_generation_details tee + ON + p.l1_batch_number = tee.l1_batch_number + AND tee.tee_type = $1 + WHERE + ( + p.l1_batch_number >= $5 + AND p.vm_run_data_blob_url IS NOT NULL + AND p.proof_gen_data_blob_url IS NOT NULL + ) + AND ( + tee.l1_batch_number IS NULL + OR ( + tee.status = $3 OR ( - proofs.status = $1 - AND proofs.prover_taken_at < NOW() - $5::INTERVAL + tee.status = $2 + AND tee.prover_taken_at < NOW() - $4::INTERVAL ) ) - AND proofs.l1_batch_number >= $6 - ORDER BY - l1_batch_number ASC - LIMIT - 1 - FOR UPDATE - SKIP LOCKED - ) + ) + FETCH FIRST ROW ONLY + ) + + INSERT INTO + tee_proof_generation_details ( + l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at + ) + SELECT + l1_batch_number, + $1, + $2, + NOW(), + NOW(), + NOW() + FROM + upsert + ON CONFLICT (l1_batch_number, tee_type) DO + UPDATE + SET + status = $2, + updated_at = NOW(), + prover_taken_at = NOW() RETURNING - tee_proof_generation_details.l1_batch_number + l1_batch_number "#, - TeeProofGenerationJobStatus::PickedByProver.to_string(), tee_type.to_string(), - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, + TeeProofGenerationJobStatus::PickedByProver.to_string(), TeeProofGenerationJobStatus::Unpicked.to_string(), processing_timeout, min_batch_number - ); - - let batch_number =
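// The rewritten query above changes the locking strategy: instead of selecting an
// existing `tee_proof_generation_details` row with `FOR UPDATE SKIP LOCKED`, it first
// computes an eligible batch in the `WITH upsert AS (...)` CTE (left-joining
// `proof_generation_details` against any existing TEE row) and then atomically claims
// it via `INSERT ... ON CONFLICT (l1_batch_number, tee_type) DO UPDATE`. This also
// covers batches for which no TEE row was ever inserted, which is why the separate
// `insert_tee_proof_generation_job` call is demoted to a test-only helper further down.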
Instrumented::new("lock_batch_for_proving") - .with_arg("tee_type", &tee_type) - .with_arg("processing_timeout", &processing_timeout) - .with_arg("l1_batch_number", &min_batch_number) - .with(query) - .fetch_optional(self.storage) - .await? - .map(|row| L1BatchNumber(row.l1_batch_number as u32)); - - Ok(batch_number) + ) + .instrument("lock_batch_for_proving") + .with_arg("tee_type", &tee_type) + .with_arg("processing_timeout", &processing_timeout) + .with_arg("l1_batch_number", &min_batch_number) + .fetch_optional(self.storage) + .await + .map(|record| record.map(|record| L1BatchNumber(record.l1_batch_number as u32))) } pub async fn unlock_batch( @@ -176,38 +184,6 @@ impl TeeProofGenerationDal<'_, '_> { Ok(()) } - pub async fn insert_tee_proof_generation_job( - &mut self, - batch_number: L1BatchNumber, - tee_type: TeeType, - ) -> DalResult<()> { - let batch_number = i64::from(batch_number.0); - let query = sqlx::query!( - r#" - INSERT INTO - tee_proof_generation_details ( - l1_batch_number, tee_type, status, created_at, updated_at - ) - VALUES - ($1, $2, $3, NOW(), NOW()) - ON CONFLICT (l1_batch_number, tee_type) DO NOTHING - "#, - batch_number, - tee_type.to_string(), - TeeProofGenerationJobStatus::Unpicked.to_string(), - ); - let instrumentation = Instrumented::new("insert_tee_proof_generation_job") - .with_arg("l1_batch_number", &batch_number) - .with_arg("tee_type", &tee_type); - instrumentation - .clone() - .with(query) - .execute(self.storage) - .await?; - - Ok(()) - } - pub async fn save_attestation(&mut self, pubkey: &[u8], attestation: &[u8]) -> DalResult<()> { let query = sqlx::query!( r#" @@ -271,6 +247,40 @@ impl TeeProofGenerationDal<'_, '_> { Ok(proofs) } + /// For testing purposes only. + pub async fn insert_tee_proof_generation_job( + &mut self, + batch_number: L1BatchNumber, + tee_type: TeeType, + ) -> DalResult<()> { + let batch_number = i64::from(batch_number.0); + let query = sqlx::query!( + r#" + INSERT INTO + tee_proof_generation_details ( + l1_batch_number, tee_type, status, created_at, updated_at + ) + VALUES + ($1, $2, $3, NOW(), NOW()) + ON CONFLICT (l1_batch_number, tee_type) DO NOTHING + "#, + batch_number, + tee_type.to_string(), + TeeProofGenerationJobStatus::Unpicked.to_string(), + ); + let instrumentation = Instrumented::new("insert_tee_proof_generation_job") + .with_arg("l1_batch_number", &batch_number) + .with_arg("tee_type", &tee_type); + instrumentation + .clone() + .with(query) + .execute(self.storage) + .await?; + + Ok(()) + } + + /// For testing purposes only. 
pub async fn get_oldest_unpicked_batch(&mut self) -> DalResult> { let query = sqlx::query!( r#" @@ -278,18 +288,13 @@ impl TeeProofGenerationDal<'_, '_> { proofs.l1_batch_number FROM tee_proof_generation_details AS proofs - JOIN - tee_verifier_input_producer_jobs AS inputs - ON proofs.l1_batch_number = inputs.l1_batch_number WHERE - inputs.status = $1 - AND proofs.status = $2 + proofs.status = $1 ORDER BY proofs.l1_batch_number ASC LIMIT 1 "#, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, TeeProofGenerationJobStatus::Unpicked.to_string(), ); let batch_number = Instrumented::new("get_oldest_unpicked_batch") diff --git a/core/lib/dal/src/tee_verifier_input_producer_dal.rs b/core/lib/dal/src/tee_verifier_input_producer_dal.rs deleted file mode 100644 index dddb451a2d7d..000000000000 --- a/core/lib/dal/src/tee_verifier_input_producer_dal.rs +++ /dev/null @@ -1,234 +0,0 @@ -use std::time::{Duration, Instant}; - -use sqlx::postgres::types::PgInterval; -use zksync_db_connection::{ - connection::Connection, - error::DalResult, - instrument::InstrumentExt, - utils::{duration_to_naive_time, pg_interval_from_duration}, -}; -use zksync_types::L1BatchNumber; - -use crate::Core; - -#[derive(Debug)] -pub struct TeeVerifierInputProducerDal<'a, 'c> { - pub(crate) storage: &'a mut Connection<'c, Core>, -} - -/// The amount of attempts to process a job before giving up. -pub const JOB_MAX_ATTEMPT: i16 = 5; - -/// Time to wait for job to be processed -const JOB_PROCESSING_TIMEOUT: PgInterval = pg_interval_from_duration(Duration::from_secs(10 * 60)); - -/// Status of a job that the producer will work on. - -#[derive(Debug, sqlx::Type)] -#[sqlx(type_name = "tee_verifier_input_producer_job_status")] -pub enum TeeVerifierInputProducerJobStatus { - /// When the job is queued. Metadata calculator creates the job and marks it as queued. - Queued, - /// The job is not going to be processed. This state is designed for manual operations on DB. - /// It is expected to be used if some jobs should be skipped like: - /// - testing purposes (want to check a specific L1 Batch, I can mark everything before it skipped) - /// - trim down costs on some environments (if I've done breaking changes, - /// makes no sense to wait for everything to be processed, I can just skip them and save resources) - ManuallySkipped, - /// Currently being processed by one of the jobs. Transitory state, will transition to either - /// [`TeeVerifierInputProducerStatus::Successful`] or [`TeeVerifierInputProducerStatus::Failed`]. - InProgress, - /// The final (happy case) state we expect all jobs to end up. After the run is complete, - /// the job uploaded it's inputs, it lands in successful. - Successful, - /// The job failed for reasons. It will be marked as such and the error persisted in DB. - /// If it failed less than MAX_ATTEMPTs, the job will be retried, - /// otherwise it will stay in this state as final state. 
- Failed, -} - -impl TeeVerifierInputProducerDal<'_, '_> { - pub async fn create_tee_verifier_input_producer_job( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> DalResult<()> { - sqlx::query!( - r#" - INSERT INTO - tee_verifier_input_producer_jobs ( - l1_batch_number, status, created_at, updated_at - ) - VALUES - ($1, $2, NOW(), NOW()) - ON CONFLICT (l1_batch_number) DO NOTHING - "#, - i64::from(l1_batch_number.0), - TeeVerifierInputProducerJobStatus::Queued as TeeVerifierInputProducerJobStatus, - ) - .instrument("create_tee_verifier_input_producer_job") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .execute(self.storage) - .await?; - - Ok(()) - } - - pub async fn get_next_tee_verifier_input_producer_job( - &mut self, - ) -> DalResult> { - let l1_batch_number = sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - attempts = attempts + 1, - updated_at = NOW(), - processing_started_at = NOW() - WHERE - l1_batch_number = ( - SELECT - l1_batch_number - FROM - tee_verifier_input_producer_jobs - WHERE - status = $2 - OR ( - status = $1 - AND processing_started_at < NOW() - $4::INTERVAL - ) - OR ( - status = $3 - AND attempts < $5 - ) - ORDER BY - l1_batch_number ASC - LIMIT - 1 - FOR UPDATE - SKIP LOCKED - ) - RETURNING - tee_verifier_input_producer_jobs.l1_batch_number - "#, - TeeVerifierInputProducerJobStatus::InProgress as TeeVerifierInputProducerJobStatus, - TeeVerifierInputProducerJobStatus::Queued as TeeVerifierInputProducerJobStatus, - TeeVerifierInputProducerJobStatus::Failed as TeeVerifierInputProducerJobStatus, - &JOB_PROCESSING_TIMEOUT, - JOB_MAX_ATTEMPT, - ) - .instrument("get_next_tee_verifier_input_producer_job") - .report_latency() - .fetch_optional(self.storage) - .await? - .map(|job| L1BatchNumber(job.l1_batch_number as u32)); - - Ok(l1_batch_number) - } - - pub async fn get_tee_verifier_input_producer_job_attempts( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> DalResult> { - let attempts = sqlx::query!( - r#" - SELECT - attempts - FROM - tee_verifier_input_producer_jobs - WHERE - l1_batch_number = $1 - "#, - i64::from(l1_batch_number.0), - ) - .instrument("get_tee_verifier_input_producer_job_attempts") - .with_arg("l1_batch_number", &l1_batch_number) - .fetch_optional(self.storage) - .await? 
- .map(|job| job.attempts as u32); - - Ok(attempts) - } - - pub async fn mark_job_as_successful( - &mut self, - l1_batch_number: L1BatchNumber, - started_at: Instant, - object_path: &str, - ) -> DalResult<()> { - sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - updated_at = NOW(), - time_taken = $3, - input_blob_url = $4 - WHERE - l1_batch_number = $2 - "#, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, - i64::from(l1_batch_number.0), - duration_to_naive_time(started_at.elapsed()), - object_path, - ) - .instrument("mark_job_as_successful") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .execute(self.storage) - .await?; - - Ok(()) - } - - pub async fn mark_job_as_failed( - &mut self, - l1_batch_number: L1BatchNumber, - started_at: Instant, - error: String, - ) -> DalResult> { - let attempts = sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - updated_at = NOW(), - time_taken = $3, - error = $4 - WHERE - l1_batch_number = $2 - AND status != $5 - RETURNING - tee_verifier_input_producer_jobs.attempts - "#, - TeeVerifierInputProducerJobStatus::Failed as TeeVerifierInputProducerJobStatus, - i64::from(l1_batch_number.0), - duration_to_naive_time(started_at.elapsed()), - error, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, - ) - .instrument("mark_job_as_failed") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .fetch_optional(self.storage) - .await? - .map(|job| job.attempts as u32); - - Ok(attempts) - } -} - -/// These functions should only be used for tests. -impl TeeVerifierInputProducerDal<'_, '_> { - pub async fn delete_all_jobs(&mut self) -> DalResult<()> { - sqlx::query!( - r#" - DELETE FROM tee_verifier_input_producer_jobs - "# - ) - .instrument("delete_all_tee_verifier_jobs") - .execute(self.storage) - .await?; - Ok(()) - } -} diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index bf85008f7b58..11d4e55a55af 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -4,6 +4,7 @@ use zksync_contracts::BaseSystemContractsHashes; use zksync_db_connection::connection_pool::ConnectionPool; use zksync_types::{ block::{L1BatchHeader, L2BlockHasher, L2BlockHeader}, + commitment::PubdataParams, fee::Fee, fee_model::BatchFeeInput, helpers::unix_timestamp_ms, @@ -16,8 +17,8 @@ use zksync_types::{ L2ChainId, PriorityOpId, ProtocolVersion, ProtocolVersionId, H160, H256, U256, }; use zksync_vm_interface::{ - TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent, - VmExecutionMetrics, + tracer::ValidationTraces, TransactionExecutionMetrics, TransactionExecutionResult, + TxExecutionStatus, VmEvent, VmExecutionMetrics, }; use crate::{ @@ -52,6 +53,7 @@ pub(crate) fn create_l2_block_header(number: u32) -> L2BlockHeader { virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: PubdataParams::default(), } } @@ -208,14 +210,22 @@ async fn workflow_with_submit_tx_equal_hashes() { let tx = mock_l2_transaction(); let result = transactions_dal - .insert_transaction_l2(&tx, mock_tx_execution_metrics()) + .insert_transaction_l2( + &tx, + mock_tx_execution_metrics(), + ValidationTraces::default(), + ) .await .unwrap(); assert_eq!(result, L2TxSubmissionResult::Added); let result = transactions_dal - .insert_transaction_l2(&tx, mock_tx_execution_metrics()) + .insert_transaction_l2( + &tx, + mock_tx_execution_metrics(), + 
ValidationTraces::default(), + ) .await .unwrap(); @@ -234,7 +244,11 @@ async fn workflow_with_submit_tx_diff_hashes() { let initiator_address = tx.common_data.initiator_address; let result = transactions_dal - .insert_transaction_l2(&tx, mock_tx_execution_metrics()) + .insert_transaction_l2( + &tx, + mock_tx_execution_metrics(), + ValidationTraces::default(), + ) .await .unwrap(); @@ -244,7 +258,11 @@ async fn workflow_with_submit_tx_diff_hashes() { tx.common_data.nonce = nonce; tx.common_data.initiator_address = initiator_address; let result = transactions_dal - .insert_transaction_l2(&tx, mock_tx_execution_metrics()) + .insert_transaction_l2( + &tx, + mock_tx_execution_metrics(), + ValidationTraces::default(), + ) .await .unwrap(); @@ -268,13 +286,21 @@ async fn remove_stuck_txs() { let mut tx = mock_l2_transaction(); tx.received_timestamp_ms = unix_timestamp_ms() - Duration::new(1000, 0).as_millis() as u64; transactions_dal - .insert_transaction_l2(&tx, mock_tx_execution_metrics()) + .insert_transaction_l2( + &tx, + mock_tx_execution_metrics(), + ValidationTraces::default(), + ) .await .unwrap(); // Tx in mempool let tx = mock_l2_transaction(); transactions_dal - .insert_transaction_l2(&tx, mock_tx_execution_metrics()) + .insert_transaction_l2( + &tx, + mock_tx_execution_metrics(), + ValidationTraces::default(), + ) .await .unwrap(); @@ -291,7 +317,11 @@ async fn remove_stuck_txs() { executed_tx.received_timestamp_ms = unix_timestamp_ms() - Duration::new(1000, 0).as_millis() as u64; transactions_dal - .insert_transaction_l2(&executed_tx, mock_tx_execution_metrics()) + .insert_transaction_l2( + &executed_tx, + mock_tx_execution_metrics(), + ValidationTraces::default(), + ) .await .unwrap(); diff --git a/core/lib/dal/src/tokens_dal.rs b/core/lib/dal/src/tokens_dal.rs index 218e152fa82a..b5fd67fc63c8 100644 --- a/core/lib/dal/src/tokens_dal.rs +++ b/core/lib/dal/src/tokens_dal.rs @@ -98,7 +98,7 @@ impl TokensDal<'_, '_> { .filter_map(|address| { if address.is_zero() { None - } else if let Some(deployed_at) = token_deployment_data.get(&address) { + } else if let Some((deployed_at, _)) = token_deployment_data.get(&address) { (deployed_at > &block_number).then_some(address.0) } else { // Token belongs to a "pending" L2 block that's not yet fully inserted to the database. 
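The transactions_dal.rs hunk that follows persists the optional timestamp-asserter range from `ValidationTraces` into two nullable timestamp columns, clamping each `u64` bound so the conversion to a database timestamp cannot fail. A minimal self-contained sketch of that clamping, assuming only std and `chrono` (the function name and `range` parameter are stand-ins, and the sketch uses `DateTime::from_timestamp` where the diff itself calls the deprecated `NaiveDateTime::from_timestamp_opt`):

use std::{cmp::min, ops::Range};
use chrono::{DateTime, NaiveDateTime};

fn range_to_timestamps(
    range: Option<Range<u64>>,
) -> (Option<NaiveDateTime>, Option<NaiveDateTime>) {
    // Largest timestamp representable by `NaiveDateTime`, in seconds since the epoch.
    let max_ts = NaiveDateTime::MAX.and_utc().timestamp() as u64;
    // Clamp before converting so out-of-range `u64` values still map to a valid timestamp.
    let to_naive = |secs: u64| {
        DateTime::from_timestamp(min(secs, max_ts) as i64, 0).map(|dt| dt.naive_utc())
    };
    (
        range.as_ref().and_then(|r| to_naive(r.start)),
        range.as_ref().and_then(|r| to_naive(r.end)),
    )
}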
diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 5314e9799b33..a5dfb8932ddb 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, fmt, time::Duration}; +use std::{cmp::min, collections::HashMap, fmt, time::Duration}; use bigdecimal::BigDecimal; use itertools::Itertools; @@ -12,17 +12,18 @@ use zksync_db_connection::{ use zksync_types::{ block::L2BlockExecutionData, debug_flat_call::CallTraceMeta, l1::L1Tx, l2::L2Tx, protocol_upgrade::ProtocolUpgradeTx, Address, ExecuteTransactionCommon, L1BatchNumber, - L1BlockNumber, L2BlockNumber, PriorityOpId, ProtocolVersionId, Transaction, H256, - PROTOCOL_UPGRADE_TX_TYPE, U256, + L1BlockNumber, L2BlockNumber, PriorityOpId, ProtocolVersionId, Transaction, + TransactionTimeRangeConstraint, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, }; -use zksync_utils::u256_to_big_decimal; use zksync_vm_interface::{ - Call, TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, + tracer::ValidationTraces, Call, TransactionExecutionMetrics, TransactionExecutionResult, + TxExecutionStatus, }; use crate::{ - models::storage_transaction::{ - parse_call_trace, serialize_call_into_bytes, StorageTransaction, + models::{ + storage_transaction::{parse_call_trace, serialize_call_into_bytes, StorageTransaction}, + u256_to_big_decimal, }, Core, CoreDal, }; @@ -264,6 +265,7 @@ impl TransactionsDal<'_, '_> { &mut self, tx: &L2Tx, exec_info: TransactionExecutionMetrics, + validation_traces: ValidationTraces, ) -> DalResult { let tx_hash = tx.hash(); let is_duplicate = sqlx::query!( @@ -314,6 +316,16 @@ impl TransactionsDal<'_, '_> { let nanosecs = ((tx.received_timestamp_ms % 1000) * 1_000_000) as u32; #[allow(deprecated)] let received_at = NaiveDateTime::from_timestamp_opt(secs, nanosecs).unwrap(); + let max_timestamp = NaiveDateTime::MAX.and_utc().timestamp() as u64; + #[allow(deprecated)] + let timestamp_asserter_range_start = + validation_traces.timestamp_asserter_range.clone().map(|x| { + NaiveDateTime::from_timestamp_opt(min(x.start, max_timestamp) as i64, 0).unwrap() + }); + #[allow(deprecated)] + let timestamp_asserter_range_end = validation_traces.timestamp_asserter_range.map(|x| { + NaiveDateTime::from_timestamp_opt(min(x.end, max_timestamp) as i64, 0).unwrap() + }); // Besides just adding or updating(on conflict) the record, we want to extract some info // from the query below, to indicate what actually happened: // 1) transaction is added @@ -346,6 +358,8 @@ impl TransactionsDal<'_, '_> { paymaster_input, execution_info, received_at, + timestamp_asserter_range_start, + timestamp_asserter_range_end, created_at, updated_at ) @@ -376,6 +390,8 @@ impl TransactionsDal<'_, '_> { $18::INT ), $19, + $20, + $21, NOW(), NOW() ) @@ -406,6 +422,8 @@ impl TransactionsDal<'_, '_> { ), in_mempool = FALSE, received_at = $19, + timestamp_asserter_range_start = $20, + timestamp_asserter_range_end = $21, created_at = NOW(), updated_at = NOW(), error = NULL @@ -441,7 +459,9 @@ impl TransactionsDal<'_, '_> { exec_info.gas_used as i64, (exec_info.initial_storage_writes + exec_info.repeated_storage_writes) as i32, exec_info.contracts_used as i32, - received_at + received_at, + timestamp_asserter_range_start, + timestamp_asserter_range_end, ) .instrument("insert_transaction_l2") .with_arg("tx_hash", &tx_hash) @@ -1728,7 +1748,7 @@ impl TransactionsDal<'_, '_> { gas_per_pubdata: u32, fee_per_gas: u64, limit: usize, - ) -> DalResult> { + ) -> 
DalResult> { let stashed_addresses: Vec<_> = stashed_accounts.iter().map(Address::as_bytes).collect(); sqlx::query!( r#" @@ -1819,8 +1839,14 @@ impl TransactionsDal<'_, '_> { .fetch_all(self.storage) .await?; - let transactions = transactions.into_iter().map(|tx| tx.into()).collect(); - Ok(transactions) + let transactions_with_constraints = transactions + .into_iter() + .map(|tx| { + let constraint = TransactionTimeRangeConstraint::from(&tx); + (tx.into(), constraint) + }) + .collect(); + Ok(transactions_with_constraints) } pub async fn reset_mempool(&mut self) -> DalResult<()> { @@ -2212,6 +2238,29 @@ impl TransactionsDal<'_, '_> { .fetch_optional(self.storage) .await } + + pub async fn get_storage_tx_by_hash( + &mut self, + hash: H256, + ) -> DalResult> { + sqlx::query_as!( + StorageTransaction, + r#" + SELECT + * + FROM + transactions + WHERE + hash = $1 + "#, + hash.as_bytes() + ) + .map(Into::into) + .instrument("get_storage_tx_by_hash") + .with_arg("hash", &hash) + .fetch_optional(self.storage) + .await + } } #[cfg(test)] @@ -2240,7 +2289,11 @@ mod tests { let tx = mock_l2_transaction(); let tx_hash = tx.hash(); conn.transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); let mut tx_result = mock_execution_result(tx); diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index c2209bb9c938..44d7ed89c477 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -493,7 +493,7 @@ mod tests { use std::collections::HashMap; use zksync_types::{l2::L2Tx, Nonce, ProtocolVersion, ProtocolVersionId}; - use zksync_vm_interface::TransactionExecutionMetrics; + use zksync_vm_interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use super::*; use crate::{ @@ -509,7 +509,11 @@ mod tests { for tx in &txs { conn.transactions_dal() - .insert_transaction_l2(tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); } @@ -747,7 +751,11 @@ mod tests { tx.common_data.initiator_address = initiator; tx_by_nonce.insert(nonce, tx.clone()); conn.transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); } @@ -816,7 +824,11 @@ mod tests { tx.common_data.nonce = Nonce(1); tx.common_data.initiator_address = initiator; conn.transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); diff --git a/core/lib/env_config/src/contract_verifier.rs b/core/lib/env_config/src/contract_verifier.rs index 3079a8daa9cf..484e06341586 100644 --- a/core/lib/env_config/src/contract_verifier.rs +++ b/core/lib/env_config/src/contract_verifier.rs @@ -18,11 +18,8 @@ mod tests { fn expected_config() -> ContractVerifierConfig { ContractVerifierConfig { compilation_timeout: 30, - polling_interval: Some(1000), prometheus_port: 3314, - threads_per_server: Some(128), port: 3070, - url: "127.0.0.1:3070".to_string(), } } @@ -31,12 +28,8 @@ mod tests { let mut lock = MUTEX.lock(); let config = r#" CONTRACT_VERIFIER_COMPILATION_TIMEOUT=30 - 
CONTRACT_VERIFIER_POLLING_INTERVAL=1000 CONTRACT_VERIFIER_PROMETHEUS_PORT=3314 CONTRACT_VERIFIER_PORT=3070 - CONTRACT_VERIFIER_URL=127.0.0.1:3070 - CONTRACT_VERIFIER_THREADS_PER_SERVER=128 - "#; lock.set_env(config); diff --git a/core/lib/env_config/src/contracts.rs b/core/lib/env_config/src/contracts.rs index 298c43b80ccd..ae4e1d1d5b4f 100644 --- a/core/lib/env_config/src/contracts.rs +++ b/core/lib/env_config/src/contracts.rs @@ -63,6 +63,7 @@ mod tests { l2_weth_bridge_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), l1_shared_bridge_proxy_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), l2_shared_bridge_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), + l2_legacy_shared_bridge_addr: Some(addr("8656770FA78c830456B00B4fFCeE6b1De0e1b888")), l2_testnet_paymaster_addr: Some(addr("FC073319977e314F251EAE6ae6bE76B0B3BAeeCF")), l1_multicall3_addr: addr("0xcA11bde05977b3631167028862bE2a173976CA11"), ecosystem_contracts: Some(EcosystemContracts { @@ -72,6 +73,8 @@ mod tests { }), base_token_addr: Some(SHARED_BRIDGE_ETHER_TOKEN_ADDRESS), chain_admin_addr: Some(addr("0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff")), + l2_da_validator_addr: Some(addr("0xed6fa5c14e7550b4caf2aa2818d24c69cbc347ff")), + l2_timestamp_asserter_addr: Some(addr("0x0000000000000000000000000000000000000002")), } } @@ -93,11 +96,14 @@ CONTRACTS_L2_CONSENSUS_REGISTRY_ADDR="D64e136566a9E04eb05B30184fF577F52682D182" CONTRACTS_L1_MULTICALL3_ADDR="0xcA11bde05977b3631167028862bE2a173976CA11" CONTRACTS_L1_SHARED_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_SHARED_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" +CONTRACTS_L2_LEGACY_SHARED_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_BRIDGEHUB_PROXY_ADDR="0x35ea7f92f4c5f433efe15284e99c040110cf6297" CONTRACTS_STATE_TRANSITION_PROXY_ADDR="0xd90f1c081c6117241624e97cb6147257c3cb2097" CONTRACTS_TRANSPARENT_PROXY_ADMIN_ADDR="0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347e5" CONTRACTS_BASE_TOKEN_ADDR="0x0000000000000000000000000000000000000001" CONTRACTS_CHAIN_ADMIN_ADDR="0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff" +CONTRACTS_L2_DA_VALIDATOR_ADDR="0xed6fa5c14e7550b4caf2aa2818d24c69cbc347ff" +CONTRACTS_L2_TIMESTAMP_ASSERTER_ADDR="0x0000000000000000000000000000000000000002" "#; lock.set_env(config); diff --git a/core/lib/env_config/src/da_client.rs b/core/lib/env_config/src/da_client.rs index 0fc3ad216f87..8ceeb215faf4 100644 --- a/core/lib/env_config/src/da_client.rs +++ b/core/lib/env_config/src/da_client.rs @@ -2,19 +2,39 @@ use std::env; use zksync_config::configs::{ da_client::{ - avail::AvailSecrets, DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, - OBJECT_STORE_CLIENT_CONFIG_NAME, + avail::{ + AvailClientConfig, AvailSecrets, AVAIL_FULL_CLIENT_NAME, AVAIL_GAS_RELAY_CLIENT_NAME, + }, + celestia::CelestiaSecrets, + eigen::EigenSecrets, + DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, CELESTIA_CLIENT_CONFIG_NAME, + EIGEN_CLIENT_CONFIG_NAME, OBJECT_STORE_CLIENT_CONFIG_NAME, }, secrets::DataAvailabilitySecrets, + AvailConfig, }; use crate::{envy_load, FromEnv}; impl FromEnv for DAClientConfig { fn from_env() -> anyhow::Result { - let client_tag = std::env::var("DA_CLIENT")?; + let client_tag = env::var("DA_CLIENT")?; let config = match client_tag.as_str() { - AVAIL_CLIENT_CONFIG_NAME => Self::Avail(envy_load("da_avail_config", "DA_")?), + AVAIL_CLIENT_CONFIG_NAME => Self::Avail(AvailConfig { + bridge_api_url: env::var("DA_BRIDGE_API_URL").ok().unwrap(), + timeout_ms: env::var("DA_TIMEOUT_MS")?.parse()?, + config: 
match env::var("DA_AVAIL_CLIENT_TYPE")?.as_str() { + AVAIL_FULL_CLIENT_NAME => { + AvailClientConfig::FullClient(envy_load("da_avail_full_client", "DA_")?) + } + AVAIL_GAS_RELAY_CLIENT_NAME => { + AvailClientConfig::GasRelay(envy_load("da_avail_gas_relay", "DA_")?) + } + _ => anyhow::bail!("Unknown Avail DA client type"), + }, + }), + CELESTIA_CLIENT_CONFIG_NAME => Self::Celestia(envy_load("da_celestia_config", "DA_")?), + EIGEN_CLIENT_CONFIG_NAME => Self::Eigen(envy_load("da_eigen_config", "DA_")?), OBJECT_STORE_CLIENT_CONFIG_NAME => { Self::ObjectStore(envy_load("da_object_store", "DA_")?) } @@ -30,12 +50,37 @@ impl FromEnv for DataAvailabilitySecrets { let client_tag = std::env::var("DA_CLIENT")?; let secrets = match client_tag.as_str() { AVAIL_CLIENT_CONFIG_NAME => { - let seed_phrase = env::var("DA_SECRETS_SEED_PHRASE") - .ok() - .map(|s| s.parse()) - .transpose()?; - Self::Avail(AvailSecrets { seed_phrase }) + let seed_phrase: Option = + env::var("DA_SECRETS_SEED_PHRASE") + .ok() + .map(|s| s.parse().unwrap()); + let gas_relay_api_key: Option = + env::var("DA_SECRETS_GAS_RELAY_API_KEY") + .ok() + .map(|s| s.parse().unwrap()); + if seed_phrase.is_none() && gas_relay_api_key.is_none() { + anyhow::bail!("No secrets provided for Avail DA client"); + } + Self::Avail(AvailSecrets { + seed_phrase, + gas_relay_api_key, + }) + } + CELESTIA_CLIENT_CONFIG_NAME => { + let private_key = env::var("DA_SECRETS_PRIVATE_KEY") + .map_err(|e| anyhow::format_err!("Celestia private key not found: {}", e))? + .parse() + .map_err(|e| anyhow::format_err!("failed to parse the private key: {}", e))?; + Self::Celestia(CelestiaSecrets { private_key }) + } + EIGEN_CLIENT_CONFIG_NAME => { + let private_key = env::var("DA_SECRETS_PRIVATE_KEY") + .map_err(|e| anyhow::format_err!("Eigen private key not found: {}", e))? 
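// Illustrative sketch (editor's addition, not from the PR): the loader above dispatches twice.
// `DA_CLIENT` selects the DA backend, and for Avail, `DA_AVAIL_CLIENT_TYPE` selects the client
// flavor. A minimal standalone version of the same pattern, with a stand-in enum instead of the
// real configs; the "FullClient" tag is taken from the test config below, "GasRelay" is assumed:
use std::env;

#[derive(Debug)]
enum AvailKind {
    FullClient,
    GasRelay,
}

fn avail_kind_from_env() -> anyhow::Result<AvailKind> {
    Ok(match env::var("DA_AVAIL_CLIENT_TYPE")?.as_str() {
        "FullClient" => AvailKind::FullClient,
        "GasRelay" => AvailKind::GasRelay,
        other => anyhow::bail!("Unknown Avail DA client type: {other}"),
    })
}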
+ .parse() + .map_err(|e| anyhow::format_err!("failed to parse the private key: {}", e))?; + Self::Eigen(EigenSecrets { private_key }) } + _ => anyhow::bail!("Unknown DA client name: {}", client_tag), }; @@ -47,10 +92,13 @@ impl FromEnv for DataAvailabilitySecrets { mod tests { use zksync_config::{ configs::{ - da_client::{DAClientConfig, DAClientConfig::ObjectStore}, + da_client::{ + avail::{AvailClientConfig, AvailDefaultConfig}, + DAClientConfig::{self, ObjectStore}, + }, object_store::ObjectStoreMode::GCS, }, - AvailConfig, ObjectStoreConfig, + AvailConfig, CelestiaConfig, EigenConfig, ObjectStoreConfig, }; use super::*; @@ -90,15 +138,15 @@ mod tests { api_node_url: &str, bridge_api_url: &str, app_id: u32, - timeout: usize, - max_retries: usize, + timeout_ms: usize, ) -> DAClientConfig { DAClientConfig::Avail(AvailConfig { - api_node_url: api_node_url.to_string(), bridge_api_url: bridge_api_url.to_string(), - app_id, - timeout, - max_retries, + timeout_ms, + config: AvailClientConfig::FullClient(AvailDefaultConfig { + api_node_url: api_node_url.to_string(), + app_id, + }), }) } @@ -107,11 +155,13 @@ mod tests { let mut lock = MUTEX.lock(); let config = r#" DA_CLIENT="Avail" - DA_API_NODE_URL="localhost:12345" + DA_AVAIL_CLIENT_TYPE="FullClient" + DA_BRIDGE_API_URL="localhost:54321" + DA_TIMEOUT_MS="2000" + + DA_API_NODE_URL="localhost:12345" DA_APP_ID="1" - DA_TIMEOUT="2" - DA_MAX_RETRIES="3" "#; lock.set_env(config); @@ -123,8 +173,7 @@ mod tests { "localhost:12345", "localhost:54321", "1".parse::().unwrap(), - "2".parse::().unwrap(), - "3".parse::().unwrap(), + "2000".parse::().unwrap(), ) ); } @@ -139,13 +188,99 @@ mod tests { lock.set_env(config); - let actual = match DataAvailabilitySecrets::from_env().unwrap() { - DataAvailabilitySecrets::Avail(avail) => avail.seed_phrase, + let (actual_seed, actual_key) = match DataAvailabilitySecrets::from_env().unwrap() { + DataAvailabilitySecrets::Avail(avail) => (avail.seed_phrase, avail.gas_relay_api_key), + _ => { + panic!("Avail config expected") + } }; + assert_eq!( + (actual_seed.unwrap(), actual_key), + ( + "bottom drive obey lake curtain smoke basket hold race lonely fit walk" + .parse() + .unwrap(), + None + ) + ); + } + + fn expected_celestia_da_layer_config( + api_node_url: &str, + namespace: &str, + chain_id: &str, + timeout_ms: u64, + ) -> DAClientConfig { + DAClientConfig::Celestia(CelestiaConfig { + api_node_url: api_node_url.to_string(), + namespace: namespace.to_string(), + chain_id: chain_id.to_string(), + timeout_ms, + }) + } + + #[test] + fn from_env_celestia_client() { + let mut lock = MUTEX.lock(); + let config = r#" + DA_CLIENT="Celestia" + DA_API_NODE_URL="localhost:12345" + DA_NAMESPACE="0x1234567890abcdef" + DA_CHAIN_ID="mocha-4" + DA_TIMEOUT_MS="7000" + "#; + lock.set_env(config); + + let actual = DAClientConfig::from_env().unwrap(); + assert_eq!( + actual, + expected_celestia_da_layer_config( + "localhost:12345", + "0x1234567890abcdef", + "mocha-4", + 7000 + ) + ); + } + + #[test] + fn from_env_eigen_client() { + let mut lock = MUTEX.lock(); + let config = r#" + DA_CLIENT="Eigen" + DA_RPC_NODE_URL="localhost:12345" + DA_INCLUSION_POLLING_INTERVAL_MS="1000" + "#; + lock.set_env(config); + let actual = DAClientConfig::from_env().unwrap(); + assert_eq!( + actual, + DAClientConfig::Eigen(EigenConfig { + rpc_node_url: "localhost:12345".to_string(), + inclusion_polling_interval_ms: 1000, + }) + ); + } + + #[test] + fn from_env_celestia_secrets() { + let mut lock = MUTEX.lock(); + let config = r#" + DA_CLIENT="Celestia" 
+ DA_SECRETS_PRIVATE_KEY="f55baf7c0e4e33b1d78fbf52f069c426bc36cff1aceb9bc8f45d14c07f034d73" + "#; + + lock.set_env(config); + + let DataAvailabilitySecrets::Celestia(actual) = + DataAvailabilitySecrets::from_env().unwrap() + else { + panic!("expected Celestia config") + }; assert_eq!( - actual.unwrap(), - "bottom drive obey lake curtain smoke basket hold race lonely fit walk" + actual.private_key, + "f55baf7c0e4e33b1d78fbf52f069c426bc36cff1aceb9bc8f45d14c07f034d73" .parse() .unwrap() ); diff --git a/core/lib/env_config/src/database.rs b/core/lib/env_config/src/database.rs index c067c96de73e..ae4c3059ce32 100644 --- a/core/lib/env_config/src/database.rs +++ b/core/lib/env_config/src/database.rs @@ -1,23 +1,8 @@ -use std::{env, error, str::FromStr}; +use std::env; -use anyhow::Context as _; use zksync_config::{configs::DatabaseSecrets, DBConfig, PostgresConfig}; -use crate::{envy_load, FromEnv}; - -fn parse_optional_var(name: &str) -> anyhow::Result> -where - T: FromStr, - T::Err: 'static + error::Error + Send + Sync, -{ - env::var(name) - .ok() - .map(|val| { - val.parse() - .with_context(|| format!("failed to parse env variable {name}")) - }) - .transpose() -} +use crate::{envy_load, utils::parse_optional_var, FromEnv}; impl FromEnv for DBConfig { fn from_env() -> anyhow::Result { @@ -103,6 +88,7 @@ mod tests { DATABASE_MERKLE_TREE_MAX_L1_BATCHES_PER_ITER=50 DATABASE_EXPERIMENTAL_STATE_KEEPER_DB_BLOCK_CACHE_CAPACITY_MB=64 DATABASE_EXPERIMENTAL_STATE_KEEPER_DB_MAX_OPEN_FILES=100 + DATABASE_EXPERIMENTAL_MERKLE_TREE_REPAIR_STALE_KEYS=true "#; lock.set_env(config); @@ -124,6 +110,7 @@ mod tests { db_config.experimental.state_keeper_db_max_open_files, NonZeroU32::new(100) ); + assert!(db_config.experimental.merkle_tree_repair_stale_keys); } #[test] @@ -133,6 +120,7 @@ mod tests { "DATABASE_STATE_KEEPER_DB_PATH", "DATABASE_EXPERIMENTAL_STATE_KEEPER_DB_MAX_OPEN_FILES", "DATABASE_EXPERIMENTAL_STATE_KEEPER_DB_BLOCK_CACHE_CAPACITY_MB", + "DATABASE_EXPERIMENTAL_MERKLE_TREE_REPAIR_STALE_KEYS", "DATABASE_MERKLE_TREE_BACKUP_PATH", "DATABASE_MERKLE_TREE_PATH", "DATABASE_MERKLE_TREE_MODE", @@ -159,6 +147,7 @@ mod tests { 128 ); assert_eq!(db_config.experimental.state_keeper_db_max_open_files, None); + assert!(!db_config.experimental.merkle_tree_repair_stale_keys); // Check that new env variable for Merkle tree path is supported lock.set_env("DATABASE_MERKLE_TREE_PATH=/db/tree/main"); diff --git a/core/lib/env_config/src/eth_sender.rs b/core/lib/env_config/src/eth_sender.rs index e5132eb7d91f..0fd61fd173b6 100644 --- a/core/lib/env_config/src/eth_sender.rs +++ b/core/lib/env_config/src/eth_sender.rs @@ -41,7 +41,8 @@ impl FromEnv for GasAdjusterConfig { #[cfg(test)] mod tests { - use zksync_config::configs::eth_sender::{ProofSendingMode, PubdataSendingMode}; + use zksync_basic_types::pubdata_da::PubdataSendingMode; + use zksync_config::configs::eth_sender::ProofSendingMode; use super::*; use crate::test_utils::{hash, EnvMutex}; diff --git a/core/lib/env_config/src/external_price_api_client.rs b/core/lib/env_config/src/external_price_api_client.rs index 60ddeea83151..07ab40e059d4 100644 --- a/core/lib/env_config/src/external_price_api_client.rs +++ b/core/lib/env_config/src/external_price_api_client.rs @@ -43,6 +43,7 @@ mod tests { numerator: Some(100), denominator: Some(1), fluctuation: Some(10), + next_value_fluctuation: 1, }), } } @@ -57,6 +58,7 @@ mod tests { EXTERNAL_PRICE_API_CLIENT_FORCED_NUMERATOR=100 EXTERNAL_PRICE_API_CLIENT_FORCED_DENOMINATOR=1 EXTERNAL_PRICE_API_CLIENT_FORCED_FLUCTUATION=10 + 
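// Sketch (editor's addition, stand-in types): the Celestia secrets test above uses `let ... else`
// to assert an enum variant and panic otherwise; the same shape in isolation:
enum Secrets {
    Celestia(String),
    Other,
}

fn celestia_key(secrets: Secrets) -> String {
    let Secrets::Celestia(key) = secrets else {
        panic!("expected Celestia secrets");
    };
    key
}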
EXTERNAL_PRICE_API_CLIENT_FORCED_NEXT_VALUE_FLUCTUATION=1 "#; lock.set_env(config); diff --git a/core/lib/env_config/src/lib.rs b/core/lib/env_config/src/lib.rs index b72c2c5d5b94..325288056b35 100644 --- a/core/lib/env_config/src/lib.rs +++ b/core/lib/env_config/src/lib.rs @@ -33,6 +33,7 @@ mod vm_runner; mod wallets; mod da_client; +mod timestamp_asserter; pub trait FromEnv: Sized { fn from_env() -> anyhow::Result<Self>; } diff --git a/core/lib/env_config/src/proof_data_handler.rs b/core/lib/env_config/src/proof_data_handler.rs index f69aa1d6dc59..47848585e769 100644 --- a/core/lib/env_config/src/proof_data_handler.rs +++ b/core/lib/env_config/src/proof_data_handler.rs @@ -4,12 +4,18 @@ use crate::{envy_load, FromEnv}; impl FromEnv for ProofDataHandlerConfig { fn from_env() -> anyhow::Result<Self> { - envy_load("proof_data_handler", "PROOF_DATA_HANDLER_") + Ok(Self { + tee_config: envy_load("proof_data_handler.tee", "PROOF_DATA_HANDLER_")?, + ..envy_load("proof_data_handler", "PROOF_DATA_HANDLER_")? + }) } } #[cfg(test)] mod tests { + use zksync_basic_types::L1BatchNumber; + use zksync_config::configs::TeeConfig; + use super::*; use crate::test_utils::EnvMutex; @@ -19,7 +25,11 @@ mod tests { ProofDataHandlerConfig { http_port: 3320, proof_generation_timeout_in_secs: 18000, - tee_support: true, + tee_config: TeeConfig { + tee_support: true, + first_tee_processed_batch: L1BatchNumber(1337), + tee_proof_generation_timeout_in_secs: 600, + }, } } @@ -29,6 +39,8 @@ mod tests { PROOF_DATA_HANDLER_PROOF_GENERATION_TIMEOUT_IN_SECS="18000" PROOF_DATA_HANDLER_HTTP_PORT="3320" PROOF_DATA_HANDLER_TEE_SUPPORT="true" + PROOF_DATA_HANDLER_FIRST_TEE_PROCESSED_BATCH="1337" + PROOF_DATA_HANDLER_TEE_PROOF_GENERATION_TIMEOUT_IN_SECS="600" "#; let mut lock = MUTEX.lock(); lock.set_env(config); diff --git a/core/lib/env_config/src/timestamp_asserter.rs b/core/lib/env_config/src/timestamp_asserter.rs new file mode 100644 index 000000000000..df586f5925ee --- /dev/null +++ b/core/lib/env_config/src/timestamp_asserter.rs @@ -0,0 +1,34 @@ +use zksync_config::configs::chain::TimestampAsserterConfig; + +use crate::{envy_load, FromEnv}; + +impl FromEnv for TimestampAsserterConfig { + fn from_env() -> anyhow::Result<Self> { + envy_load("timestamp_asserter", "TIMESTAMP_ASSERTER_") + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); + + #[test] + fn from_env_timestamp_asserter() { + let mut lock = MUTEX.lock(); + let config = r#" + TIMESTAMP_ASSERTER_MIN_TIME_TILL_END_SEC=2 + "#; + lock.set_env(config); + + let actual = TimestampAsserterConfig::from_env().unwrap(); + assert_eq!( + actual, + TimestampAsserterConfig { + min_time_till_end_sec: 2, + } + ); + } +} diff --git a/core/lib/env_config/src/utils.rs b/core/lib/env_config/src/utils.rs index 211e73ae2b17..9f363777bf69 100644 --- a/core/lib/env_config/src/utils.rs +++ b/core/lib/env_config/src/utils.rs @@ -1,3 +1,6 @@ +use std::{env, error, str::FromStr}; + +use anyhow::Context; use zksync_config::configs::PrometheusConfig; use crate::{envy_load, FromEnv}; @@ -7,3 +10,17 @@ impl FromEnv for PrometheusConfig { envy_load("prometheus", "API_PROMETHEUS_") } } + +pub fn parse_optional_var<T>(name: &str) -> anyhow::Result<Option<T>> +where + T: FromStr, + T::Err: 'static + error::Error + Send + Sync, +{ + env::var(name) + .ok() + .map(|val| { + val.parse() + .with_context(|| format!("failed to parse env variable {name}")) + }) + .transpose() +} diff --git a/core/lib/env_config/src/vm_runner.rs
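// Usage sketch for the relocated `parse_optional_var` helper (editor's addition; runs inside the
// `env_config` crate, and `EXAMPLE_TIMEOUT_SEC` is a made-up variable name): an absent variable
// yields `Ok(None)`, while a present but unparsable one yields an error naming the variable.
fn parse_optional_var_example() -> anyhow::Result<()> {
    std::env::set_var("EXAMPLE_TIMEOUT_SEC", "30");
    let timeout: Option<u64> = parse_optional_var("EXAMPLE_TIMEOUT_SEC")?;
    assert_eq!(timeout, Some(30));
    let missing: Option<u64> = parse_optional_var("EXAMPLE_MISSING_VAR")?;
    assert_eq!(missing, None);
    Ok(())
}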
b/core/lib/env_config/src/vm_runner.rs index 730a79dd340a..0a29d1256bd2 100644 --- a/core/lib/env_config/src/vm_runner.rs +++ b/core/lib/env_config/src/vm_runner.rs @@ -55,6 +55,7 @@ mod tests { let mut lock = MUTEX.lock(); let config = r#" EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE=new + EXPERIMENTAL_VM_API_FAST_VM_MODE=shadow EXPERIMENTAL_VM_PLAYGROUND_FAST_VM_MODE=shadow EXPERIMENTAL_VM_PLAYGROUND_DB_PATH=/db/vm_playground EXPERIMENTAL_VM_PLAYGROUND_FIRST_PROCESSED_BATCH=123 @@ -64,6 +65,7 @@ mod tests { let config = ExperimentalVmConfig::from_env().unwrap(); assert_eq!(config.state_keeper_fast_vm_mode, FastVmMode::New); + assert_eq!(config.api_fast_vm_mode, FastVmMode::Shadow); assert_eq!(config.playground.fast_vm_mode, FastVmMode::Shadow); assert_eq!(config.playground.db_path.unwrap(), "/db/vm_playground"); assert_eq!(config.playground.first_processed_batch, L1BatchNumber(123)); diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs index 5e788509461d..de115cf6e7a6 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ b/core/lib/eth_client/src/clients/http/query.rs @@ -15,7 +15,7 @@ use crate::{ BaseFees, EthFeeInterface, EthInterface, RawTransactionBytes, }; -const FEE_HISTORY_MAX_REQUEST_CHUNK: usize = 1024; +const FEE_HISTORY_MAX_REQUEST_CHUNK: usize = 1023; #[async_trait] impl EthInterface for T @@ -304,14 +304,14 @@ where COUNTERS.call[&(Method::BaseFeeHistory, client.component())].inc(); let latency = LATENCIES.direct[&Method::BaseFeeHistory].start(); let mut history = Vec::with_capacity(block_count); - let from_block = upto_block.saturating_sub(block_count); + let from_block = upto_block.saturating_sub(block_count - 1); // Here we are requesting `fee_history` from blocks // `(from_block; upto_block)` in chunks of size `MAX_REQUEST_CHUNK` // starting from the oldest block. for chunk_start in (from_block..=upto_block).step_by(FEE_HISTORY_MAX_REQUEST_CHUNK) { let chunk_end = (chunk_start + FEE_HISTORY_MAX_REQUEST_CHUNK).min(upto_block); - let chunk_size = chunk_end - chunk_start; + let chunk_size = chunk_end - chunk_start + 1; let fee_history = client .fee_history( @@ -324,22 +324,50 @@ where .with_arg("block", &chunk_end) .await?; - // Check that the lengths are the same. + if fee_history.oldest_block != web3::BlockNumber::Number(chunk_start.into()) { + let oldest_block = match fee_history.oldest_block { + web3::BlockNumber::Number(oldest_block) => oldest_block.to_string(), + _ => format!("{:?}", fee_history.oldest_block), + }; + let message = + format!("unexpected `oldest_block`, expected: {chunk_start}, got {oldest_block}"); + return Err(EnrichedClientError::custom(message, "l1_fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("chunk_end", &chunk_end)); + } + + if fee_history.base_fee_per_gas.len() != chunk_size + 1 { + let message = format!( + "unexpected `base_fee_per_gas.len()`, expected: {}, got {}", + chunk_size + 1, + fee_history.base_fee_per_gas.len() + ); + return Err(EnrichedClientError::custom(message, "l1_fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("chunk_end", &chunk_end)); + } + // Per specification, the values should always be provided, and must be 0 for blocks // prior to EIP-4844. 
// https://ethereum.github.io/execution-apis/api-documentation/ - if fee_history.base_fee_per_gas.len() != fee_history.base_fee_per_blob_gas.len() { - tracing::error!( - "base_fee_per_gas and base_fee_per_blob_gas have different lengths: {} and {}", - fee_history.base_fee_per_gas.len(), + if fee_history.base_fee_per_blob_gas.len() != chunk_size + 1 { + let message = format!( + "unexpected `base_fee_per_blob_gas.len()`, expected: {}, got {}", + chunk_size + 1, fee_history.base_fee_per_blob_gas.len() ); + return Err(EnrichedClientError::custom(message, "l1_fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("chunk_end", &chunk_end)); } + // We take `chunk_size` entries for consistency with `l2_base_fee_history` which doesn't + // have correct data for block with number `upto_block + 1`. for (base, blob) in fee_history .base_fee_per_gas .into_iter() .zip(fee_history.base_fee_per_blob_gas) + .take(chunk_size) { let fees = BaseFees { base_fee_per_gas: cast_to_u64(base, "base_fee_per_gas")?, @@ -387,14 +415,14 @@ COUNTERS.call[&(Method::L2FeeHistory, client.component())].inc(); let latency = LATENCIES.direct[&Method::BaseFeeHistory].start(); let mut history = Vec::with_capacity(block_count); - let from_block = upto_block.saturating_sub(block_count); + let from_block = upto_block.saturating_sub(block_count - 1); // Here we are requesting `fee_history` from blocks // `(from_block; upto_block)` in chunks of size `FEE_HISTORY_MAX_REQUEST_CHUNK` // starting from the oldest block. for chunk_start in (from_block..=upto_block).step_by(FEE_HISTORY_MAX_REQUEST_CHUNK) { let chunk_end = (chunk_start + FEE_HISTORY_MAX_REQUEST_CHUNK).min(upto_block); - let chunk_size = chunk_end - chunk_start; + let chunk_size = chunk_end - chunk_start + 1; let fee_history = client .fee_history(U64::from(chunk_size).into(), chunk_end.into(), vec![]) .rpc_context("fee_history") .with_arg("chunk_size", &chunk_size) .with_arg("block", &chunk_end) .await?; - // Check that the lengths are the same. - if fee_history.inner.base_fee_per_gas.len() != fee_history.l2_pubdata_price.len() { - tracing::error!( - "base_fee_per_gas and pubdata_price have different lengths: {} and {}", - fee_history.inner.base_fee_per_gas.len(), + if fee_history.inner.oldest_block != web3::BlockNumber::Number(chunk_start.into()) { + let oldest_block = match fee_history.inner.oldest_block { + web3::BlockNumber::Number(oldest_block) => oldest_block.to_string(), + _ => format!("{:?}", fee_history.inner.oldest_block), + }; + let message = + format!("unexpected `oldest_block`, expected: {chunk_start}, got {oldest_block}"); + return Err(EnrichedClientError::custom(message, "l2_fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("chunk_end", &chunk_end)); + } + + if fee_history.inner.base_fee_per_gas.len() != chunk_size + 1 { + let message = format!( + "unexpected `base_fee_per_gas.len()`, expected: {}, got {}", + chunk_size + 1, + fee_history.inner.base_fee_per_gas.len() + ); + return Err(EnrichedClientError::custom(message, "l2_fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("chunk_end", &chunk_end)); + } + + if fee_history.l2_pubdata_price.len() != chunk_size { + let message = format!( + "unexpected `l2_pubdata_price.len()`, expected: {}, got {}", + chunk_size, fee_history.l2_pubdata_price.len() ); + return Err(EnrichedClientError::custom(message, "l2_fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("chunk_end", &chunk_end)); } + // We take `chunk_size` entries because base fee for block `upto_block + 1` may change.
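// Worked example of the inclusive-range arithmetic fixed above (editor's sketch, plain u64 math):
// requesting `block_count = 3` up to block 10 must cover blocks 8..=10, and `eth_feeHistory`
// returns `chunk_size + 1` base fees (the extra entry is the expected fee for the block after
// the range), which is what the length checks compare against.
fn fee_history_bounds() {
    let (upto_block, block_count): (u64, u64) = (10, 3);
    let from_block = upto_block.saturating_sub(block_count - 1);
    assert_eq!(from_block, 8); // 8..=10 is exactly `block_count` blocks
    let chunk_size = upto_block - from_block + 1;
    assert_eq!(chunk_size, 3); // so `base_fee_per_gas` should hold 4 entries
}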
for (base, l2_pubdata_price) in fee_history .inner .base_fee_per_gas .into_iter() + .take(chunk_size) .zip(fee_history.l2_pubdata_price) { let fees = BaseFees { diff --git a/core/lib/eth_client/src/clients/mock.rs b/core/lib/eth_client/src/clients/mock.rs index b33554b6292c..8e81b6c6f209 100644 --- a/core/lib/eth_client/src/clients/mock.rs +++ b/core/lib/eth_client/src/clients/mock.rs @@ -415,25 +415,35 @@ fn l2_eth_fee_history( let from_block = from_block.as_usize(); let start_block = from_block.saturating_sub(block_count.as_usize() - 1); + // duplicates last value to follow `feeHistory` response format, it should return `block_count + 1` values + let base_fee_per_gas = base_fee_history[start_block..=from_block] + .iter() + .chain([&base_fee_history[from_block]]) + .map(|fee| U256::from(fee.base_fee_per_gas)) + .collect(); + + // duplicates last value to follow `feeHistory` response format, it should return `block_count + 1` values + let base_fee_per_blob_gas = base_fee_history[start_block..=from_block] + .iter() + .chain([&base_fee_history[from_block]]) // duplicate last value + .map(|fee| fee.base_fee_per_blob_gas) + .collect(); + + let l2_pubdata_price = base_fee_history[start_block..=from_block] + .iter() + .map(|fee| fee.l2_pubdata_price) + .collect(); + FeeHistory { inner: web3::FeeHistory { oldest_block: start_block.into(), - base_fee_per_gas: base_fee_history[start_block..=from_block] - .iter() - .map(|fee| U256::from(fee.base_fee_per_gas)) - .collect(), - base_fee_per_blob_gas: base_fee_history[start_block..=from_block] - .iter() - .map(|fee| fee.base_fee_per_blob_gas) - .collect(), + base_fee_per_gas, + base_fee_per_blob_gas, gas_used_ratio: vec![], // not used blob_gas_used_ratio: vec![], // not used reward: None, }, - l2_pubdata_price: base_fee_history[start_block..=from_block] - .iter() - .map(|fee| fee.l2_pubdata_price) - .collect(), + l2_pubdata_price, } } diff --git a/core/lib/external_price_api/src/forced_price_client.rs b/core/lib/external_price_api/src/forced_price_client.rs index a18c03fd8cab..cca2e7cce2a0 100644 --- a/core/lib/external_price_api/src/forced_price_client.rs +++ b/core/lib/external_price_api/src/forced_price_client.rs @@ -1,17 +1,23 @@ -use std::num::NonZeroU64; +use std::{ + cmp::{max, min}, + num::NonZeroU64, +}; use async_trait::async_trait; use rand::Rng; +use tokio::sync::Mutex; use zksync_config::configs::ExternalPriceApiClientConfig; use zksync_types::{base_token_ratio::BaseTokenAPIRatio, Address}; use crate::PriceAPIClient; // Struct for a forced price "client" (conversion ratio is always a configured "forced" ratio). 
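// Sketch of the mock convention above (editor's addition): `eth_feeHistory` responses carry
// `block_count + 1` base fees, so the mock appends a copy of the last known value to stand in
// for the "next block" fee.
fn with_next_block_fee(history: &[u64], start: usize, end: usize) -> Vec<u64> {
    history[start..=end]
        .iter()
        .chain([&history[end]]) // duplicate last value, as in `l2_eth_fee_history`
        .copied()
        .collect()
}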
-#[derive(Debug, Clone)] +#[derive(Debug)] pub struct ForcedPriceClient { ratio: BaseTokenAPIRatio, + previous_numerator: Mutex, fluctuation: Option, + next_value_fluctuation: u32, } impl ForcedPriceClient { @@ -29,42 +35,70 @@ impl ForcedPriceClient { let fluctuation = forced_price_client_config .fluctuation .map(|x| x.clamp(0, 100)); + let next_value_fluctuation = forced_price_client_config + .next_value_fluctuation + .clamp(0, 100); - Self { - ratio: BaseTokenAPIRatio { + let ratio = if numerator < 100 && fluctuation.is_some_and(|f| f > 0) { + // If numerator is too small we need to multiply by 100 to make sure fluctuations can be applied + BaseTokenAPIRatio { + numerator: NonZeroU64::new(numerator * 100).unwrap(), + denominator: NonZeroU64::new(denominator * 100).unwrap(), + ratio_timestamp: chrono::Utc::now(), + } + } else { + BaseTokenAPIRatio { numerator: NonZeroU64::new(numerator).unwrap(), denominator: NonZeroU64::new(denominator).unwrap(), ratio_timestamp: chrono::Utc::now(), - }, + } + }; + + Self { + ratio, + previous_numerator: Mutex::new(NonZeroU64::new(numerator).unwrap()), fluctuation, + next_value_fluctuation, } } } #[async_trait] impl PriceAPIClient for ForcedPriceClient { - // Returns a ratio which is 10% higher or lower than the configured forced ratio. + /// Returns the configured ratio with fluctuation applied if enabled async fn fetch_ratio(&self, _token_address: Address) -> anyhow::Result { - if let Some(x) = self.fluctuation { - if x != 0 { - let mut rng = rand::thread_rng(); - - let mut adjust_range = |value: NonZeroU64| { - let value_f64 = value.get() as f64; - let min = (value_f64 * (1.0 - x as f64 / 100.0)).round() as u64; - let max = (value_f64 * (1.0 + x as f64 / 100.0)).round() as u64; - rng.gen_range(min..=max) - }; - let new_numerator = adjust_range(self.ratio.numerator); - let new_denominator = adjust_range(self.ratio.denominator); + if let Some(fluctation) = self.fluctuation { + let mut previous_numerator = self.previous_numerator.lock().await; + let mut rng = rand::thread_rng(); + let numerator_range = ( + max( + (self.ratio.numerator.get() as f64 * (1.0 - (fluctation as f64 / 100.0))) + .round() as u64, + (previous_numerator.get() as f64 + * (1.0 - (self.next_value_fluctuation as f64 / 100.0))) + .round() as u64, + ), + min( + (self.ratio.numerator.get() as f64 * (1.0 + (fluctation as f64 / 100.0))) + .round() as u64, + (previous_numerator.get() as f64 + * (1.0 + (self.next_value_fluctuation as f64 / 100.0))) + .round() as u64, + ), + ); - return Ok(BaseTokenAPIRatio { - numerator: NonZeroU64::new(new_numerator).unwrap_or(self.ratio.numerator), - denominator: NonZeroU64::new(new_denominator).unwrap_or(self.ratio.denominator), - ratio_timestamp: chrono::Utc::now(), - }); - } + let new_numerator = + NonZeroU64::new(rng.gen_range(numerator_range.0..=numerator_range.1)) + .unwrap_or(self.ratio.numerator); + let adjusted_ratio = BaseTokenAPIRatio { + numerator: new_numerator, + denominator: self.ratio.denominator, + ratio_timestamp: chrono::Utc::now(), + }; + *previous_numerator = new_numerator; + Ok(adjusted_ratio) + } else { + Ok(self.ratio) } - Ok(self.ratio) } } diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs index 883804f0bd6f..67819f7d7ccd 100644 --- a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs +++ b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs @@ -1,7 +1,7 @@ use zksync_types::{ 
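// Worked example for the range computed above (editor's sketch of the same arithmetic): with a
// configured numerator of 1000, `fluctuation = 10`, `next_value_fluctuation = 1`, and a previous
// numerator of 990, the next numerator is drawn from max(900, 980)..=min(1100, 1000), i.e.
// 980..=1000: each value stays within 10% of the config and within 1% of its predecessor.
fn numerator_range(config: u64, previous: u64, fluctuation: u32, next_fluctuation: u32) -> (u64, u64) {
    let pct = |value: u64, factor: f64| (value as f64 * factor).round() as u64;
    let lo = pct(config, 1.0 - fluctuation as f64 / 100.0)
        .max(pct(previous, 1.0 - next_fluctuation as f64 / 100.0));
    let hi = pct(config, 1.0 + fluctuation as f64 / 100.0)
        .min(pct(previous, 1.0 + next_fluctuation as f64 / 100.0));
    (lo, hi) // sampled with `rng.gen_range(lo..=hi)`, as above
}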
commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, ethabi::Token, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, }; use crate::{ @@ -14,7 +14,7 @@ use crate::{ pub struct CommitBatches<'a> { pub last_committed_l1_batch: &'a L1BatchWithMetadata, pub l1_batches: &'a [L1BatchWithMetadata], - pub pubdata_da: PubdataDA, + pub pubdata_da: PubdataSendingMode, pub mode: L1BatchCommitmentMode, } diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index 179c04748d3b..6438aeb7f55c 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -4,7 +4,7 @@ use zksync_types::{ L1BatchWithMetadata, }, ethabi::Token, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, web3::contract::Error as ContractError, ProtocolVersionId, U256, }; @@ -24,14 +24,14 @@ const PUBDATA_SOURCE_CUSTOM: u8 = 2; pub struct CommitBatchInfo<'a> { mode: L1BatchCommitmentMode, l1_batch_with_metadata: &'a L1BatchWithMetadata, - pubdata_da: PubdataDA, + pubdata_da: PubdataSendingMode, } impl<'a> CommitBatchInfo<'a> { pub fn new( mode: L1BatchCommitmentMode, l1_batch_with_metadata: &'a L1BatchWithMetadata, - pubdata_da: PubdataDA, + pubdata_da: PubdataSendingMode, ) -> Self { Self { mode, @@ -204,24 +204,24 @@ impl Tokenizable for CommitBatchInfo<'_> { // Here we're not pushing any pubdata on purpose; no pubdata is sent in Validium mode. ( L1BatchCommitmentMode::Validium, - PubdataDA::Calldata | PubdataDA::RelayedL2Calldata, + PubdataSendingMode::Calldata | PubdataSendingMode::RelayedL2Calldata, ) => { vec![PUBDATA_SOURCE_CALLDATA] } - (L1BatchCommitmentMode::Validium, PubdataDA::Blobs) => { + (L1BatchCommitmentMode::Validium, PubdataSendingMode::Blobs) => { vec![PUBDATA_SOURCE_BLOBS] } - (L1BatchCommitmentMode::Rollup, PubdataDA::Custom) => { + (L1BatchCommitmentMode::Rollup, PubdataSendingMode::Custom) => { panic!("Custom pubdata DA is incompatible with Rollup mode") } - (L1BatchCommitmentMode::Validium, PubdataDA::Custom) => { + (L1BatchCommitmentMode::Validium, PubdataSendingMode::Custom) => { vec![PUBDATA_SOURCE_CUSTOM] } ( L1BatchCommitmentMode::Rollup, - PubdataDA::Calldata | PubdataDA::RelayedL2Calldata, + PubdataSendingMode::Calldata | PubdataSendingMode::RelayedL2Calldata, ) => { // We compute and add the blob commitment to the pubdata payload so that we can verify the proof // even if we are not using blobs. 
@@ -232,7 +232,7 @@ impl Tokenizable for CommitBatchInfo<'_> { .chain(blob_commitment) .collect() } - (L1BatchCommitmentMode::Rollup, PubdataDA::Blobs) => { + (L1BatchCommitmentMode::Rollup, PubdataSendingMode::Blobs) => { let pubdata = self.pubdata_input(); let pubdata_commitments = pubdata.chunks(ZK_SYNC_BYTES_PER_BLOB).flat_map(|blob| { diff --git a/core/lib/mempool/src/mempool_store.rs b/core/lib/mempool/src/mempool_store.rs index 334a4783a76c..70176b456dd1 100644 --- a/core/lib/mempool/src/mempool_store.rs +++ b/core/lib/mempool/src/mempool_store.rs @@ -1,7 +1,8 @@ -use std::collections::{hash_map, BTreeSet, HashMap, HashSet}; +use std::collections::{hash_map, BTreeSet, HashMap}; use zksync_types::{ l1::L1Tx, l2::L2Tx, Address, ExecuteTransactionCommon, Nonce, PriorityOpId, Transaction, + TransactionTimeRangeConstraint, }; use crate::types::{AccountTransactions, L2TxFilter, MempoolScore}; @@ -54,10 +55,10 @@ impl MempoolStore { /// in other cases mempool relies on state keeper and its internal state to keep that info up to date pub fn insert( &mut self, - transactions: Vec, + transactions: Vec<(Transaction, TransactionTimeRangeConstraint)>, initial_nonces: HashMap, ) { - for transaction in transactions { + for (transaction, constraint) in transactions { let Transaction { common_data, execute, @@ -85,6 +86,7 @@ impl MempoolStore { received_timestamp_ms, raw_bytes, }, + constraint, &initial_nonces, ); } @@ -95,20 +97,36 @@ impl MempoolStore { } } + #[cfg(test)] + pub fn insert_without_constraints( + &mut self, + transactions: Vec, + initial_nonces: HashMap, + ) { + self.insert( + transactions + .into_iter() + .map(|x| (x, TransactionTimeRangeConstraint::default())) + .collect(), + initial_nonces, + ); + } + fn insert_l2_transaction( &mut self, transaction: L2Tx, + constraint: TransactionTimeRangeConstraint, initial_nonces: &HashMap, ) { let account = transaction.initiator_account(); let metadata = match self.l2_transactions_per_account.entry(account) { - hash_map::Entry::Occupied(mut txs) => txs.get_mut().insert(transaction), + hash_map::Entry::Occupied(mut txs) => txs.get_mut().insert(transaction, constraint), hash_map::Entry::Vacant(entry) => { let account_nonce = initial_nonces.get(&account).cloned().unwrap_or(Nonce(0)); entry .insert(AccountTransactions::new(account_nonce)) - .insert(transaction) + .insert(transaction, constraint) } }; if let Some(score) = metadata.previous_score { @@ -133,10 +151,17 @@ impl MempoolStore { } /// Returns next transaction for execution from mempool - pub fn next_transaction(&mut self, filter: &L2TxFilter) -> Option { + pub fn next_transaction( + &mut self, + filter: &L2TxFilter, + ) -> Option<(Transaction, TransactionTimeRangeConstraint)> { if let Some(transaction) = self.l1_transactions.remove(&self.next_priority_id) { self.next_priority_id += 1; - return Some(transaction.into()); + // L1 transactions can't use block.timestamp in AA and hence do not need to have a constraint + return Some(( + transaction.into(), + TransactionTimeRangeConstraint::default(), + )); } let mut removed = 0; @@ -163,7 +188,7 @@ impl MempoolStore { self.stashed_accounts.push(stashed_pointer.account); } // insert pointer to the next transaction if it exists - let (transaction, score) = self + let (transaction, constraint, score) = self .l2_transactions_per_account .get_mut(&tx_pointer.account) .expect("mempool: dangling pointer in priority queue") @@ -176,28 +201,31 @@ impl MempoolStore { .size .checked_sub((removed + 1) as u64) .expect("mempool size can't be negative"); - 
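// Sketch (editor's addition): the test-only `insert_without_constraints` helper above simply
// pairs every transaction with a default time-range constraint so legacy call sites keep working.
fn with_default_constraints<T, C: Default>(txs: Vec<T>) -> Vec<(T, C)> {
    txs.into_iter().map(|tx| (tx, C::default())).collect()
}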
Some(transaction.into()) + Some((transaction.into(), constraint)) } /// When a state_keeper starts the block over after a rejected transaction, /// we have to rollback the nonces/ids in the mempool and /// reinsert the transactions from the block back into mempool. - pub fn rollback(&mut self, tx: &Transaction) { + pub fn rollback(&mut self, tx: &Transaction) -> TransactionTimeRangeConstraint { // rolling back the nonces and priority ids match &tx.common_data { ExecuteTransactionCommon::L1(data) => { // reset next priority id self.next_priority_id = self.next_priority_id.min(data.serial_id); + TransactionTimeRangeConstraint::default() } ExecuteTransactionCommon::L2(_) => { - if let Some(score) = self + if let Some((score, constraint)) = self .l2_transactions_per_account .get_mut(&tx.initiator_account()) .expect("account is not available in mempool") .reset(tx) { self.l2_priority_queue.remove(&score); + return constraint; } + TransactionTimeRangeConstraint::default() } ExecuteTransactionCommon::ProtocolUpgrade(_) => { panic!("Protocol upgrade tx is not supposed to be in mempool"); @@ -221,22 +249,57 @@ } fn gc(&mut self) -> Vec<Address>
{ - if self.size >= self.capacity { - let index: HashSet<_> = self + if self.size > self.capacity { + let mut transactions = std::mem::take(&mut self.l2_transactions_per_account); + let mut possibly_kept: Vec<_> = self .l2_priority_queue .iter() - .map(|pointer| pointer.account) + .rev() + .filter_map(|pointer| { + transactions + .remove(&pointer.account) + .map(|txs| (pointer.account, txs)) + }) .collect(); - let transactions = std::mem::take(&mut self.l2_transactions_per_account); - let (kept, drained) = transactions + + let mut sum = 0; + let mut number_of_accounts_kept = 0; + for (_, txs) in &possibly_kept { + sum += txs.len(); + if sum <= self.capacity as usize { + number_of_accounts_kept += 1; + } else { + break; + } + } + if number_of_accounts_kept == 0 && !possibly_kept.is_empty() { + tracing::warn!("mempool capacity is too low to handle txs from single account, consider increasing capacity"); + // Keep at least one entry, otherwise mempool won't return any new L2 tx to process. + number_of_accounts_kept = 1; + } + let (kept, drained) = { + let mut drained: Vec<_> = transactions.into_keys().collect(); + let also_drained = possibly_kept + .split_off(number_of_accounts_kept) + .into_iter() + .map(|(address, _)| address); + drained.extend(also_drained); + + (possibly_kept, drained) + }; + + let l2_priority_queue = std::mem::take(&mut self.l2_priority_queue); + self.l2_priority_queue = l2_priority_queue .into_iter() - .partition(|(address, _)| index.contains(address)); - self.l2_transactions_per_account = kept; + .rev() + .take(number_of_accounts_kept) + .collect(); + self.l2_transactions_per_account = kept.into_iter().collect(); self.size = self .l2_transactions_per_account .iter() - .fold(0, |agg, (_, tnxs)| agg + tnxs.len() as u64); - return drained.into_keys().collect(); + .fold(0, |agg, (_, txs)| agg + txs.len() as u64); + return drained; } vec![] } diff --git a/core/lib/mempool/src/tests.rs b/core/lib/mempool/src/tests.rs index 96ef600984f9..d40158ae9558 100644 --- a/core/lib/mempool/src/tests.rs +++ b/core/lib/mempool/src/tests.rs @@ -9,7 +9,7 @@ use zksync_types::{ l1::{OpProcessingType, PriorityQueueType}, l2::L2Tx, Address, Execute, ExecuteTransactionCommon, L1TxCommonData, Nonce, PriorityOpId, Transaction, - H256, U256, + TransactionTimeRangeConstraint, H256, U256, }; use crate::{mempool_store::MempoolStore, types::L2TxFilter}; @@ -27,7 +27,7 @@ fn basic_flow() { gen_l2_tx(account1, Nonce(1)), ]; assert_eq!(mempool.next_transaction(&L2TxFilter::default()), None); - mempool.insert(transactions, HashMap::new()); + mempool.insert_without_constraints(transactions, HashMap::new()); assert_eq!( view(mempool.next_transaction(&L2TxFilter::default())), (account0, 0) @@ -46,7 +46,7 @@ fn basic_flow() { ); assert_eq!(mempool.next_transaction(&L2TxFilter::default()), None); // unclog second account and insert more transactions - mempool.insert( + mempool.insert_without_constraints( vec![gen_l2_tx(account1, Nonce(0)), gen_l2_tx(account0, Nonce(3))], HashMap::new(), ); @@ -72,10 +72,10 @@ fn missing_txns() { ]; let mut nonces = HashMap::new(); nonces.insert(account, Nonce(5)); - mempool.insert(transactions, nonces); + mempool.insert_without_constraints(transactions, nonces); assert_eq!(mempool.next_transaction(&L2TxFilter::default()), None); // missing transaction unclogs mempool - mempool.insert(vec![gen_l2_tx(account, Nonce(5))], HashMap::new()); + mempool.insert_without_constraints(vec![gen_l2_tx(account, Nonce(5))], HashMap::new()); assert_eq!( 
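// Sketch of the eviction policy implemented above (editor's addition): accounts are visited from
// the best-scored end of the priority queue and kept while the running transaction count fits
// into `capacity`; at least one account is always retained so the mempool can keep serving txs.
fn accounts_to_keep(tx_counts_best_first: &[usize], capacity: usize) -> usize {
    let mut total = 0;
    let mut kept = 0;
    for &count in tx_counts_best_first {
        total += count;
        if total <= capacity {
            kept += 1;
        } else {
            break;
        }
    }
    if kept == 0 && !tx_counts_best_first.is_empty() {
        1 // a single over-capacity account is still kept, matching the warning above
    } else {
        kept
    }
}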
view(mempool.next_transaction(&L2TxFilter::default())), (account, 5) @@ -90,7 +90,7 @@ fn missing_txns() { ); // filling remaining gap - mempool.insert(vec![gen_l2_tx(account, Nonce(8))], HashMap::new()); + mempool.insert_without_constraints(vec![gen_l2_tx(account, Nonce(8))], HashMap::new()); assert_eq!( view(mempool.next_transaction(&L2TxFilter::default())), (account, 8) @@ -110,10 +110,11 @@ fn prioritize_l1_txns() { gen_l2_tx(account, Nonce(1)), gen_l1_tx(PriorityOpId(0)), ]; - mempool.insert(transactions, HashMap::new()); + mempool.insert_without_constraints(transactions, HashMap::new()); assert!(mempool .next_transaction(&L2TxFilter::default()) .unwrap() + .0 .is_l1()) } @@ -125,13 +126,14 @@ fn l1_txns_priority_id() { gen_l1_tx(PriorityOpId(2)), gen_l1_tx(PriorityOpId(3)), ]; - mempool.insert(transactions, HashMap::new()); + mempool.insert_without_constraints(transactions, HashMap::new()); assert!(mempool.next_transaction(&L2TxFilter::default()).is_none()); - mempool.insert(vec![gen_l1_tx(PriorityOpId(0))], HashMap::new()); + mempool.insert_without_constraints(vec![gen_l1_tx(PriorityOpId(0))], HashMap::new()); for idx in 0..4 { let data = mempool .next_transaction(&L2TxFilter::default()) .unwrap() + .0 .common_data; match data { ExecuteTransactionCommon::L1(data) => { @@ -153,7 +155,7 @@ fn rejected_tx() { gen_l2_tx(account, Nonce(3)), gen_l2_tx(account, Nonce(5)), ]; - mempool.insert(transactions, HashMap::new()); + mempool.insert_without_constraints(transactions, HashMap::new()); assert_eq!( view(mempool.next_transaction(&L2TxFilter::default())), (account, 0) @@ -167,7 +169,7 @@ fn rejected_tx() { assert!(mempool.next_transaction(&L2TxFilter::default()).is_none()); // replace transaction and unblock account - mempool.insert(vec![gen_l2_tx(account, Nonce(1))], HashMap::new()); + mempool.insert_without_constraints(vec![gen_l2_tx(account, Nonce(1))], HashMap::new()); assert_eq!( view(mempool.next_transaction(&L2TxFilter::default())), (account, 1) @@ -186,9 +188,9 @@ fn rejected_tx() { fn replace_tx() { let mut mempool = MempoolStore::new(PriorityOpId(0), 100); let account = Address::random(); - mempool.insert(vec![gen_l2_tx(account, Nonce(0))], HashMap::new()); + mempool.insert_without_constraints(vec![gen_l2_tx(account, Nonce(0))], HashMap::new()); // replace it - mempool.insert( + mempool.insert_without_constraints( vec![gen_l2_tx_with_timestamp( account, Nonce(0), @@ -206,7 +208,7 @@ fn two_ready_txs() { let account0 = Address::random(); let account1 = Address::random(); let transactions = vec![gen_l2_tx(account0, Nonce(0)), gen_l2_tx(account1, Nonce(0))]; - mempool.insert(transactions, HashMap::new()); + mempool.insert_without_constraints(transactions, HashMap::new()); assert_eq!( HashSet::<(_, _)>::from_iter(vec![ view(mempool.next_transaction(&L2TxFilter::default())), @@ -228,10 +230,10 @@ fn mempool_size() { gen_l2_tx(account0, Nonce(3)), gen_l2_tx(account1, Nonce(1)), ]; - mempool.insert(transactions, HashMap::new()); + mempool.insert_without_constraints(transactions, HashMap::new()); assert_eq!(mempool.stats().l2_transaction_count, 5); // replacement - mempool.insert(vec![gen_l2_tx(account0, Nonce(2))], HashMap::new()); + mempool.insert_without_constraints(vec![gen_l2_tx(account0, Nonce(2))], HashMap::new()); assert_eq!(mempool.stats().l2_transaction_count, 5); // load next mempool.next_transaction(&L2TxFilter::default()); @@ -261,7 +263,7 @@ fn filtering() { // First account will have two transactions: one with too low pubdata price and one with the right value. 
// Second account will have just one transaction with the right value. - mempool.insert( + mempool.insert_without_constraints( gen_transactions_for_filtering(vec![ (account0, Nonce(0), unix_timestamp_ms(), 0), (account0, Nonce(1), unix_timestamp_ms(), 1), @@ -302,7 +304,7 @@ fn stashed_accounts() { let account0 = Address::random(); let account1 = Address::random(); - mempool.insert( + mempool.insert_without_constraints( gen_transactions_for_filtering(vec![ (account0, Nonce(0), unix_timestamp_ms(), 0), (account0, Nonce(1), unix_timestamp_ms(), 1), @@ -321,37 +323,32 @@ fn stashed_accounts() { #[test] fn mempool_capacity() { - let mut mempool = MempoolStore::new(PriorityOpId(0), 5); + let mut mempool = MempoolStore::new(PriorityOpId(0), 4); let account0 = Address::random(); let account1 = Address::random(); let account2 = Address::random(); + let account3 = Address::random(); let transactions = vec![ gen_l2_tx(account0, Nonce(0)), gen_l2_tx(account0, Nonce(1)), gen_l2_tx(account0, Nonce(2)), - gen_l2_tx(account1, Nonce(1)), - gen_l2_tx(account2, Nonce(1)), + gen_l2_tx_with_timestamp(account1, Nonce(0), unix_timestamp_ms() + 1), + gen_l2_tx_with_timestamp(account2, Nonce(0), unix_timestamp_ms() + 2), + gen_l2_tx(account3, Nonce(1)), ]; - mempool.insert(transactions, HashMap::new()); - // the mempool is full. Accounts with non-sequential nonces got stashed + mempool.insert_without_constraints(transactions, HashMap::new()); + // Mempool is full. Accounts with non-sequential nonces and some accounts with lowest score should be purged. assert_eq!( HashSet::<_>::from_iter(mempool.get_mempool_info().purged_accounts), - HashSet::<_>::from_iter(vec![account1, account2]), - ); - // verify that existing good-to-go transactions and new ones got picked - mempool.insert( - vec![gen_l2_tx_with_timestamp( - account1, - Nonce(0), - unix_timestamp_ms() + 1, - )], - HashMap::new(), + HashSet::from([account2, account3]), ); + // verify that good-to-go transactions are kept. for _ in 0..3 { assert_eq!( mempool .next_transaction(&L2TxFilter::default()) .unwrap() + .0 .initiator_account(), account0 ); @@ -360,9 +357,39 @@ fn mempool_capacity() { mempool .next_transaction(&L2TxFilter::default()) .unwrap() + .0 .initiator_account(), account1 ); + assert!(!mempool.has_next(&L2TxFilter::default())); +} + +#[test] +fn mempool_does_not_purge_all_accounts() { + let mut mempool = MempoolStore::new(PriorityOpId(0), 1); + let account0 = Address::random(); + let account1 = Address::random(); + let transactions = vec![ + gen_l2_tx(account0, Nonce(0)), + gen_l2_tx(account0, Nonce(1)), + gen_l2_tx(account1, Nonce(1)), + ]; + mempool.insert_without_constraints(transactions, HashMap::new()); + // Mempool is full. Account 1 has tx with non-sequential nonce so it should be purged. + // Txs from account 0 have sequential nonces but their number is greater than capacity; they should be kept. + assert_eq!(mempool.get_mempool_info().purged_accounts, vec![account1]); + // verify that good-to-go transactions are kept. 
+ for _ in 0..2 { + assert_eq!( + mempool + .next_transaction(&L2TxFilter::default()) + .unwrap() + .0 + .initiator_account(), + account0 + ); + } + assert!(!mempool.has_next(&L2TxFilter::default())); } fn gen_l2_tx(address: Address, nonce: Nonce) -> Transaction { @@ -415,8 +442,8 @@ fn gen_l1_tx(priority_id: PriorityOpId) -> Transaction { } } -fn view(transaction: Option) -> (Address, u32) { - let tx = transaction.unwrap(); +fn view(transaction: Option<(Transaction, TransactionTimeRangeConstraint)>) -> (Address, u32) { + let tx = transaction.unwrap().0; (tx.initiator_account(), tx.nonce().unwrap().0) } diff --git a/core/lib/mempool/src/types.rs b/core/lib/mempool/src/types.rs index 99a63ffd08e2..7c2694dff5ef 100644 --- a/core/lib/mempool/src/types.rs +++ b/core/lib/mempool/src/types.rs @@ -1,14 +1,15 @@ use std::{cmp::Ordering, collections::HashMap}; use zksync_types::{ - fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, Address, Nonce, Transaction, U256, + fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, Address, Nonce, Transaction, + TransactionTimeRangeConstraint, U256, }; /// Pending mempool transactions of account #[derive(Debug)] pub(crate) struct AccountTransactions { /// transactions that belong to given account keyed by transaction nonce - transactions: HashMap, + transactions: HashMap, /// account nonce in mempool /// equals to committed nonce in db + number of transactions sent to state keeper nonce: Nonce, @@ -23,7 +24,11 @@ impl AccountTransactions { } /// Inserts new transaction for given account. Returns insertion metadata - pub fn insert(&mut self, transaction: L2Tx) -> InsertionMetadata { + pub fn insert( + &mut self, + transaction: L2Tx, + constraint: TransactionTimeRangeConstraint, + ) -> InsertionMetadata { let mut metadata = InsertionMetadata::default(); let nonce = transaction.common_data.nonce; // skip insertion if transaction is old @@ -33,8 +38,8 @@ impl AccountTransactions { let new_score = Self::score_for_transaction(&transaction); let previous_score = self .transactions - .insert(nonce, transaction) - .map(|tx| Self::score_for_transaction(&tx)); + .insert(nonce, (transaction, constraint)) + .map(|x| Self::score_for_transaction(&x.0)); metadata.is_new = previous_score.is_none(); if nonce == self.nonce { metadata.new_score = Some(new_score); @@ -43,9 +48,9 @@ impl AccountTransactions { metadata } - /// Returns next transaction to be included in block and optional score of its successor - /// Panics if no such transaction exists - pub fn next(&mut self) -> (L2Tx, Option) { + /// Returns next transaction to be included in block, its time range constraint and optional + /// score of its successor. Panics if no such transaction exists + pub fn next(&mut self) -> (L2Tx, TransactionTimeRangeConstraint, Option) { let transaction = self .transactions .remove(&self.nonce) @@ -54,12 +59,16 @@ impl AccountTransactions { let score = self .transactions .get(&self.nonce) - .map(Self::score_for_transaction); - (transaction, score) + .map(|(tx, _c)| Self::score_for_transaction(tx)); + (transaction.0, transaction.1, score) } - /// Handles transaction rejection. Returns optional score of its successor - pub fn reset(&mut self, transaction: &Transaction) -> Option { + /// Handles transaction rejection. 
Returns optional score of its successor and time range + /// constraint that the transaction has been added to the mempool with + pub fn reset( + &mut self, + transaction: &Transaction, + ) -> Option<(MempoolScore, TransactionTimeRangeConstraint)> { // current nonce for the group needs to be reset let tx_nonce = transaction .nonce() @@ -67,7 +76,7 @@ impl AccountTransactions { self.nonce = self.nonce.min(tx_nonce); self.transactions .get(&(tx_nonce + 1)) - .map(Self::score_for_transaction) + .map(|(tx, c)| (Self::score_for_transaction(tx), c.clone())) } pub fn len(&self) -> usize { diff --git a/core/lib/merkle_tree/Cargo.toml b/core/lib/merkle_tree/Cargo.toml index 579350bccf4e..e615258ba646 100644 --- a/core/lib/merkle_tree/Cargo.toml +++ b/core/lib/merkle_tree/Cargo.toml @@ -16,7 +16,6 @@ zksync_types.workspace = true zksync_crypto_primitives.workspace = true zksync_storage.workspace = true zksync_prover_interface.workspace = true -zksync_utils.workspace = true anyhow.workspace = true leb128.workspace = true diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs index a4d577fc3ba5..5265f93264f2 100644 --- a/core/lib/merkle_tree/src/domain.rs +++ b/core/lib/merkle_tree/src/domain.rs @@ -9,10 +9,11 @@ use crate::{ consistency::ConsistencyError, storage::{PatchSet, Patched, RocksDBWrapper}, types::{ - Key, Root, TreeEntry, TreeEntryWithProof, TreeInstruction, TreeLogEntry, ValueHash, - TREE_DEPTH, + Key, NodeKey, RawNode, Root, TreeEntry, TreeEntryWithProof, TreeInstruction, TreeLogEntry, + ValueHash, TREE_DEPTH, }, BlockOutput, HashTree, MerkleTree, MerkleTreePruner, MerkleTreePrunerHandle, NoVersionError, + PruneDatabase, }; impl TreeInstruction { @@ -359,7 +360,10 @@ impl ZkSyncTree { pub fn roll_back_logs(&mut self, last_l1_batch_to_keep: L1BatchNumber) -> anyhow::Result<()> { self.tree.db.reset(); let retained_version_count = u64::from(last_l1_batch_to_keep.0 + 1); - self.tree.truncate_recent_versions(retained_version_count) + // Since `Patched<_>` doesn't implement `PruneDatabase`, we borrow the underlying DB, which is safe + // because the in-memory patch was reset above. + MerkleTree::new_unchecked(self.tree.db.inner_mut()) + .truncate_recent_versions(retained_version_count) } /// Saves the accumulated changes in the tree to RocksDB. @@ -406,6 +410,11 @@ impl ZkSyncTreeReader { &self.0.db } + /// Converts this reader to the underlying DB. + pub fn into_db(self) -> RocksDBWrapper { + self.0.db + } + /// Returns the root hash and leaf count at the specified L1 batch. pub fn root_info(&self, l1_batch_number: L1BatchNumber) -> Option<(ValueHash, u64)> { let root = self.0.root(l1_batch_number.0.into())?; @@ -444,6 +453,28 @@ impl ZkSyncTreeReader { self.0.entries_with_proofs(version, keys) } + /// Returns raw nodes for the specified `keys`. + pub fn raw_nodes(&self, keys: &[NodeKey]) -> Vec> { + let raw_nodes = self.0.db.raw_nodes(keys).into_iter(); + raw_nodes + .zip(keys) + .map(|(slice, key)| { + let slice = slice?; + Some(if key.is_empty() { + RawNode::deserialize_root(&slice) + } else { + RawNode::deserialize(&slice) + }) + }) + .collect() + } + + /// Returns raw stale keys obsoleted in the specified version of the tree. + pub fn raw_stale_keys(&self, l1_batch_number: L1BatchNumber) -> Vec { + let version = u64::from(l1_batch_number.0); + self.0.db.stale_keys(version) + } + /// Verifies consistency of the tree at the specified L1 batch number. 
/// /// # Errors diff --git a/core/lib/merkle_tree/src/errors.rs b/core/lib/merkle_tree/src/errors.rs index b8130717f93b..c187ce4977bf 100644 --- a/core/lib/merkle_tree/src/errors.rs +++ b/core/lib/merkle_tree/src/errors.rs @@ -22,6 +22,8 @@ pub enum DeserializeErrorKind { /// Bit mask specifying a child kind in an internal tree node is invalid. #[error("invalid bit mask specifying a child kind in an internal tree node")] InvalidChildKind, + #[error("data left after deserialization")] + Leftovers, /// Missing required tag in the tree manifest. #[error("missing required tag `{0}` in tree manifest")] diff --git a/core/lib/merkle_tree/src/lib.rs b/core/lib/merkle_tree/src/lib.rs index 6f9da59cf0ed..1782f373954c 100644 --- a/core/lib/merkle_tree/src/lib.rs +++ b/core/lib/merkle_tree/src/lib.rs @@ -71,6 +71,7 @@ mod hasher; mod metrics; mod pruning; pub mod recovery; +pub mod repair; mod storage; mod types; mod utils; @@ -82,7 +83,7 @@ mod utils; pub mod unstable { pub use crate::{ errors::DeserializeError, - types::{Manifest, Node, NodeKey, ProfiledTreeOperation, Root}, + types::{Manifest, Node, NodeKey, ProfiledTreeOperation, RawNode, Root}, }; } @@ -200,15 +201,12 @@ impl MerkleTree { root.unwrap_or(Root::Empty) } - /// Removes the most recent versions from the database. - /// - /// The current implementation does not actually remove node data for the removed versions - /// since it's likely to be reused in the future (especially upper-level internal nodes). - /// - /// # Errors - /// - /// Proxies database I/O errors. - pub fn truncate_recent_versions(&mut self, retained_version_count: u64) -> anyhow::Result<()> { + /// Incorrect version of [`Self::truncate_recent_versions()`] that doesn't remove stale keys for the truncated tree versions. + #[cfg(test)] + fn truncate_recent_versions_incorrectly( + &mut self, + retained_version_count: u64, + ) -> anyhow::Result<()> { let mut manifest = self.db.manifest().unwrap_or_default(); if manifest.version_count > retained_version_count { manifest.version_count = retained_version_count; @@ -259,6 +257,26 @@ impl MerkleTree { } impl MerkleTree { + /// Removes the most recent versions from the database. + /// + /// The current implementation does not actually remove node data for the removed versions + /// since it's likely to be reused in the future (especially upper-level internal nodes). + /// + /// # Errors + /// + /// Proxies database I/O errors. + pub fn truncate_recent_versions(&mut self, retained_version_count: u64) -> anyhow::Result<()> { + let mut manifest = self.db.manifest().unwrap_or_default(); + let current_version_count = manifest.version_count; + if current_version_count > retained_version_count { + // It is necessary to remove "future" stale keys since otherwise they may be used in future pruning and lead + // to non-obsolete tree nodes getting removed. + manifest.version_count = retained_version_count; + self.db.truncate(manifest, ..current_version_count)?; + } + Ok(()) + } + /// Returns the first retained version of the tree. 
pub fn first_retained_version(&self) -> Option { match self.db.min_stale_key_version() { diff --git a/core/lib/merkle_tree/src/pruning.rs b/core/lib/merkle_tree/src/pruning.rs index a74db40ef5e6..ae8300b893ab 100644 --- a/core/lib/merkle_tree/src/pruning.rs +++ b/core/lib/merkle_tree/src/pruning.rs @@ -250,7 +250,8 @@ mod tests { use super::*; use crate::{ types::{Node, NodeKey}, - Database, Key, MerkleTree, PatchSet, TreeEntry, ValueHash, + utils::testonly::setup_tree_with_stale_keys, + Database, Key, MerkleTree, PatchSet, RocksDBWrapper, TreeEntry, ValueHash, }; fn create_db() -> PatchSet { @@ -506,4 +507,39 @@ mod tests { println!("Keys are pruned after each update"); test_keys_are_removed_by_pruning_when_overwritten_in_multiple_batches(true); } + + fn test_pruning_with_truncation(mut db: impl PruneDatabase) { + setup_tree_with_stale_keys(&mut db, false); + + let stale_keys = db.stale_keys(1); + assert_eq!(stale_keys.len(), 1); + assert!( + stale_keys[0].is_empty() && stale_keys[0].version == 0, + "{stale_keys:?}" + ); + + let (mut pruner, _) = MerkleTreePruner::new(db); + let prunable_version = pruner.last_prunable_version().unwrap(); + assert_eq!(prunable_version, 1); + let stats = pruner + .prune_up_to(prunable_version) + .unwrap() + .expect("tree was not pruned"); + assert_eq!(stats.target_retained_version, 1); + assert_eq!(stats.pruned_key_count, 1); // only the root node should have been pruned + + let tree = MerkleTree::new(pruner.db).unwrap(); + tree.verify_consistency(1, false).unwrap(); + } + + #[test] + fn pruning_with_truncation() { + test_pruning_with_truncation(PatchSet::default()); + } + + #[test] + fn pruning_with_truncation_on_rocksdb() { + let temp_dir = tempfile::TempDir::new().unwrap(); + test_pruning_with_truncation(RocksDBWrapper::new(temp_dir.path()).unwrap()); + } } diff --git a/core/lib/merkle_tree/src/repair.rs b/core/lib/merkle_tree/src/repair.rs new file mode 100644 index 000000000000..c83569e96b13 --- /dev/null +++ b/core/lib/merkle_tree/src/repair.rs @@ -0,0 +1,376 @@ +//! Service tasks for the Merkle tree. + +use std::{ + ops, + sync::{mpsc, Arc, Mutex}, + time::{Duration, Instant}, +}; + +use anyhow::Context as _; +use rayon::prelude::*; + +use crate::{ + types::{NodeKey, StaleNodeKey}, + Database, PruneDatabase, RocksDBWrapper, +}; + +/// Persisted information about stale keys repair progress. +#[derive(Debug)] +pub(crate) struct StaleKeysRepairData { + pub next_version: u64, +} + +/// [`StaleKeysRepairTask`] progress stats. +#[derive(Debug, Clone, Default)] +pub struct StaleKeysRepairStats { + /// Versions checked by the task, or `None` if no versions have been checked. + pub checked_versions: Option>, + /// Number of repaired stale keys. + pub repaired_key_count: usize, +} + +#[derive(Debug)] +struct StepStats { + checked_versions: ops::RangeInclusive, + repaired_key_count: usize, +} + +/// Handle for a [`StaleKeysRepairTask`] allowing to abort its operation. +/// +/// The task is aborted once the handle is dropped. +#[must_use = "Paired `StaleKeysRepairTask` is aborted once handle is dropped"] +#[derive(Debug)] +pub struct StaleKeysRepairHandle { + stats: Arc>, + _aborted_sender: mpsc::Sender<()>, +} + +impl StaleKeysRepairHandle { + /// Returns stats for the paired task. + #[allow(clippy::missing_panics_doc)] // mutex poisoning shouldn't happen + pub fn stats(&self) -> StaleKeysRepairStats { + self.stats.lock().expect("stats mutex poisoned").clone() + } +} + +/// Task that repairs stale keys for the tree. 
+/// +/// Early tree versions contained a bug: If a tree version was truncated, stale keys for it remained intact. +/// If an overwritten tree version did not contain the same keys, this could lead to keys incorrectly marked as stale, +/// meaning that after pruning, a tree may end up broken. +#[derive(Debug)] +pub struct StaleKeysRepairTask { + db: RocksDBWrapper, + parallelism: u64, + poll_interval: Duration, + stats: Arc>, + aborted_receiver: mpsc::Receiver<()>, +} + +impl StaleKeysRepairTask { + /// Creates a new task. + pub fn new(db: RocksDBWrapper) -> (Self, StaleKeysRepairHandle) { + let (aborted_sender, aborted_receiver) = mpsc::channel(); + let stats = Arc::>::default(); + let this = Self { + db, + parallelism: (rayon::current_num_threads() as u64).max(1), + poll_interval: Duration::from_secs(60), + stats: stats.clone(), + aborted_receiver, + }; + let handle = StaleKeysRepairHandle { + stats, + _aborted_sender: aborted_sender, + }; + (this, handle) + } + + /// Sets the poll interval for this task. + pub fn set_poll_interval(&mut self, poll_interval: Duration) { + self.poll_interval = poll_interval; + } + + /// Runs stale key detection for a single tree version. + #[tracing::instrument(skip(db))] + pub fn bogus_stale_keys(db: &RocksDBWrapper, version: u64) -> Vec { + const SAMPLE_COUNT: usize = 5; + + let version_keys = db.all_keys_for_version(version).unwrap_or_else(|err| { + panic!("failed loading keys changed in tree version {version}: {err}") + }); + let stale_keys = db.stale_keys(version); + + if !version_keys.unreachable_keys.is_empty() { + let keys_sample: Vec<_> = version_keys + .unreachable_keys + .iter() + .take(SAMPLE_COUNT) + .collect::>(); + tracing::warn!( + version, + unreachable_keys.len = version_keys.unreachable_keys.len(), + unreachable_keys.sample = ?keys_sample, + "Found unreachable keys in tree" + ); + } + + let mut bogus_stale_keys = vec![]; + for stale_key in stale_keys { + if version_keys.valid_keys.contains(&stale_key.nibbles) { + // Normal case: a new node obsoletes a previous version. + } else if version_keys.unreachable_keys.contains(&stale_key.nibbles) { + // Explainable bogus stale key: a node that was updated in `version` before the truncation is no longer updated after truncation. + bogus_stale_keys.push(stale_key); + } else { + tracing::warn!( + version, + ?stale_key, + "Unexplained bogus stale key: not present in any nodes changed in the tree version" + ); + bogus_stale_keys.push(stale_key); + } + } + + if bogus_stale_keys.is_empty() { + return vec![]; + } + + let keys_sample: Vec<_> = bogus_stale_keys.iter().take(SAMPLE_COUNT).collect(); + tracing::info!( + stale_keys.len = bogus_stale_keys.len(), + stale_keys.sample = ?keys_sample, + "Found bogus stale keys" + ); + bogus_stale_keys + } + + /// Returns a boolean flag indicating whether the task data was updated. 
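// Classification sketch mirroring the loop above (editor's addition, stand-in booleans for the
// set lookups): a stale key is legitimate if a node with the same nibbles was rewritten in this
// version; it is an expected truncation artifact if the nibbles are only reachable via
// pre-truncation writes; anything else is logged as unexplained. Both of the latter kinds are
// treated as bogus and repaired.
enum StaleKeyKind {
    Legitimate,
    TruncationArtifact,
    Unexplained,
}

fn classify(in_valid_keys: bool, in_unreachable_keys: bool) -> StaleKeyKind {
    match (in_valid_keys, in_unreachable_keys) {
        (true, _) => StaleKeyKind::Legitimate,
        (false, true) => StaleKeyKind::TruncationArtifact,
        (false, false) => StaleKeyKind::Unexplained,
    }
}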
+    fn step(&mut self) -> anyhow::Result<Option<StepStats>> {
+        let repair_data = self
+            .db
+            .stale_keys_repair_data()
+            .context("failed getting repair data")?;
+        let min_stale_key_version = self.db.min_stale_key_version();
+        let start_version = match (repair_data, min_stale_key_version) {
+            (_, None) => {
+                tracing::debug!("No stale keys in tree, nothing to do");
+                return Ok(None);
+            }
+            (None, Some(version)) => version,
+            (Some(data), Some(version)) => data.next_version.max(version),
+        };
+
+        let latest_version = self
+            .db
+            .manifest()
+            .and_then(|manifest| manifest.version_count.checked_sub(1));
+        let Some(latest_version) = latest_version else {
+            tracing::warn!(
+                min_stale_key_version,
+                "Tree has stale keys, but no latest versions"
+            );
+            return Ok(None);
+        };
+
+        let end_version = (start_version + self.parallelism - 1).min(latest_version);
+        let versions = start_version..=end_version;
+        if versions.is_empty() {
+            tracing::debug!(?versions, latest_version, "No tree versions to check");
+            return Ok(None);
+        }
+
+        tracing::debug!(
+            ?versions,
+            latest_version,
+            ?min_stale_key_version,
+            "Checking stale keys"
+        );
+
+        let stale_keys = versions
+            .clone()
+            .into_par_iter()
+            .map(|version| {
+                Self::bogus_stale_keys(&self.db, version)
+                    .into_iter()
+                    .map(|key| StaleNodeKey::new(key, version))
+                    .collect::<Vec<_>>()
+            })
+            .reduce(Vec::new, |mut acc, keys| {
+                acc.extend(keys);
+                acc
+            });
+        self.update_task_data(versions.clone(), &stale_keys)?;
+
+        Ok(Some(StepStats {
+            checked_versions: versions,
+            repaired_key_count: stale_keys.len(),
+        }))
+    }
+
+    #[tracing::instrument(
+        level = "debug",
+        err,
+        skip(self, removed_keys),
+        fields(removed_keys.len = removed_keys.len()),
+    )]
+    fn update_task_data(
+        &mut self,
+        versions: ops::RangeInclusive<u64>,
+        removed_keys: &[StaleNodeKey],
+    ) -> anyhow::Result<()> {
+        tracing::debug!("Updating task data");
+        let started_at = Instant::now();
+        let new_data = StaleKeysRepairData {
+            next_version: *versions.end() + 1,
+        };
+        self.db
+            .repair_stale_keys(&new_data, removed_keys)
+            .context("failed removing bogus stale keys")?;
+        let latency = started_at.elapsed();
+        tracing::debug!(?latency, "Updated task data");
+        Ok(())
+    }
+
+    fn wait_for_abort(&mut self, timeout: Duration) -> bool {
+        match self.aborted_receiver.recv_timeout(timeout) {
+            Ok(()) | Err(mpsc::RecvTimeoutError::Disconnected) => true,
+            Err(mpsc::RecvTimeoutError::Timeout) => false,
+        }
+    }
+
+    fn update_stats(&self, step_stats: StepStats) {
+        let mut stats = self.stats.lock().expect("stats mutex poisoned");
+        if let Some(versions) = &mut stats.checked_versions {
+            *versions = *versions.start()..=*step_stats.checked_versions.end();
+        } else {
+            stats.checked_versions = Some(step_stats.checked_versions);
+        }
+        stats.repaired_key_count += step_stats.repaired_key_count;
+    }
+
+    /// Runs this task indefinitely.
+    ///
+    /// # Errors
+    ///
+    /// Propagates RocksDB I/O errors.
+    pub fn run(mut self) -> anyhow::Result<()> {
+        let repair_data = self
+            .db
+            .stale_keys_repair_data()
+            .context("failed getting repair data")?;
+        tracing::info!(
+            parallelism = self.parallelism,
+            poll_interval = ?self.poll_interval,
+            ?repair_data,
+            "Starting repair task"
+        );
+
+        let mut wait_interval = Duration::ZERO;
+        while !self.wait_for_abort(wait_interval) {
+            wait_interval = if let Some(step_stats) = self.step()?
{ + self.update_stats(step_stats); + Duration::ZERO + } else { + self.poll_interval + }; + } + tracing::info!("Stop signal received, stale keys repair is shut down"); + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use std::thread; + + use super::*; + use crate::{ + utils::testonly::setup_tree_with_stale_keys, Key, MerkleTree, MerkleTreePruner, TreeEntry, + ValueHash, + }; + + #[test] + fn stale_keys_repair_with_normal_tree() { + let temp_dir = tempfile::TempDir::new().unwrap(); + let mut db = RocksDBWrapper::new(temp_dir.path()).unwrap(); + + // The task should work fine with future tree versions. + for version in [0, 1, 100] { + let bogus_stale_keys = StaleKeysRepairTask::bogus_stale_keys(&db, version); + assert!(bogus_stale_keys.is_empty()); + } + + let kvs: Vec<_> = (0_u64..100) + .map(|i| TreeEntry::new(Key::from(i), i + 1, ValueHash::zero())) + .collect(); + MerkleTree::new(&mut db).unwrap().extend(kvs).unwrap(); + + let bogus_stale_keys = StaleKeysRepairTask::bogus_stale_keys(&db, 0); + assert!(bogus_stale_keys.is_empty()); + } + + #[test] + fn detecting_bogus_stale_keys() { + let temp_dir = tempfile::TempDir::new().unwrap(); + let mut db = RocksDBWrapper::new(temp_dir.path()).unwrap(); + setup_tree_with_stale_keys(&mut db, true); + + let bogus_stale_keys = StaleKeysRepairTask::bogus_stale_keys(&db, 1); + assert!(!bogus_stale_keys.is_empty()); + + let (mut task, _handle) = StaleKeysRepairTask::new(db); + task.parallelism = 10; // Ensure that all tree versions are checked at once. + // Repair the tree. + let step_stats = task.step().unwrap().expect("tree was not repaired"); + assert_eq!(step_stats.checked_versions, 1..=1); + assert!(step_stats.repaired_key_count > 0); + // Check that the tree works fine once it's pruned. + let (mut pruner, _) = MerkleTreePruner::new(&mut task.db); + pruner.prune_up_to(1).unwrap().expect("tree was not pruned"); + + MerkleTree::new(&mut task.db) + .unwrap() + .verify_consistency(1, false) + .unwrap(); + + let bogus_stale_keys = StaleKeysRepairTask::bogus_stale_keys(&task.db, 1); + assert!(bogus_stale_keys.is_empty()); + MerkleTree::new(&mut task.db) + .unwrap() + .verify_consistency(1, false) + .unwrap(); + + assert!(task.step().unwrap().is_none()); + } + + #[test] + fn full_stale_keys_task_workflow() { + let temp_dir = tempfile::TempDir::new().unwrap(); + let mut db = RocksDBWrapper::new(temp_dir.path()).unwrap(); + setup_tree_with_stale_keys(&mut db, true); + + let (task, handle) = StaleKeysRepairTask::new(db.clone()); + let task_thread = thread::spawn(|| task.run()); + + loop { + if let Some(task_data) = db.stale_keys_repair_data().unwrap() { + if task_data.next_version == 2 { + // All tree versions are processed. + break; + } + } + thread::sleep(Duration::from_millis(50)); + } + let stats = handle.stats(); + assert_eq!(stats.checked_versions, Some(1..=1)); + assert!(stats.repaired_key_count > 0, "{stats:?}"); + + assert!(!task_thread.is_finished()); + drop(handle); + task_thread.join().unwrap().unwrap(); + + let bogus_stale_keys = StaleKeysRepairTask::bogus_stale_keys(&db, 1); + assert!(bogus_stale_keys.is_empty()); + } +} diff --git a/core/lib/merkle_tree/src/storage/database.rs b/core/lib/merkle_tree/src/storage/database.rs index a6e8a36c7084..a18deb643ca2 100644 --- a/core/lib/merkle_tree/src/storage/database.rs +++ b/core/lib/merkle_tree/src/storage/database.rs @@ -400,6 +400,17 @@ pub trait PruneDatabase: Database { /// /// Propagates database I/O errors. 
fn prune(&mut self, patch: PrunePatchSet) -> anyhow::Result<()>;
+
+    /// Atomically truncates the specified range of versions and stale keys.
+    ///
+    /// # Errors
+    ///
+    /// Propagates database I/O errors.
+    fn truncate(
+        &mut self,
+        manifest: Manifest,
+        truncated_versions: ops::RangeTo<u64>,
+    ) -> anyhow::Result<()>;
 }
 
 impl<T: PruneDatabase> PruneDatabase for &mut T {
@@ -414,6 +425,14 @@ impl<T: PruneDatabase> PruneDatabase for &mut T {
     fn prune(&mut self, patch: PrunePatchSet) -> anyhow::Result<()> {
         (**self).prune(patch)
     }
+
+    fn truncate(
+        &mut self,
+        manifest: Manifest,
+        truncated_versions: ops::RangeTo<u64>,
+    ) -> anyhow::Result<()> {
+        (**self).truncate(manifest, truncated_versions)
+    }
 }
 
 impl PruneDatabase for PatchSet {
@@ -447,6 +466,17 @@
             .retain(|version, _| !patch.deleted_stale_key_versions.contains(version));
         Ok(())
     }
+
+    fn truncate(
+        &mut self,
+        manifest: Manifest,
+        truncated_versions: ops::RangeTo<u64>,
+    ) -> anyhow::Result<()> {
+        self.manifest = manifest;
+        self.stale_keys_by_version
+            .retain(|version, _| !truncated_versions.contains(version));
+        Ok(())
+    }
 }
 
 #[cfg(test)]
diff --git a/core/lib/merkle_tree/src/storage/parallel.rs b/core/lib/merkle_tree/src/storage/parallel.rs
index c5368c4561d2..06b147efee8a 100644
--- a/core/lib/merkle_tree/src/storage/parallel.rs
+++ b/core/lib/merkle_tree/src/storage/parallel.rs
@@ -4,7 +4,7 @@ use std::{
     any::Any,
     collections::{HashMap, VecDeque},
     error::Error as StdError,
-    mem,
+    mem, ops,
     sync::{mpsc, Arc},
     thread,
     time::Duration,
@@ -375,6 +375,17 @@ impl<DB: PruneDatabase> PruneDatabase for ParallelDatabase<DB> {
             .context("failed synchronizing database before pruning")?;
         self.inner.prune(patch)
     }
+
+    fn truncate(
+        &mut self,
+        manifest: Manifest,
+        truncated_versions: ops::RangeTo<u64>,
+    ) -> anyhow::Result<()> {
+        // Require the underlying database to be fully synced.
+        self.wait_sync()
+            .context("failed synchronizing database before truncation")?;
+        self.inner.truncate(manifest, truncated_versions)
+    }
 }
 
 /// Database with either sequential or parallel persistence.
@@ -479,6 +490,17 @@ impl<DB: PruneDatabase> PruneDatabase for MaybeParallel<DB> {
             Self::Parallel(db) => db.prune(patch),
         }
     }
+
+    fn truncate(
+        &mut self,
+        manifest: Manifest,
+        truncated_versions: ops::RangeTo<u64>,
+    ) -> anyhow::Result<()> {
+        match self {
+            Self::Sequential(db) => db.truncate(manifest, truncated_versions),
+            Self::Parallel(db) => db.truncate(manifest, truncated_versions),
+        }
+    }
 }
 
 #[cfg(test)]
diff --git a/core/lib/merkle_tree/src/storage/rocksdb.rs b/core/lib/merkle_tree/src/storage/rocksdb.rs
index 711ccaa6137e..5a40c82b680c 100644
--- a/core/lib/merkle_tree/src/storage/rocksdb.rs
+++ b/core/lib/merkle_tree/src/storage/rocksdb.rs
@@ -1,6 +1,13 @@
 //! RocksDB implementation of [`Database`].
 
-use std::{any::Any, cell::RefCell, path::Path, sync::Arc};
+use std::{
+    any::Any,
+    cell::RefCell,
+    collections::{HashMap, HashSet},
+    ops,
+    path::Path,
+    sync::Arc,
+};
 
 use anyhow::Context as _;
 use rayon::prelude::*;
@@ -15,6 +22,7 @@ use zksync_storage::{
 use crate::{
     errors::{DeserializeError, ErrorContext},
     metrics::ApplyPatchStats,
+    repair::StaleKeysRepairData,
     storage::{
         database::{PruneDatabase, PrunePatchSet},
         Database, NodeKeys, PatchSet,
@@ -53,6 +61,32 @@ impl NamedColumnFamily for MerkleTreeColumnFamily {
 
 type LocalProfiledOperation = RefCell<Option<Arc<ProfiledOperation>>>;
 
+/// Unifies keys that can be used to load raw data from RocksDB.
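+///
+/// Implemented both for bare `NodeKey`s and for `(NodeKey, is_leaf)` pairs, so that
+/// `raw_nodes()` below can serve plain key lists and node-key lookups alike.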
+pub(crate) trait ToDbKey: Sync {
+    fn to_db_key(&self) -> Vec<u8>;
+}
+
+impl ToDbKey for NodeKey {
+    fn to_db_key(&self) -> Vec<u8> {
+        NodeKey::to_db_key(*self)
+    }
+}
+
+impl ToDbKey for (NodeKey, bool) {
+    fn to_db_key(&self) -> Vec<u8> {
+        NodeKey::to_db_key(self.0)
+    }
+}
+
+/// All node keys modified in a certain version of the tree, loaded via a prefix iterator.
+#[derive(Debug, Default)]
+pub(crate) struct VersionKeys {
+    /// Valid / reachable keys modified in the version.
+    pub valid_keys: HashSet<Nibbles>,
+    /// Unreachable keys modified in the version, e.g. as a result of truncating the tree and overwriting the version.
+    pub unreachable_keys: HashSet<Nibbles>,
+}
+
 /// Main [`Database`] implementation wrapping a [`RocksDB`] reference.
 ///
 /// # Cloning
@@ -80,6 +114,8 @@ impl RocksDBWrapper {
     // since the minimum node key is [0, 0, 0, 0, 0, 0, 0, 0].
     const MANIFEST_KEY: &'static [u8] = &[0];
 
+    const STALE_KEYS_REPAIR_KEY: &'static [u8] = &[0, 0];
+
     /// Creates a new wrapper, initializing RocksDB at the specified directory.
     ///
     /// # Errors
@@ -112,7 +148,7 @@ impl RocksDBWrapper {
             .expect("Failed reading from RocksDB")
     }
 
-    fn raw_nodes(&self, keys: &NodeKeys) -> Vec<Option<DBPinnableSlice<'_>>> {
+    pub(crate) fn raw_nodes<T: ToDbKey>(&self, keys: &[T]) -> Vec<Option<DBPinnableSlice<'_>>> {
         // Propagate the currently profiled operation to rayon threads used in the parallel iterator below.
         let profiled_operation = self
             .profiled_operation
@@ -126,7 +162,7 @@ impl RocksDBWrapper {
                 let _guard = profiled_operation
                     .as_ref()
                     .and_then(ProfiledOperation::start_profiling);
-                let keys = chunk.iter().map(|(key, _)| key.to_db_key());
+                let keys = chunk.iter().map(ToDbKey::to_db_key);
                 let results = self.db.multi_get_cf(MerkleTreeColumnFamily::Tree, keys);
                 results
                     .into_iter()
@@ -144,9 +180,9 @@ impl RocksDBWrapper {
         // If we didn't succeed with the patch set, or the key version is old,
         // access the underlying storage.
         let node = if is_leaf {
-            LeafNode::deserialize(raw_node).map(Node::Leaf)
+            LeafNode::deserialize(raw_node, false).map(Node::Leaf)
         } else {
-            InternalNode::deserialize(raw_node).map(Node::Internal)
+            InternalNode::deserialize(raw_node, false).map(Node::Internal)
         };
         node.map_err(|err| {
             err.with_context(if is_leaf {
@@ -157,6 +193,83 @@ impl RocksDBWrapper {
         })
     }
 
+    pub(crate) fn all_keys_for_version(
+        &self,
+        version: u64,
+    ) -> Result<VersionKeys, DeserializeError> {
+        let Some(Root::Filled {
+            node: root_node, ..
+        }) = self.root(version)
+        else {
+            return Ok(VersionKeys::default());
+        };
+
+        let cf = MerkleTreeColumnFamily::Tree;
+        let version_prefix = version.to_be_bytes();
+        let mut nodes = HashMap::from([(Nibbles::EMPTY, root_node)]);
+        let mut unreachable_keys = HashSet::new();
+
+        for (raw_key, raw_value) in self.db.prefix_iterator_cf(cf, &version_prefix) {
+            let key = NodeKey::from_db_key(&raw_key);
+            let Some((parent_nibbles, nibble)) = key.nibbles.split_last() else {
+                // Root node, already processed
+                continue;
+            };
+            let Some(Node::Internal(parent)) = nodes.get(&parent_nibbles) else {
+                unreachable_keys.insert(key.nibbles);
+                continue;
+            };
+            let Some(this_ref) = parent.child_ref(nibble) else {
+                unreachable_keys.insert(key.nibbles);
+                continue;
+            };
+            if this_ref.version != version {
+                unreachable_keys.insert(key.nibbles);
+                continue;
+            }
+
+            // Now we are sure that `this_ref` actually points to the node we're processing.
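+            // (If the parent references this path at a different version, the entry under the
+            // current version prefix is a leftover from an overwritten incarnation of the
+            // version, i.e., unreachable.)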
+            let node = Self::deserialize_node(&raw_value, &key, this_ref.is_leaf)?;
+            nodes.insert(key.nibbles, node);
+        }
+
+        Ok(VersionKeys {
+            valid_keys: nodes.into_keys().collect(),
+            unreachable_keys,
+        })
+    }
+
+    pub(crate) fn repair_stale_keys(
+        &mut self,
+        data: &StaleKeysRepairData,
+        removed_keys: &[StaleNodeKey],
+    ) -> anyhow::Result<()> {
+        let mut raw_value = vec![];
+        data.serialize(&mut raw_value);
+
+        let mut write_batch = self.db.new_write_batch();
+        write_batch.put_cf(
+            MerkleTreeColumnFamily::Tree,
+            Self::STALE_KEYS_REPAIR_KEY,
+            &raw_value,
+        );
+        for key in removed_keys {
+            write_batch.delete_cf(MerkleTreeColumnFamily::StaleKeys, &key.to_db_key());
+        }
+        self.db
+            .write(write_batch)
+            .context("Failed writing a batch to RocksDB")
+    }
+
+    pub(crate) fn stale_keys_repair_data(
+        &self,
+    ) -> Result<Option<StaleKeysRepairData>, DeserializeError> {
+        let Some(raw_value) = self.raw_node(Self::STALE_KEYS_REPAIR_KEY) else {
+            return Ok(None);
+        };
+        StaleKeysRepairData::deserialize(&raw_value).map(Some)
+    }
+
     /// Returns the wrapped RocksDB instance.
     pub fn into_inner(self) -> RocksDB<MerkleTreeColumnFamily> {
         self.db
@@ -187,7 +300,7 @@ impl Database for RocksDBWrapper {
         let Some(raw_root) = self.raw_node(&NodeKey::empty(version).to_db_key()) else {
             return Ok(None);
         };
-        Root::deserialize(&raw_root)
+        Root::deserialize(&raw_root, false)
             .map(Some)
             .map_err(|err| err.with_context(ErrorContext::Root(version)))
     }
@@ -334,6 +447,32 @@ impl PruneDatabase for RocksDBWrapper {
             .write(write_batch)
             .context("Failed writing a batch to RocksDB")
     }
+
+    fn truncate(
+        &mut self,
+        manifest: Manifest,
+        truncated_versions: ops::RangeTo<u64>,
+    ) -> anyhow::Result<()> {
+        anyhow::ensure!(
+            manifest.version_count <= truncated_versions.end,
+            "Invalid truncate call: manifest={manifest:?}, truncated_versions={truncated_versions:?}"
+        );
+        let mut write_batch = self.db.new_write_batch();
+
+        let tree_cf = MerkleTreeColumnFamily::Tree;
+        let mut node_bytes = Vec::with_capacity(128);
+        manifest.serialize(&mut node_bytes);
+        write_batch.put_cf(tree_cf, Self::MANIFEST_KEY, &node_bytes);
+
+        let stale_keys_cf = MerkleTreeColumnFamily::StaleKeys;
+        let first_version = &manifest.version_count.to_be_bytes() as &[_];
+        let last_version = &truncated_versions.end.to_be_bytes();
+        write_batch.delete_range_cf(stale_keys_cf, first_version..last_version);
+
+        self.db
+            .write(write_batch)
+            .context("Failed writing a batch to RocksDB")
+    }
 }
 
 #[cfg(test)]
diff --git a/core/lib/merkle_tree/src/storage/serialization.rs b/core/lib/merkle_tree/src/storage/serialization.rs
index f21fece94e09..700a4cd5020b 100644
--- a/core/lib/merkle_tree/src/storage/serialization.rs
+++ b/core/lib/merkle_tree/src/storage/serialization.rs
@@ -4,8 +4,9 @@
 
 use crate::{
     errors::{DeserializeError, DeserializeErrorKind, ErrorContext},
+    repair::StaleKeysRepairData,
     types::{
-        ChildRef, InternalNode, Key, LeafNode, Manifest, Node, Root, TreeTags, ValueHash,
+        ChildRef, InternalNode, Key, LeafNode, Manifest, Node, RawNode, Root, TreeTags, ValueHash,
         HASH_SIZE, KEY_SIZE,
     },
 };
@@ -15,7 +16,7 @@
 const LEB128_SIZE_ESTIMATE: usize = 3;
 
 impl LeafNode {
-    pub(super) fn deserialize(bytes: &[u8]) -> Result<Self, DeserializeError> {
+    pub(super) fn deserialize(bytes: &[u8], strict: bool) -> Result<Self, DeserializeError> {
         if bytes.len() < KEY_SIZE + HASH_SIZE {
             return Err(DeserializeErrorKind::UnexpectedEof.into());
         }
@@ -26,6 +27,10 @@ impl LeafNode {
         let leaf_index = leb128::read::unsigned(&mut bytes).map_err(|err| {
            DeserializeErrorKind::Leb128(err).with_context(ErrorContext::LeafIndex)
         })?;
+        if strict && !bytes.is_empty() {
+            return Err(DeserializeErrorKind::Leftovers.into());
+        }
+
         Ok(Self {
             full_key,
             value_hash,
@@ -105,7 +110,7 @@ impl ChildRef {
 }
 
 impl InternalNode {
-    pub(super) fn deserialize(bytes: &[u8]) -> Result<Self, DeserializeError> {
+    pub(super) fn deserialize(bytes: &[u8], strict: bool) -> Result<Self, DeserializeError> {
         if bytes.len() < 4 {
             let err = DeserializeErrorKind::UnexpectedEof;
             return Err(err.with_context(ErrorContext::ChildrenMask));
@@ -134,6 +139,9 @@ impl InternalNode {
             }
             bitmap >>= 2;
         }
+        if strict && !bytes.is_empty() {
+            return Err(DeserializeErrorKind::Leftovers.into());
+        }
         Ok(this)
     }
 
@@ -161,8 +169,36 @@ impl InternalNode {
     }
 }
 
+impl RawNode {
+    pub(crate) fn deserialize(bytes: &[u8]) -> Self {
+        Self {
+            raw: bytes.to_vec(),
+            leaf: LeafNode::deserialize(bytes, true).ok(),
+            internal: InternalNode::deserialize(bytes, true).ok(),
+        }
+    }
+
+    pub(crate) fn deserialize_root(bytes: &[u8]) -> Self {
+        let root = Root::deserialize(bytes, true).ok();
+        let node = root.and_then(|root| match root {
+            Root::Empty => None,
+            Root::Filled { node, .. } => Some(node),
+        });
+        let (leaf, internal) = match node {
+            None => (None, None),
+            Some(Node::Leaf(leaf)) => (Some(leaf), None),
+            Some(Node::Internal(node)) => (None, Some(node)),
+        };
+        Self {
+            raw: bytes.to_vec(),
+            leaf,
+            internal,
+        }
+    }
+}
+
 impl Root {
-    pub(super) fn deserialize(mut bytes: &[u8]) -> Result<Self, DeserializeError> {
+    pub(super) fn deserialize(mut bytes: &[u8], strict: bool) -> Result<Self, DeserializeError> {
         let leaf_count = leb128::read::unsigned(&mut bytes).map_err(|err| {
             DeserializeErrorKind::Leb128(err).with_context(ErrorContext::LeafCount)
         })?;
@@ -172,11 +208,11 @@ impl Root {
                 // Try both the leaf and internal node serialization; in some cases, a single leaf
                 // may still be persisted as an internal node. Since serialization of an internal node with a single child
                 // is always shorter than that of a leaf, the order (first leaf, then internal node) is chosen intentionally.
-                LeafNode::deserialize(bytes)
+                LeafNode::deserialize(bytes, strict)
                     .map(Node::Leaf)
-                    .or_else(|_| InternalNode::deserialize(bytes).map(Node::Internal))?
+                    .or_else(|_| InternalNode::deserialize(bytes, strict).map(Node::Internal))?
} - _ => Node::Internal(InternalNode::deserialize(bytes)?), + _ => Node::Internal(InternalNode::deserialize(bytes, strict)?), }; Ok(Self::new(leaf_count, node)) } @@ -320,6 +356,18 @@ impl Manifest { } } +impl StaleKeysRepairData { + pub(super) fn deserialize(mut bytes: &[u8]) -> Result { + let next_version = + leb128::read::unsigned(&mut bytes).map_err(DeserializeErrorKind::Leb128)?; + Ok(Self { next_version }) + } + + pub(super) fn serialize(&self, buffer: &mut Vec) { + leb128::write::unsigned(buffer, self.next_version).unwrap(); + } +} + #[cfg(test)] mod tests { use zksync_types::H256; @@ -440,7 +488,7 @@ mod tests { assert_eq!(buffer[64], 42); // leaf index assert_eq!(buffer.len(), 65); - let leaf_copy = LeafNode::deserialize(&buffer).unwrap(); + let leaf_copy = LeafNode::deserialize(&buffer, true).unwrap(); assert_eq!(leaf_copy, leaf); } @@ -471,7 +519,7 @@ mod tests { let child_count = bitmap.count_ones(); assert_eq!(child_count, 2); - let node_copy = InternalNode::deserialize(&buffer).unwrap(); + let node_copy = InternalNode::deserialize(&buffer, true).unwrap(); assert_eq!(node_copy, node); } @@ -482,7 +530,7 @@ mod tests { root.serialize(&mut buffer); assert_eq!(buffer, [0]); - let root_copy = Root::deserialize(&buffer).unwrap(); + let root_copy = Root::deserialize(&buffer, true).unwrap(); assert_eq!(root_copy, root); } @@ -494,7 +542,7 @@ mod tests { root.serialize(&mut buffer); assert_eq!(buffer[0], 1); - let root_copy = Root::deserialize(&buffer).unwrap(); + let root_copy = Root::deserialize(&buffer, true).unwrap(); assert_eq!(root_copy, root); } @@ -506,7 +554,7 @@ mod tests { root.serialize(&mut buffer); assert_eq!(buffer[0], 2); - let root_copy = Root::deserialize(&buffer).unwrap(); + let root_copy = Root::deserialize(&buffer, true).unwrap(); assert_eq!(root_copy, root); } } diff --git a/core/lib/merkle_tree/src/types/internal.rs b/core/lib/merkle_tree/src/types/internal.rs index 399f6c840a3c..2db075d92212 100644 --- a/core/lib/merkle_tree/src/types/internal.rs +++ b/core/lib/merkle_tree/src/types/internal.rs @@ -2,7 +2,9 @@ //! some of these types are declared as public and can be even exported using the `unstable` module. //! Still, logically these types are private, so adding them to new public APIs etc. is a logical error. -use std::{collections::HashMap, fmt, num::NonZeroU64}; +use std::{collections::HashMap, fmt, num::NonZeroU64, str::FromStr}; + +use anyhow::Context; use crate::{ hasher::{HashTree, InternalNodeCache}, @@ -276,6 +278,34 @@ impl fmt::Debug for Nibbles { } } +impl FromStr for Nibbles { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + anyhow::ensure!(s.len() <= KEY_SIZE * 2, "too many nibbles"); + let mut bytes = NibblesBytes::default(); + for (i, byte) in s.bytes().enumerate() { + let nibble = match byte { + b'0'..=b'9' => byte - b'0', + b'A'..=b'F' => byte - b'A' + 10, + b'a'..=b'f' => byte - b'a' + 10, + _ => anyhow::bail!("unexpected nibble: {byte:?}"), + }; + + assert!(nibble < 16); + if i % 2 == 0 { + bytes[i / 2] = nibble * 16; + } else { + bytes[i / 2] += nibble; + } + } + Ok(Self { + nibble_count: s.len(), + bytes, + }) + } +} + /// Versioned key in a radix-16 Merkle tree. 
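+///
+/// Has a human-readable form used by the `Display` and `FromStr` impls below: e.g.,
+/// `"3:deadbeef0"` denotes version 3 and nibble path `deadbeef0`, and `"0:"` denotes
+/// the root node of version 0.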
#[derive(Clone, Copy, PartialEq, Eq, Hash)] pub struct NodeKey { @@ -283,12 +313,31 @@ pub struct NodeKey { pub(crate) nibbles: Nibbles, } -impl fmt::Debug for NodeKey { +impl fmt::Display for NodeKey { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { write!(formatter, "{}:{}", self.version, self.nibbles) } } +impl fmt::Debug for NodeKey { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, formatter) + } +} + +impl FromStr for NodeKey { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + let (version, nibbles) = s + .split_once(':') + .context("node key does not contain `:` delimiter")?; + let version = version.parse().context("invalid key version")?; + let nibbles = nibbles.parse().context("invalid nibbles")?; + Ok(Self { version, nibbles }) + } +} + impl NodeKey { pub(crate) const fn empty(version: u64) -> Self { Self { @@ -331,19 +380,13 @@ impl NodeKey { } } -impl fmt::Display for NodeKey { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(formatter, "{}:{}", self.version, self.nibbles) - } -} - /// Leaf node of the tree. #[derive(Debug, Clone, Copy)] #[cfg_attr(test, derive(PartialEq, Eq))] pub struct LeafNode { - pub(crate) full_key: Key, - pub(crate) value_hash: ValueHash, - pub(crate) leaf_index: u64, + pub full_key: Key, + pub value_hash: ValueHash, + pub leaf_index: u64, } impl LeafNode { @@ -364,7 +407,7 @@ impl LeafNode { /// Reference to a child in an [`InternalNode`]. #[derive(Debug, Clone, Copy)] #[cfg_attr(test, derive(PartialEq, Eq))] -pub(crate) struct ChildRef { +pub struct ChildRef { pub hash: ValueHash, pub version: u64, pub is_leaf: bool, @@ -449,7 +492,7 @@ impl InternalNode { self.cache.get_or_insert(cache) } - pub(crate) fn children(&self) -> impl Iterator + '_ { + pub fn children(&self) -> impl Iterator + '_ { self.children.iter() } @@ -510,6 +553,17 @@ impl From for Node { } } +/// Raw node fetched from a database. +#[derive(Debug)] +pub struct RawNode { + /// Bytes for a serialized node. + pub raw: Vec, + /// Leaf if a node can be deserialized into it. + pub leaf: Option, + /// Internal node if a node can be deserialized into it. + pub internal: Option, +} + /// Root node of the tree. Besides a [`Node`], contains the general information about the tree /// (e.g., the number of leaves). 
#[derive(Debug, Clone)] @@ -614,15 +668,23 @@ mod tests { fn nibbles_and_node_key_display() { let nibbles = Nibbles::new(&TEST_KEY, 5); assert_eq!(nibbles.to_string(), "deadb"); + let restored: Nibbles = nibbles.to_string().parse().unwrap(); + assert_eq!(restored, nibbles); let nibbles = Nibbles::new(&TEST_KEY, 6); assert_eq!(nibbles.to_string(), "deadbe"); + let restored: Nibbles = nibbles.to_string().parse().unwrap(); + assert_eq!(restored, nibbles); let nibbles = Nibbles::new(&TEST_KEY, 9); assert_eq!(nibbles.to_string(), "deadbeef0"); + let restored: Nibbles = nibbles.to_string().parse().unwrap(); + assert_eq!(restored, nibbles); let node_key = nibbles.with_version(3); assert_eq!(node_key.to_string(), "3:deadbeef0"); + let restored: NodeKey = node_key.to_string().parse().unwrap(); + assert_eq!(restored, node_key); } #[test] diff --git a/core/lib/merkle_tree/src/types/mod.rs b/core/lib/merkle_tree/src/types/mod.rs index 807ae0238769..63db4b318b27 100644 --- a/core/lib/merkle_tree/src/types/mod.rs +++ b/core/lib/merkle_tree/src/types/mod.rs @@ -6,7 +6,7 @@ pub(crate) use self::internal::{ ChildRef, Nibbles, NibblesBytes, StaleNodeKey, TreeTags, HASH_SIZE, KEY_SIZE, TREE_DEPTH, }; pub use self::internal::{ - InternalNode, LeafNode, Manifest, Node, NodeKey, ProfiledTreeOperation, Root, + InternalNode, LeafNode, Manifest, Node, NodeKey, ProfiledTreeOperation, RawNode, Root, }; mod internal; diff --git a/core/lib/merkle_tree/src/utils.rs b/core/lib/merkle_tree/src/utils.rs index 4771a940f2c8..a3c025a8b7bd 100644 --- a/core/lib/merkle_tree/src/utils.rs +++ b/core/lib/merkle_tree/src/utils.rs @@ -165,6 +165,49 @@ impl Iterator for MergingIter { impl ExactSizeIterator for MergingIter {} +#[cfg(test)] +pub(crate) mod testonly { + use crate::{Key, MerkleTree, PruneDatabase, TreeEntry, ValueHash}; + + pub(crate) fn setup_tree_with_stale_keys(db: impl PruneDatabase, incorrect_truncation: bool) { + let mut tree = MerkleTree::new(db).unwrap(); + let kvs: Vec<_> = (0_u64..100) + .map(|i| TreeEntry::new(Key::from(i), i + 1, ValueHash::zero())) + .collect(); + tree.extend(kvs).unwrap(); + + let overridden_kvs = vec![TreeEntry::new( + Key::from(0), + 1, + ValueHash::repeat_byte(0xaa), + )]; + tree.extend(overridden_kvs).unwrap(); + + let stale_keys = tree.db.stale_keys(1); + assert!( + stale_keys.iter().any(|key| !key.is_empty()), + "{stale_keys:?}" + ); + + // Revert `overridden_kvs`. + if incorrect_truncation { + tree.truncate_recent_versions_incorrectly(1).unwrap(); + } else { + tree.truncate_recent_versions(1).unwrap(); + } + assert_eq!(tree.latest_version(), Some(0)); + let future_stale_keys = tree.db.stale_keys(1); + assert_eq!(future_stale_keys.is_empty(), !incorrect_truncation); + + // Add a new version without the key. To make the matter more egregious, the inserted key + // differs from all existing keys, starting from the first nibble. 
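+        // (With `incorrect_truncation`, the stale keys recorded for the old version 1 now point
+        // at version-0 nodes that the new version 1 below still references, so pruning them
+        // would corrupt the tree; this is exactly what the repair task has to detect.)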
+ let new_key = Key::from_big_endian(&[0xaa; 32]); + let new_kvs = vec![TreeEntry::new(new_key, 101, ValueHash::repeat_byte(0xaa))]; + tree.extend(new_kvs).unwrap(); + assert_eq!(tree.latest_version(), Some(1)); + } +} + #[cfg(test)] mod tests { use zksync_types::U256; diff --git a/core/lib/merkle_tree/tests/integration/domain.rs b/core/lib/merkle_tree/tests/integration/domain.rs index abd3dbbcd3f3..fa7ec4cfde30 100644 --- a/core/lib/merkle_tree/tests/integration/domain.rs +++ b/core/lib/merkle_tree/tests/integration/domain.rs @@ -68,6 +68,31 @@ fn basic_workflow() { tree.verify_consistency(L1BatchNumber(0)).unwrap(); assert_eq!(tree.root_hash(), expected_root_hash); assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(1)); + + let keys = ["0:", "0:0"].map(|key| key.parse().unwrap()); + let raw_nodes = tree.reader().raw_nodes(&keys); + assert_eq!(raw_nodes.len(), 2); + let raw_root = raw_nodes[0].as_ref().unwrap(); + assert!(!raw_root.raw.is_empty()); + assert!(raw_root.internal.is_some()); + assert!(raw_root.leaf.is_none()); + + let raw_node = raw_nodes[1].as_ref().unwrap(); + assert!(!raw_node.raw.is_empty()); + assert!(raw_node.leaf.is_none()); + let raw_node = raw_node.internal.as_ref().unwrap(); + + let (nibble, _) = raw_node + .children() + .find(|(_, child_ref)| child_ref.is_leaf) + .unwrap(); + let leaf_key = format!("0:0{nibble:x}").parse().unwrap(); + let raw_nodes = tree.reader().raw_nodes(&[leaf_key]); + assert_eq!(raw_nodes.len(), 1); + let raw_leaf = raw_nodes.into_iter().next().unwrap().expect("no leaf"); + assert!(!raw_leaf.raw.is_empty()); + assert!(raw_leaf.leaf.is_some()); + assert!(raw_leaf.internal.is_none()); } #[test] diff --git a/core/lib/merkle_tree/tests/integration/merkle_tree.rs b/core/lib/merkle_tree/tests/integration/merkle_tree.rs index fc26cafe9ba7..789872d18730 100644 --- a/core/lib/merkle_tree/tests/integration/merkle_tree.rs +++ b/core/lib/merkle_tree/tests/integration/merkle_tree.rs @@ -6,8 +6,8 @@ use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; use test_casing::test_casing; use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ - Database, HashTree, MerkleTree, PatchSet, Patched, TreeEntry, TreeInstruction, TreeLogEntry, - TreeRangeDigest, + Database, HashTree, MerkleTree, PatchSet, Patched, PruneDatabase, TreeEntry, TreeInstruction, + TreeLogEntry, TreeRangeDigest, }; use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; @@ -270,7 +270,7 @@ fn accumulating_commits(chunk_size: usize) { test_accumulated_commits(PatchSet::default(), chunk_size); } -fn test_root_hash_computing_with_reverts(db: &mut impl Database) { +fn test_root_hash_computing_with_reverts(db: &mut impl PruneDatabase) { let (kvs, expected_hash) = &*ENTRIES_AND_HASH; let (initial_update, final_update) = kvs.split_at(75); let key_updates: Vec<_> = kvs diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index e49086a6b8b1..128e6fc0c4af 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -26,9 +26,9 @@ circuit_sequencer_api_1_5_0.workspace = true zksync_types.workspace = true zksync_contracts.workspace = true -zksync_utils.workspace = true zksync_system_constants.workspace = true zksync_vm_interface.workspace = true +zksync_mini_merkle_tree.workspace = true anyhow.workspace = true hex.workspace = true @@ -42,6 +42,7 @@ ethabi.workspace = true [dev-dependencies] assert_matches.workspace = true pretty_assertions.workspace = true +rand.workspace = true test-casing.workspace = true 
-zksync_test_account.workspace = true +zksync_test_contracts.workspace = true zksync_eth_signer.workspace = true diff --git a/core/lib/multivm/README.md b/core/lib/multivm/README.md index 5e2af426ae5b..34883db5990a 100644 --- a/core/lib/multivm/README.md +++ b/core/lib/multivm/README.md @@ -4,3 +4,17 @@ This crate represents a wrapper over several versions of VM that have been used glue code that allows switching the VM version based on the externally provided marker while preserving the public interface. This crate exists to enable the external node to process breaking upgrades and re-execute all the transactions from the genesis block. + +## Developer guidelines + +### Adding tests + +If you want to add unit tests for the VM wrapper, consider the following: + +- Whenever possible, make tests reusable; declare test logic in the [`testonly`](src/versions/testonly/mod.rs) module, + and then instantiate tests using this logic for the supported VM versions. If necessary, extend the tested VM trait so + that test logic can be defined in a generic way. See the `testonly` module docs for more detailed guidelines. +- If you define a generic test, don't forget to add its instantiations for all supported VMs (`vm_latest`, `vm_fast` and + `shadow`). `shadow` tests allow checking VM divergences for free! +- Do not use an RNG where it can be avoided (e.g., for test contract addresses). +- Avoid using zero / default values in cases they can be treated specially by the tested code. diff --git a/core/lib/multivm/src/glue/tracers/mod.rs b/core/lib/multivm/src/glue/tracers/mod.rs index bf2f67cae501..f5a854ecbaaf 100644 --- a/core/lib/multivm/src/glue/tracers/mod.rs +++ b/core/lib/multivm/src/glue/tracers/mod.rs @@ -7,7 +7,7 @@ //! Different VM versions may have distinct requirements and types for Tracers. To accommodate these differences, //! this module defines one primary trait: //! -//! - `MultiVMTracer`: This trait represents a tracer that can be converted into a tracer for +//! - `MultiVmTracer`: This trait represents a tracer that can be converted into a tracer for //! a specific VM version. //! //! Specific traits for each VM version, which support Custom Tracers: @@ -19,22 +19,22 @@ //! into a form compatible with the vm_virtual_blocks version. //! It defines a method `vm_virtual_blocks` for obtaining a boxed tracer. //! -//! For `MultiVMTracer` to be implemented, the Tracer must implement all N currently +//! For `MultiVmTracer` to be implemented, the Tracer must implement all N currently //! existing sub-traits. //! //! ## Adding a new VM version //! -//! To add support for one more VM version to MultiVMTracer, one needs to: +//! To add support for one more VM version to MultiVmTracer, one needs to: //! - Create a new trait performing conversion to the specified VM tracer, e.g., `IntoTracer`. -//! - Add this trait as a trait bound to the `MultiVMTracer`. -//! - Add this trait as a trait bound for `T` in `MultiVMTracer` implementation. +//! - Add this trait as a trait bound to the `MultiVmTracer`. +//! - Add this trait as a trait bound for `T` in `MultiVmTracer` implementation. //! - Implement the trait for `T` with a bound to `VmTracer` for a specific version. 
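+//!
+//! For instance, given a tracer satisfying all these bounds, client code can write
+//! (illustrative sketch; `MyTracer` is a hypothetical tracer type):
+//!
+//! ```ignore
+//! let tracer: MultiVmTracerPointer<S, H> = MyTracer::new().into_tracer_pointer();
+//! ```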
use crate::{interface::storage::WriteStorage, tracers::old::OldTracers, HistoryMode}; -pub type MultiVmTracerPointer = Box>; +pub type MultiVmTracerPointer = Box>; -pub trait MultiVMTracer: +pub trait MultiVmTracer: IntoLatestTracer + IntoVmVirtualBlocksTracer + IntoVmRefundsEnhancementTracer @@ -168,7 +168,7 @@ where } } -impl MultiVMTracer for T +impl MultiVmTracer for T where S: WriteStorage, H: HistoryMode, diff --git a/core/lib/multivm/src/glue/types/vm/block_context_mode.rs b/core/lib/multivm/src/glue/types/vm/block_context_mode.rs index 094339705e14..66634e504386 100644 --- a/core/lib/multivm/src/glue/types/vm/block_context_mode.rs +++ b/core/lib/multivm/src/glue/types/vm/block_context_mode.rs @@ -1,4 +1,4 @@ -use zksync_utils::h256_to_u256; +use zksync_types::h256_to_u256; use crate::glue::GlueFrom; diff --git a/core/lib/multivm/src/glue/types/vm/storage_log.rs b/core/lib/multivm/src/glue/types/vm/storage_log.rs index 322bc491e9ab..5f79ca9e9e15 100644 --- a/core/lib/multivm/src/glue/types/vm/storage_log.rs +++ b/core/lib/multivm/src/glue/types/vm/storage_log.rs @@ -1,7 +1,6 @@ use zksync_types::{ - zk_evm_types::LogQuery, StorageLog, StorageLogQuery, StorageLogWithPreviousValue, + u256_to_h256, zk_evm_types::LogQuery, StorageLog, StorageLogQuery, StorageLogWithPreviousValue, }; -use zksync_utils::u256_to_h256; use crate::glue::{GlueFrom, GlueInto}; diff --git a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs index 50bb19938fe7..c4eb0b1741aa 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries as sort_storage_access_queries_1_3_3; use itertools::Itertools; use zk_evm_1_3_1::aux_structures::LogQuery as LogQuery_1_3_1; @@ -47,7 +49,7 @@ impl GlueFrom for crate::interface::Fi circuit_statistic: Default::default(), }, refunds: Refunds::default(), - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -104,7 +106,7 @@ impl GlueFrom for crate::interface::Fi circuit_statistic: Default::default(), }, refunds: Refunds::default(), - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -160,7 +162,7 @@ impl GlueFrom for crate::interface: circuit_statistic: Default::default(), }, refunds: Refunds::default(), - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -230,7 +232,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), } } } @@ -263,7 +265,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), } } } @@ -312,7 +314,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), } } } diff --git a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs index 4c4cffcc6876..fa251116b85c 100644 --- 
a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use crate::glue::{GlueFrom, GlueInto}; impl GlueFrom @@ -22,7 +24,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), } } } @@ -49,7 +51,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), } } } @@ -76,7 +78,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), } } } diff --git a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs index 8978d4348edd..fcbcde990f37 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use crate::{ glue::{GlueFrom, GlueInto}, interface::{ @@ -66,14 +68,14 @@ impl GlueFrom VmExecutionResultAndLogs { result: ExecutionResult::Halt { reason: halt }, logs: Default::default(), statistics: Default::default(), refunds: Default::default(), - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), }, } } @@ -102,14 +104,14 @@ impl logs: Default::default(), statistics: Default::default(), refunds: Default::default(), - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), }, TxRevertReason::Halt(halt) => VmExecutionResultAndLogs { result: ExecutionResult::Halt { reason: halt }, logs: Default::default(), statistics: Default::default(), refunds: Default::default(), - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), }, } } @@ -133,7 +135,7 @@ impl GlueFrom { unreachable!("Halt is the only revert reason for VM 5") diff --git a/core/lib/multivm/src/glue/types/zk_evm_1_3_1.rs b/core/lib/multivm/src/glue/types/zk_evm_1_3_1.rs index dfe1121c04ec..08556b7b901a 100644 --- a/core/lib/multivm/src/glue/types/zk_evm_1_3_1.rs +++ b/core/lib/multivm/src/glue/types/zk_evm_1_3_1.rs @@ -1,4 +1,4 @@ -use zksync_utils::u256_to_h256; +use zksync_types::u256_to_h256; use crate::glue::{GlueFrom, GlueInto}; diff --git a/core/lib/multivm/src/glue/types/zk_evm_1_3_3.rs b/core/lib/multivm/src/glue/types/zk_evm_1_3_3.rs index 4c554c1bd53d..ab13de140cfb 100644 --- a/core/lib/multivm/src/glue/types/zk_evm_1_3_3.rs +++ b/core/lib/multivm/src/glue/types/zk_evm_1_3_3.rs @@ -2,8 +2,10 @@ use zk_evm_1_3_3::{ aux_structures::{LogQuery as LogQuery_1_3_3, Timestamp as Timestamp_1_3_3}, zkevm_opcode_defs::FarCallOpcode as FarCallOpcode_1_3_3, }; -use zksync_types::zk_evm_types::{FarCallOpcode, LogQuery, Timestamp}; -use zksync_utils::u256_to_h256; +use zksync_types::{ + u256_to_h256, + zk_evm_types::{FarCallOpcode, LogQuery, Timestamp}, +}; use crate::glue::{GlueFrom, GlueInto}; diff --git a/core/lib/multivm/src/glue/types/zk_evm_1_4_0.rs b/core/lib/multivm/src/glue/types/zk_evm_1_4_0.rs index 5af0e57c4bf9..c25a19b1aa3d 100644 --- a/core/lib/multivm/src/glue/types/zk_evm_1_4_0.rs +++ b/core/lib/multivm/src/glue/types/zk_evm_1_4_0.rs @@ -1,4 +1,4 @@ -use zksync_utils::u256_to_h256; +use zksync_types::u256_to_h256; use crate::glue::GlueFrom; diff --git a/core/lib/multivm/src/glue/types/zk_evm_1_4_1.rs b/core/lib/multivm/src/glue/types/zk_evm_1_4_1.rs index 933eafbb0354..6a8138bc2f24 100644 --- 
a/core/lib/multivm/src/glue/types/zk_evm_1_4_1.rs +++ b/core/lib/multivm/src/glue/types/zk_evm_1_4_1.rs @@ -2,8 +2,10 @@ use zk_evm_1_4_1::{ aux_structures::{LogQuery as LogQuery_1_4_1, Timestamp as Timestamp_1_4_1}, zkevm_opcode_defs::FarCallOpcode as FarCallOpcode_1_4_1, }; -use zksync_types::zk_evm_types::{FarCallOpcode, LogQuery, Timestamp}; -use zksync_utils::u256_to_h256; +use zksync_types::{ + u256_to_h256, + zk_evm_types::{FarCallOpcode, LogQuery, Timestamp}, +}; use crate::glue::{GlueFrom, GlueInto}; diff --git a/core/lib/multivm/src/glue/types/zk_evm_1_5_0.rs b/core/lib/multivm/src/glue/types/zk_evm_1_5_0.rs index eb1c8e1dd7e8..343843503bdd 100644 --- a/core/lib/multivm/src/glue/types/zk_evm_1_5_0.rs +++ b/core/lib/multivm/src/glue/types/zk_evm_1_5_0.rs @@ -1,4 +1,4 @@ -use zksync_utils::u256_to_h256; +use zksync_types::u256_to_h256; use crate::glue::{GlueFrom, GlueInto}; diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index 520274c14ae0..fc4085d9b021 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -10,7 +10,7 @@ pub use zksync_vm_interface as interface; pub use crate::{ glue::{ history_mode::HistoryMode, - tracers::{MultiVMTracer, MultiVmTracerPointer}, + tracers::{MultiVmTracer, MultiVmTracerPointer}, }, versions::{ vm_1_3_2, vm_1_4_1, vm_1_4_2, vm_boojum_integration, vm_fast, vm_latest, vm_m5, vm_m6, @@ -20,6 +20,7 @@ pub use crate::{ }; mod glue; +pub mod pubdata_builders; pub mod tracers; pub mod utils; mod versions; diff --git a/core/lib/multivm/src/pubdata_builders/mod.rs b/core/lib/multivm/src/pubdata_builders/mod.rs new file mode 100644 index 000000000000..c52c4c70c86a --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/mod.rs @@ -0,0 +1,24 @@ +use std::rc::Rc; + +pub use rollup::RollupPubdataBuilder; +pub use validium::ValidiumPubdataBuilder; +use zksync_types::commitment::{L1BatchCommitmentMode, PubdataParams}; + +use crate::interface::pubdata::PubdataBuilder; + +mod rollup; +#[cfg(test)] +mod tests; +mod utils; +mod validium; + +pub fn pubdata_params_to_builder(params: PubdataParams) -> Rc { + match params.pubdata_type { + L1BatchCommitmentMode::Rollup => { + Rc::new(RollupPubdataBuilder::new(params.l2_da_validator_address)) + } + L1BatchCommitmentMode::Validium => { + Rc::new(ValidiumPubdataBuilder::new(params.l2_da_validator_address)) + } + } +} diff --git a/core/lib/multivm/src/pubdata_builders/rollup.rs b/core/lib/multivm/src/pubdata_builders/rollup.rs new file mode 100644 index 000000000000..4a818dfe2314 --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/rollup.rs @@ -0,0 +1,128 @@ +use zksync_types::{ + ethabi, + ethabi::{ParamType, Token}, + l2_to_l1_log::l2_to_l1_logs_tree_size, + writes::compress_state_diffs, + Address, ProtocolVersionId, +}; + +use super::utils::{ + build_chained_bytecode_hash, build_chained_log_hash, build_chained_message_hash, + build_logs_root, encode_user_logs, +}; +use crate::interface::pubdata::{PubdataBuilder, PubdataInput}; + +#[derive(Debug, Clone, Copy)] +pub struct RollupPubdataBuilder { + pub l2_da_validator: Address, +} + +impl RollupPubdataBuilder { + pub fn new(l2_da_validator: Address) -> Self { + Self { l2_da_validator } + } +} + +impl PubdataBuilder for RollupPubdataBuilder { + fn l2_da_validator(&self) -> Address { + self.l2_da_validator + } + + fn l1_messenger_operator_input( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec { + if protocol_version.is_pre_gateway() { + let mut operator_input = vec![]; + 
extend_from_pubdata_input(&mut operator_input, input); + + // Extend with uncompressed state diffs. + operator_input.extend((input.state_diffs.len() as u32).to_be_bytes()); + for state_diff in &input.state_diffs { + operator_input.extend(state_diff.encode_padded()); + } + + operator_input + } else { + let mut pubdata = vec![]; + extend_from_pubdata_input(&mut pubdata, input); + + // Extend with uncompressed state diffs. + pubdata.extend((input.state_diffs.len() as u32).to_be_bytes()); + for state_diff in &input.state_diffs { + pubdata.extend(state_diff.encode_padded()); + } + + let chained_log_hash = build_chained_log_hash(&input.user_logs); + let log_root_hash = + build_logs_root(&input.user_logs, l2_to_l1_logs_tree_size(protocol_version)); + let chained_msg_hash = build_chained_message_hash(&input.l2_to_l1_messages); + let chained_bytecodes_hash = build_chained_bytecode_hash(&input.published_bytecodes); + + let l2_da_header = vec![ + Token::FixedBytes(chained_log_hash), + Token::FixedBytes(log_root_hash), + Token::FixedBytes(chained_msg_hash), + Token::FixedBytes(chained_bytecodes_hash), + Token::Bytes(pubdata), + ]; + + // Selector of `IL2DAValidator::validatePubdata`. + let func_selector = ethabi::short_signature( + "validatePubdata", + &[ + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::Bytes, + ], + ); + + [func_selector.to_vec(), ethabi::encode(&l2_da_header)].concat() + } + } + + fn settlement_layer_pubdata( + &self, + input: &PubdataInput, + _protocol_version: ProtocolVersionId, + ) -> Vec { + let mut pubdata = vec![]; + extend_from_pubdata_input(&mut pubdata, input); + + pubdata + } +} + +fn extend_from_pubdata_input(buffer: &mut Vec, pubdata_input: &PubdataInput) { + let PubdataInput { + user_logs, + l2_to_l1_messages, + published_bytecodes, + state_diffs, + } = pubdata_input; + + // Adding user L2->L1 logs. + buffer.extend(encode_user_logs(user_logs)); + + // Encoding L2->L1 messages + // Format: `[(numberOfMessages as u32) || (messages[1].len() as u32) || messages[1] || ... || (messages[n].len() as u32) || messages[n]]` + buffer.extend((l2_to_l1_messages.len() as u32).to_be_bytes()); + for message in l2_to_l1_messages { + buffer.extend((message.len() as u32).to_be_bytes()); + buffer.extend(message); + } + // Encoding bytecodes + // Format: `[(numberOfBytecodes as u32) || (bytecodes[1].len() as u32) || bytecodes[1] || ... 
|| (bytecodes[n].len() as u32) || bytecodes[n]]`
+    buffer.extend((published_bytecodes.len() as u32).to_be_bytes());
+    for bytecode in published_bytecodes {
+        buffer.extend((bytecode.len() as u32).to_be_bytes());
+        buffer.extend(bytecode);
+    }
+    // Encoding state diffs
+    // Format: `[size of compressed state diffs u32 || compressed state diffs || (# state diffs: initial + repeated) as u32 || sorted state diffs by <index, address, key>]`
+    let state_diffs_compressed = compress_state_diffs(state_diffs.clone());
+    buffer.extend(state_diffs_compressed);
+}
diff --git a/core/lib/multivm/src/pubdata_builders/tests.rs b/core/lib/multivm/src/pubdata_builders/tests.rs
new file mode 100644
index 000000000000..b06cb9405aa7
--- /dev/null
+++ b/core/lib/multivm/src/pubdata_builders/tests.rs
@@ -0,0 +1,122 @@
+use zksync_types::{
+    u256_to_h256, writes::StateDiffRecord, Address, ProtocolVersionId,
+    ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS,
+};
+
+use super::{rollup::RollupPubdataBuilder, validium::ValidiumPubdataBuilder};
+use crate::interface::pubdata::{L1MessengerL2ToL1Log, PubdataBuilder, PubdataInput};
+
+fn mock_input() -> PubdataInput {
+    // Just using some constant addresses for tests
+    let addr1 = BOOTLOADER_ADDRESS;
+    let addr2 = ACCOUNT_CODE_STORAGE_ADDRESS;
+
+    let user_logs = vec![L1MessengerL2ToL1Log {
+        l2_shard_id: 0,
+        is_service: false,
+        tx_number_in_block: 0,
+        sender: addr1,
+        key: 1.into(),
+        value: 128.into(),
+    }];
+
+    let l2_to_l1_messages = vec![hex::decode("deadbeef").unwrap()];
+
+    let published_bytecodes = vec![hex::decode("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb").unwrap()];
+
+    // For covering more cases, we have two state diffs:
+    // One with enumeration index present (and so it is a repeated write) and the one without it.
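+    // (In compressed form, a repeated write is referenced by its enumeration index, while an
+    // initial write (`enumeration_index == 0`) is keyed by the full 32-byte derived key.)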
+ let state_diffs = vec![ + StateDiffRecord { + address: addr2, + key: 155.into(), + derived_key: u256_to_h256(125.into()).0, + enumeration_index: 12, + initial_value: 11.into(), + final_value: 12.into(), + }, + StateDiffRecord { + address: addr2, + key: 156.into(), + derived_key: u256_to_h256(126.into()).0, + enumeration_index: 0, + initial_value: 0.into(), + final_value: 14.into(), + }, + ]; + + PubdataInput { + user_logs, + l2_to_l1_messages, + published_bytecodes, + state_diffs, + } +} + +#[test] +fn test_rollup_pubdata_building() { + let input = mock_input(); + + let rollup_pubdata_builder = RollupPubdataBuilder::new(Address::zero()); + + let actual = + rollup_pubdata_builder.l1_messenger_operator_input(&input, ProtocolVersionId::Version24); + let expected = "00000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `l1_messenger_operator_input` (pre gateway)" + ); + + let actual = + rollup_pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::Version24); + let expected = "00000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `settlement_layer_pubdata` (pre gateway)" + ); + + let actual = + rollup_pubdata_builder.l1_messenger_operator_input(&input, ProtocolVersionId::Version27); + let expected = 
"89f9a07233e608561d90f7c4e7bcea24d718e425a6bd6c8eefb48a334366143694c75fae278944d856d68e33bbd32937cb3a1ea35cbf7d6eeeb1150f500dd0d64d0efe420d6dafe5897eab2fc27b2e47af303397ed285ace146d836d042717b0a3dc4b28a603a33b28ce1d5c52c593a46a15a99f1afa1c1d92715284288958fd54a93de700000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000032300000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `l1_messenger_operator_input` (post gateway)" + ); + + let actual = + rollup_pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::Version27); + let expected = "00000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `settlement_layer_pubdata` (post gateway)" + ); +} + +#[test] +fn test_validium_pubdata_building() { + let input = mock_input(); + + let validium_pubdata_builder = ValidiumPubdataBuilder::new(Address::zero()); + + let actual = + validium_pubdata_builder.l1_messenger_operator_input(&input, ProtocolVersionId::Version27); + let expected = 
"89f9a07233e608561d90f7c4e7bcea24d718e425a6bd6c8eefb48a334366143694c75fae278944d856d68e33bbd32937cb3a1ea35cbf7d6eeeb1150f500dd0d64d0efe420d6dafe5897eab2fc27b2e47af303397ed285ace146d836d042717b0a3dc4b28a603a33b28ce1d5c52c593a46a15a99f1afa1c1d92715284288958fd54a93de700000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000005c000000010000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `l1_messenger_operator_input`" + ); + + let actual = + validium_pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::Version27); + let expected = "fa96e2436e6fb4d668f5a06681a7c53fcb199b2747ee624ee52a13e85aac5f1e"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `settlement_layer_pubdata`" + ); +} diff --git a/core/lib/multivm/src/pubdata_builders/utils.rs b/core/lib/multivm/src/pubdata_builders/utils.rs new file mode 100644 index 000000000000..83c9b9317640 --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/utils.rs @@ -0,0 +1,70 @@ +use zksync_mini_merkle_tree::MiniMerkleTree; +use zksync_types::{bytecode::BytecodeHash, web3::keccak256}; + +use crate::interface::pubdata::L1MessengerL2ToL1Log; + +pub(crate) fn build_chained_log_hash(user_logs: &[L1MessengerL2ToL1Log]) -> Vec { + let mut chained_log_hash = vec![0u8; 32]; + + for log in user_logs { + let log_bytes = log.packed_encoding(); + let hash = keccak256(&log_bytes); + + chained_log_hash = keccak256(&[chained_log_hash, hash.to_vec()].concat()).to_vec(); + } + + chained_log_hash +} + +pub(crate) fn build_logs_root( + user_logs: &[L1MessengerL2ToL1Log], + l2_to_l1_logs_tree_size: usize, +) -> Vec { + let logs = user_logs.iter().map(|log| { + let encoded = log.packed_encoding(); + let mut slice = [0u8; 88]; + slice.copy_from_slice(&encoded); + slice + }); + MiniMerkleTree::new(logs, Some(l2_to_l1_logs_tree_size)) + .merkle_root() + .as_bytes() + .to_vec() +} + +pub(crate) fn build_chained_message_hash(l2_to_l1_messages: &[Vec]) -> Vec { + let mut chained_msg_hash = vec![0u8; 32]; + + for msg in l2_to_l1_messages { + let hash = keccak256(msg); + + chained_msg_hash = keccak256(&[chained_msg_hash, hash.to_vec()].concat()).to_vec(); + } + + chained_msg_hash +} + +pub(crate) fn build_chained_bytecode_hash(published_bytecodes: &[Vec]) -> Vec { + let mut chained_bytecode_hash = vec![0u8; 32]; + + for bytecode in published_bytecodes { + let hash = BytecodeHash::for_bytecode(bytecode) + .value() + .to_fixed_bytes(); + chained_bytecode_hash = + keccak256(&[chained_bytecode_hash, hash.to_vec()].concat()).to_vec(); + } + + chained_bytecode_hash +} + +pub(crate) fn encode_user_logs(user_logs: &[L1MessengerL2ToL1Log]) -> Vec { + // Encoding user L2->L1 logs. + // Format: `[(numberOfL2ToL1Logs as u32) || l2tol1logs[1] || ... 
diff --git a/core/lib/multivm/src/pubdata_builders/validium.rs b/core/lib/multivm/src/pubdata_builders/validium.rs
new file mode 100644
index 000000000000..a9156e970aad
--- /dev/null
+++ b/core/lib/multivm/src/pubdata_builders/validium.rs
@@ -0,0 +1,93 @@
+use zksync_types::{
+    ethabi,
+    ethabi::{ParamType, Token},
+    l2_to_l1_log::l2_to_l1_logs_tree_size,
+    web3::keccak256,
+    Address, ProtocolVersionId,
+};
+
+use super::utils::{
+    build_chained_bytecode_hash, build_chained_log_hash, build_chained_message_hash,
+    build_logs_root, encode_user_logs,
+};
+use crate::interface::pubdata::{PubdataBuilder, PubdataInput};
+
+#[derive(Debug, Clone, Copy)]
+pub struct ValidiumPubdataBuilder {
+    pub l2_da_validator: Address,
+}
+
+impl ValidiumPubdataBuilder {
+    pub fn new(l2_da_validator: Address) -> Self {
+        Self { l2_da_validator }
+    }
+}
+
+impl PubdataBuilder for ValidiumPubdataBuilder {
+    fn l2_da_validator(&self) -> Address {
+        self.l2_da_validator
+    }
+
+    fn l1_messenger_operator_input(
+        &self,
+        input: &PubdataInput,
+        protocol_version: ProtocolVersionId,
+    ) -> Vec<u8> {
+        assert!(
+            !protocol_version.is_pre_gateway(),
+            "ValidiumPubdataBuilder must not be called for pre gateway"
+        );
+
+        let mut pubdata = vec![];
+        pubdata.extend(encode_user_logs(&input.user_logs));
+
+        let chained_log_hash = build_chained_log_hash(&input.user_logs);
+        let log_root_hash =
+            build_logs_root(&input.user_logs, l2_to_l1_logs_tree_size(protocol_version));
+        let chained_msg_hash = build_chained_message_hash(&input.l2_to_l1_messages);
+        let chained_bytecodes_hash = build_chained_bytecode_hash(&input.published_bytecodes);
+
+        let l2_da_header = vec![
+            Token::FixedBytes(chained_log_hash),
+            Token::FixedBytes(log_root_hash),
+            Token::FixedBytes(chained_msg_hash),
+            Token::FixedBytes(chained_bytecodes_hash),
+            Token::Bytes(pubdata),
+        ];
+
+        // Selector of `IL2DAValidator::validatePubdata`.
+        let func_selector = ethabi::short_signature(
+            "validatePubdata",
+            &[
+                ParamType::FixedBytes(32),
+                ParamType::FixedBytes(32),
+                ParamType::FixedBytes(32),
+                ParamType::FixedBytes(32),
+                ParamType::Bytes,
+            ],
+        );
+
+        [func_selector.to_vec(), ethabi::encode(&l2_da_header)]
+            .concat()
+            .to_vec()
+    }
+
+    fn settlement_layer_pubdata(
+        &self,
+        input: &PubdataInput,
+        protocol_version: ProtocolVersionId,
+    ) -> Vec<u8> {
+        assert!(
+            !protocol_version.is_pre_gateway(),
+            "ValidiumPubdataBuilder must not be called for pre gateway"
+        );
+
+        let state_diffs_packed = input
+            .state_diffs
+            .iter()
+            .flat_map(|diff| diff.encode_padded())
+            .collect::<Vec<u8>>();
+
+        keccak256(&state_diffs_packed).to_vec()
+    }
+}
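The selector built above with `ethabi::short_signature` is just the first four bytes of the keccak-256 hash of the canonical signature string; a quick equivalence sketch (the canonical string shown is inferred from the `ParamType` list, so treat it as an assumption):

```rust
use zksync_types::web3::keccak256;

fn selector(canonical_sig: &str) -> [u8; 4] {
    let hash = keccak256(canonical_sig.as_bytes());
    [hash[0], hash[1], hash[2], hash[3]]
}

fn main() {
    // Should match ethabi::short_signature("validatePubdata", &[FixedBytes(32) x 4, Bytes]).
    let sel = selector("validatePubdata(bytes32,bytes32,bytes32,bytes32,bytes)");
    println!("0x{}", hex::encode(sel));
}
```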
diff --git a/core/lib/multivm/src/tracers/prestate_tracer/mod.rs b/core/lib/multivm/src/tracers/prestate_tracer/mod.rs
index e8a7cc2cc420..363480c016bf 100644
--- a/core/lib/multivm/src/tracers/prestate_tracer/mod.rs
+++ b/core/lib/multivm/src/tracers/prestate_tracer/mod.rs
@@ -2,10 +2,9 @@ use std::{collections::HashMap, fmt, sync::Arc};
 
 use once_cell::sync::OnceCell;
 use zksync_types::{
-    get_code_key, get_nonce_key, web3::keccak256, AccountTreeId, Address, StorageKey, StorageValue,
-    H160, H256, L2_BASE_TOKEN_ADDRESS, U256,
+    address_to_h256, get_code_key, get_nonce_key, h256_to_u256, web3::keccak256, AccountTreeId,
+    Address, StorageKey, StorageValue, H160, H256, L2_BASE_TOKEN_ADDRESS, U256,
 };
-use zksync_utils::{address_to_h256, h256_to_u256};
 
 use crate::interface::storage::{StoragePtr, WriteStorage};
diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs
index a1573f24c668..88249467a575 100644
--- a/core/lib/multivm/src/tracers/validator/mod.rs
+++ b/core/lib/multivm/src/tracers/validator/mod.rs
@@ -1,4 +1,8 @@
-use std::{collections::HashSet, marker::PhantomData, sync::Arc};
+use std::{
+    collections::{BTreeSet, HashSet},
+    marker::PhantomData,
+    sync::{Arc, Mutex},
+};
 
 use once_cell::sync::OnceCell;
 use zksync_system_constants::{
@@ -6,9 +10,13 @@
     L2_BASE_TOKEN_ADDRESS, MSG_VALUE_SIMULATOR_ADDRESS, SYSTEM_CONTEXT_ADDRESS,
 };
 use zksync_types::{
-    vm::VmVersion, web3::keccak256, AccountTreeId, Address, StorageKey, H256, U256,
+    address_to_u256, u256_to_h256, vm::VmVersion, web3::keccak256, AccountTreeId, Address,
+    StorageKey, H256, U256,
+};
+use zksync_vm_interface::{
+    tracer::{TimestampAsserterParams, ValidationTraces},
+    L1BatchEnv,
 };
-use zksync_utils::{be_bytes_to_safe_address, u256_to_account_address, u256_to_h256};
 
 use self::types::{NewTrustedValidationItems, ValidationTracerMode};
 use crate::{
@@ -17,6 +25,7 @@
         storage::{StoragePtr, WriteStorage},
         tracer::{ValidationParams, ViolatedValidationRule},
     },
+    utils::bytecode::be_bytes_to_safe_address,
 };
 
 mod types;
@@ -32,7 +41,7 @@ mod vm_virtual_blocks;
 
 #[derive(Debug, Clone)]
 pub struct ValidationTracer<H> {
     validation_mode: ValidationTracerMode,
-    auxilary_allowed_slots: HashSet<H256>,
+    auxilary_allowed_slots: BTreeSet<H256>,
     user_address: Address,
 
     #[allow(dead_code)]
@@ -43,38 +52,47 @@ pub struct ValidationTracer<H> {
     trusted_address_slots: HashSet<(Address, U256)>,
     computational_gas_used: u32,
     computational_gas_limit: u32,
+    timestamp_asserter_params: Option<TimestampAsserterParams>,
     vm_version: VmVersion,
+    l1_batch_env: L1BatchEnv,
     pub result: Arc<OnceCell<ViolatedValidationRule>>,
+    pub traces: Arc<Mutex<ValidationTraces>>,
     _marker: PhantomData<fn(H) -> H>,
 }
 
 type ValidationRoundResult = Result<NewTrustedValidationItems, ViolatedValidationRule>;
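Switching `auxilary_allowed_slots` from `HashSet` to `BTreeSet` lets the tracer accept any slot within `MAX_ALLOWED_SLOT_OFFSET` below an allowed key using a single ordered range query instead of up to 128 point lookups (see the `range(from..=to)` call in the hunk below). Simplified sketch over `u64` keys; the real code ranges over `H256` values:

```rust
use std::collections::BTreeSet;

/// True if some allowed slot lies within `max_offset` at or below `key`.
fn is_near_allowed_slot(allowed: &BTreeSet<u64>, key: u64, max_offset: u64) -> bool {
    let from = key.saturating_sub(max_offset);
    allowed.range(from..=key).next().is_some()
}

fn main() {
    let allowed: BTreeSet<u64> = [1_000].into();
    assert!(is_near_allowed_slot(&allowed, 1_100, 127)); // 1_000 is within 127 below 1_100
    assert!(!is_near_allowed_slot(&allowed, 1_200, 127)); // out of range
    assert!(!is_near_allowed_slot(&allowed, 990, 127)); // allowed slot is above the key
}
```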
 
 impl<H> ValidationTracer<H> {
-    pub fn new(
-        params: ValidationParams,
-        vm_version: VmVersion,
-    ) -> (Self, Arc<OnceCell<ViolatedValidationRule>>) {
-        let result = Arc::new(OnceCell::new());
-        (
-            Self {
-                validation_mode: ValidationTracerMode::NoValidation,
-                auxilary_allowed_slots: Default::default(),
-
-                should_stop_execution: false,
-                user_address: params.user_address,
-                paymaster_address: params.paymaster_address,
-                trusted_slots: params.trusted_slots,
-                trusted_addresses: params.trusted_addresses,
-                trusted_address_slots: params.trusted_address_slots,
-                computational_gas_used: 0,
-                computational_gas_limit: params.computational_gas_limit,
-                vm_version,
-                result: result.clone(),
-                _marker: Default::default(),
-            },
-            result,
-        )
+    const MAX_ALLOWED_SLOT_OFFSET: u32 = 127;
+
+    pub fn new(params: ValidationParams, vm_version: VmVersion, l1_batch_env: L1BatchEnv) -> Self {
+        Self {
+            validation_mode: ValidationTracerMode::NoValidation,
+            auxilary_allowed_slots: Default::default(),
+
+            should_stop_execution: false,
+            user_address: params.user_address,
+            paymaster_address: params.paymaster_address,
+            trusted_slots: params.trusted_slots,
+            trusted_addresses: params.trusted_addresses,
+            trusted_address_slots: params.trusted_address_slots,
+            computational_gas_used: 0,
+            computational_gas_limit: params.computational_gas_limit,
+            timestamp_asserter_params: params.timestamp_asserter_params.clone(),
+            vm_version,
+            result: Arc::new(OnceCell::new()),
+            traces: Arc::new(Mutex::new(ValidationTraces::default())),
+            _marker: Default::default(),
+            l1_batch_env,
+        }
+    }
+
+    pub fn get_result(&self) -> Arc<OnceCell<ViolatedValidationRule>> {
+        self.result.clone()
+    }
+
+    pub fn get_traces(&self) -> Arc<Mutex<ValidationTraces>> {
+        self.traces.clone()
     }
 
     fn process_validation_round_result(&mut self, result: ValidationRoundResult) {
@@ -131,9 +149,15 @@ impl<H> ValidationTracer<H> {
         }
 
         // The user is allowed to touch its own slots or slots semantically related to him.
+        let from = u256_to_h256(key.saturating_sub(Self::MAX_ALLOWED_SLOT_OFFSET.into()));
+        let to = u256_to_h256(key);
         let valid_users_slot = address == self.user_address
-            || u256_to_account_address(&key) == self.user_address
-            || self.auxilary_allowed_slots.contains(&u256_to_h256(key));
+            || key == address_to_u256(&self.user_address)
+            || self
+                .auxilary_allowed_slots
+                .range(from..=to)
+                .next()
+                .is_some();
         if valid_users_slot {
             return true;
         }
@@ -142,6 +166,11 @@ impl<H> ValidationTracer<H> {
             return true;
         }
 
+        // Allow reading any storage slot from the timestamp asserter contract
+        if self.timestamp_asserter_params.as_ref().map(|x| x.address) == Some(msg_sender) {
+            return true;
+        }
+
         false
     }
 
@@ -189,6 +218,7 @@ impl<H> ValidationTracer<H> {
             trusted_addresses: self.trusted_addresses.clone(),
             trusted_address_slots: self.trusted_address_slots.clone(),
             computational_gas_limit: self.computational_gas_limit,
+            timestamp_asserter_params: self.timestamp_asserter_params.clone(),
         }
     }
 }
diff --git a/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs b/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs
index d1ddb2b44c80..3b5636c1c528 100644
--- a/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs
+++ b/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs
@@ -3,8 +3,9 @@ use zk_evm_1_4_1::{
     zkevm_opcode_defs::{ContextOpcode, FarCallABI, LogOpcode, Opcode},
 };
 use zksync_system_constants::KECCAK256_PRECOMPILE_ADDRESS;
-use zksync_types::{get_code_key, AccountTreeId, StorageKey, H256};
-use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h256};
+use zksync_types::{
+    get_code_key, h256_to_address, u256_to_address, u256_to_h256, AccountTreeId, StorageKey, H256,
+};
 
 use crate::{
     interface::{
@@ -48,7 +49,7 @@ impl<H: HistoryMode> ValidationTracer<H> {
         let packed_abi = data.src0_value.value;
         let call_destination_value = data.src1_value.value;
 
-        let called_address = u256_to_account_address(&call_destination_value);
+        let called_address = u256_to_address(&call_destination_value);
         let far_call_abi = FarCallABI::from_u256(packed_abi);
 
         if called_address == KECCAK256_PRECOMPILE_ADDRESS
@@ -115,7 +116,7 @@ impl<H: HistoryMode> ValidationTracer<H> {
                 let value = storage.borrow_mut().read_value(&storage_key);
 
                 return Ok(NewTrustedValidationItems {
-                    new_trusted_addresses: vec![h256_to_account_address(&value)],
+                    new_trusted_addresses: vec![h256_to_address(&value)],
                     ..Default::default()
                 });
             }
diff --git a/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs b/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs
index a51644ff9ea2..0a48792aaa9e 100644
--- a/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs
+++ b/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs
@@ -3,8 +3,9 @@ use zk_evm_1_4_1::{
     zkevm_opcode_defs::{ContextOpcode, FarCallABI, LogOpcode, Opcode},
 };
 use zksync_system_constants::KECCAK256_PRECOMPILE_ADDRESS;
-use zksync_types::{get_code_key, AccountTreeId, StorageKey, H256};
-use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h256};
+use zksync_types::{
+    get_code_key, h256_to_address, u256_to_address, u256_to_h256, AccountTreeId, StorageKey, H256,
+};
 
 use crate::{
     interface::{
@@ -48,7 +49,7 @@ impl<H: HistoryMode> ValidationTracer<H> {
         let packed_abi = data.src0_value.value;
         let call_destination_value = data.src1_value.value;
 
-        let called_address = u256_to_account_address(&call_destination_value);
+        let called_address = u256_to_address(&call_destination_value);
         let far_call_abi = FarCallABI::from_u256(packed_abi);
 
         if called_address == KECCAK256_PRECOMPILE_ADDRESS
@@ -115,7 +116,7 @@ impl<H: HistoryMode> ValidationTracer<H> {
                 let value = storage.borrow_mut().read_value(&storage_key);
 
                 return Ok(NewTrustedValidationItems {
-                    new_trusted_addresses: vec![h256_to_account_address(&value)],
+                    new_trusted_addresses: vec![h256_to_address(&value)],
                     ..Default::default()
                 });
             }
diff --git a/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs b/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs
index 7f9767a5e632..da6ffd4948cf 100644
--- a/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs
+++ b/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs
@@ -3,8 +3,9 @@ use zk_evm_1_4_0::{
     zkevm_opcode_defs::{ContextOpcode, FarCallABI, LogOpcode, Opcode},
 };
 use zksync_system_constants::KECCAK256_PRECOMPILE_ADDRESS;
-use zksync_types::{get_code_key, AccountTreeId, StorageKey, H256};
-use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h256};
+use zksync_types::{
+    get_code_key, h256_to_address, u256_to_address, u256_to_h256, AccountTreeId, StorageKey, H256,
+};
 
 use crate::{
     interface::{
@@ -48,7 +49,7 @@ impl<H: HistoryMode> ValidationTracer<H> {
         let packed_abi = data.src0_value.value;
         let call_destination_value = data.src1_value.value;
 
-        let called_address = u256_to_account_address(&call_destination_value);
+        let called_address = u256_to_address(&call_destination_value);
         let far_call_abi = FarCallABI::from_u256(packed_abi);
 
         if called_address == KECCAK256_PRECOMPILE_ADDRESS
@@ -115,7 +116,7 @@ impl<H: HistoryMode> ValidationTracer<H> {
                 let value = storage.borrow_mut().read_value(&storage_key);
 
                 return Ok(NewTrustedValidationItems {
-                    new_trusted_addresses: vec![h256_to_account_address(&value)],
+                    new_trusted_addresses: vec![h256_to_address(&value)],
                     ..Default::default()
                 });
             }
diff --git a/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs b/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs
index c206bd6fb2ad..3c819384137f 100644
--- a/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs
+++ b/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs
@@ -3,8 +3,10 @@ use zk_evm_1_5_0::{
     zkevm_opcode_defs::{ContextOpcode, FarCallABI, LogOpcode, Opcode},
 };
 use zksync_system_constants::KECCAK256_PRECOMPILE_ADDRESS;
-use zksync_types::{get_code_key, AccountTreeId, StorageKey, H256};
-use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h256};
+use zksync_types::{
+    get_code_key, h256_to_address, u256_to_address, u256_to_h256, AccountTreeId, StorageKey, H256,
+    U256,
+};
 
 use crate::{
     interface::{
@@ -26,6 +28,8 @@ use crate::{
     HistoryMode,
 };
 
+pub const TIMESTAMP_ASSERTER_FUNCTION_SELECTOR: [u8; 4] = [0x5b, 0x1a, 0x0c, 0x91];
+
 impl<H: HistoryMode> ValidationTracer<H> {
     fn check_user_restrictions_vm_latest(
         &mut self,
@@ -46,7 +50,7 @@ impl<H: HistoryMode> ValidationTracer<H> {
         let packed_abi = data.src0_value.value;
         let call_destination_value = data.src1_value.value;
 
-        let called_address = u256_to_account_address(&call_destination_value);
+        let called_address = u256_to_address(&call_destination_value);
         let far_call_abi = FarCallABI::from_u256(packed_abi);
 
         if called_address == KECCAK256_PRECOMPILE_ADDRESS
@@ -81,6 +85,52 @@ impl<H: HistoryMode> ValidationTracer<H> {
                             called_address,
                         ));
                     }
+                    // If this is a call to the timestamp asserter, extract the function arguments and store them in ValidationTraces.
+                    // These arguments are used by the mempool for transaction filtering. The call data length should be 68 bytes:
+                    // a 4-byte function selector followed by two U256 values.
+                    if let Some(params) = &self.timestamp_asserter_params {
+                        if called_address == params.address
+                            && far_call_abi.memory_quasi_fat_pointer.length == 68
+                        {
+                            let calldata_page = get_calldata_page_via_abi(
+                                &far_call_abi,
+                                state.vm_local_state.callstack.current.base_memory_page,
+                            );
+                            let calldata = memory.read_unaligned_bytes(
+                                calldata_page as usize,
+                                far_call_abi.memory_quasi_fat_pointer.start as usize,
+                                68,
+                            );
+
+                            if calldata[..4] == TIMESTAMP_ASSERTER_FUNCTION_SELECTOR {
+                                // start and end need to be capped to u64::MAX to avoid overflow
+                                let start = U256::from_big_endian(
+                                    &calldata[calldata.len() - 64..calldata.len() - 32],
+                                )
+                                .try_into()
+                                .unwrap_or(u64::MAX);
+                                let end = U256::from_big_endian(&calldata[calldata.len() - 32..])
+                                    .try_into()
+                                    .unwrap_or(u64::MAX);
+
+                                // using self.l1_batch_env.timestamp is ok here because the tracer is always
+                                // used in a one-shot execution mode
+                                if end
+                                    < self.l1_batch_env.timestamp
+                                        + params.min_time_till_end.as_secs()
+                                {
+                                    return Err(
+                                        ViolatedValidationRule::TimestampAssertionCloseToRangeEnd,
+                                    );
+                                }
+
+                                self.traces
+                                    .lock()
+                                    .unwrap()
+                                    .apply_timestamp_asserter_range(start..end);
+                            }
+                        }
+                    }
                 }
             }
             Opcode::Context(context) => {
@@ -113,7 +163,7 @@ impl<H: HistoryMode> ValidationTracer<H> {
                 let value = storage.borrow_mut().read_value(&storage_key);
 
                 return Ok(NewTrustedValidationItems {
-                    new_trusted_addresses: vec![h256_to_account_address(&value)],
+                    new_trusted_addresses: vec![h256_to_address(&value)],
                     ..Default::default()
                 });
             }
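The vm_latest hook above decodes the timestamp-asserter call by hand: calldata must be exactly 68 bytes, a 4-byte selector followed by two big-endian `U256` words that are saturated into `u64`. A standalone sketch of that decoding (the selector constant is copied from the diff; the helper itself is illustrative, and the `calldata[4..36]` slicing is equivalent to the tracer's length-relative indexing for 68-byte input):

```rust
use zksync_types::U256;

const TIMESTAMP_ASSERTER_FUNCTION_SELECTOR: [u8; 4] = [0x5b, 0x1a, 0x0c, 0x91];

/// Returns `(start, end)` if `calldata` matches the expected 68-byte shape.
fn decode_timestamp_range(calldata: &[u8]) -> Option<(u64, u64)> {
    if calldata.len() != 68 || calldata[..4] != TIMESTAMP_ASSERTER_FUNCTION_SELECTOR {
        return None;
    }
    // Same saturation as the tracer: values above u64::MAX are clamped.
    let start: u64 = U256::from_big_endian(&calldata[4..36])
        .try_into()
        .unwrap_or(u64::MAX);
    let end: u64 = U256::from_big_endian(&calldata[36..68])
        .try_into()
        .unwrap_or(u64::MAX);
    Some((start, end))
}
```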
diff --git a/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs b/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs
index 0badd7c58775..ea95c567181e 100644
--- a/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs
+++ b/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs
@@ -3,8 +3,9 @@ use zk_evm_1_3_3::{
     zkevm_opcode_defs::{ContextOpcode, FarCallABI, LogOpcode, Opcode},
 };
 use zksync_system_constants::KECCAK256_PRECOMPILE_ADDRESS;
-use zksync_types::{get_code_key, AccountTreeId, StorageKey, H256};
-use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h256};
+use zksync_types::{
+    get_code_key, h256_to_address, u256_to_address, u256_to_h256, AccountTreeId, StorageKey, H256,
+};
 
 use crate::{
     interface::{
@@ -48,7 +49,7 @@ impl<H: HistoryMode> ValidationTracer<H> {
         let packed_abi = data.src0_value.value;
         let call_destination_value = data.src1_value.value;
 
-        let called_address = u256_to_account_address(&call_destination_value);
+        let called_address = u256_to_address(&call_destination_value);
         let far_call_abi = FarCallABI::from_u256(packed_abi);
 
         if called_address == KECCAK256_PRECOMPILE_ADDRESS
@@ -115,7 +116,7 @@ impl<H: HistoryMode> ValidationTracer<H> {
                 let value = storage.borrow_mut().read_value(&storage_key);
 
                 return Ok(NewTrustedValidationItems {
-                    new_trusted_addresses: vec![h256_to_account_address(&value)],
+                    new_trusted_addresses: vec![h256_to_address(&value)],
                     ..Default::default()
                 });
             }
diff --git a/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs b/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs
index 86a639915c9d..94f31ddf138d 100644
--- a/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs
+++ b/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs
@@ -3,8 +3,9 @@ use zk_evm_1_3_3::{
     zkevm_opcode_defs::{ContextOpcode, FarCallABI, LogOpcode, Opcode},
 };
 use zksync_system_constants::KECCAK256_PRECOMPILE_ADDRESS;
-use zksync_types::{get_code_key, AccountTreeId, StorageKey, H256};
-use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h256};
+use zksync_types::{
+    get_code_key, h256_to_address, u256_to_address, u256_to_h256, AccountTreeId, StorageKey, H256,
+};
 
 use crate::{
     interface::{
@@ -48,7 +49,7 @@ impl<H: HistoryMode> ValidationTracer<H> {
         let packed_abi = data.src0_value.value;
         let call_destination_value = data.src1_value.value;
 
-        let called_address = u256_to_account_address(&call_destination_value);
+        let called_address = u256_to_address(&call_destination_value);
         let far_call_abi = FarCallABI::from_u256(packed_abi);
 
         if called_address == KECCAK256_PRECOMPILE_ADDRESS
@@ -115,7 +116,7 @@ impl<H: HistoryMode> ValidationTracer<H> {
                 let value = storage.borrow_mut().read_value(&storage_key);
 
                 return Ok(NewTrustedValidationItems {
-                    new_trusted_addresses: vec![h256_to_account_address(&value)],
+                    new_trusted_addresses: vec![h256_to_address(&value)],
                     ..Default::default()
                 });
             }
diff --git a/core/lib/multivm/src/utils/bytecode.rs b/core/lib/multivm/src/utils/bytecode.rs
index 260749b44f3c..f5dee805864e 100644
--- a/core/lib/multivm/src/utils/bytecode.rs
+++ b/core/lib/multivm/src/utils/bytecode.rs
@@ -1,10 +1,60 @@
 use std::collections::HashMap;
 
-use zksync_types::ethabi::{self, Token};
-use zksync_utils::bytecode::{hash_bytecode, validate_bytecode, InvalidBytecodeError};
+use zksync_types::{
+    bytecode::{validate_bytecode, BytecodeHash, InvalidBytecodeError},
+    ethabi::{self, Token},
+    Address, H256, U256,
+};
 
 use crate::interface::CompressedBytecodeInfo;
 
+pub(crate) fn be_chunks_to_h256_words(chunks: Vec<[u8; 32]>) -> Vec<H256> {
+    chunks.into_iter().map(|el| H256::from_slice(&el)).collect()
+}
+
+pub(crate) fn be_words_to_bytes(words: &[U256]) -> Vec<u8> {
+    words
+        .iter()
+        .flat_map(|w| {
+            let mut bytes = [0u8; 32];
+            w.to_big_endian(&mut bytes);
+            bytes
+        })
+        .collect()
+}
+
+pub(crate) fn bytes_to_be_words(bytes: &[u8]) -> Vec<U256> {
+    assert_eq!(
+        bytes.len() % 32,
+        0,
+        "Bytes must be divisible by 32 to split into chunks"
+    );
+    bytes.chunks(32).map(U256::from_big_endian).collect()
+}
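`be_words_to_bytes` and `bytes_to_be_words` above are mutual inverses for 32-byte-aligned input; a hypothetical round-trip test (not part of the diff):

```rust
#[test]
fn words_bytes_roundtrip() {
    let bytes = vec![0xab_u8; 64]; // two 32-byte words
    let words = bytes_to_be_words(&bytes);
    assert_eq!(words.len(), 2);
    assert_eq!(be_words_to_bytes(&words), bytes);
}
```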
+
+pub(crate) fn be_bytes_to_safe_address(bytes: &[u8]) -> Option<Address> {
+    if bytes.len() < 20 {
+        return None;
+    }
+
+    let (zero_bytes, address_bytes) = bytes.split_at(bytes.len() - 20);
+
+    if zero_bytes.iter().any(|b| *b != 0) {
+        None
+    } else {
+        Some(Address::from_slice(address_bytes))
+    }
+}
+
+pub(crate) fn bytecode_len_in_words(bytecode_hash: &H256) -> u16 {
+    let bytes = bytecode_hash.as_bytes();
+    u16::from_be_bytes([bytes[2], bytes[3]])
+}
+
+pub(crate) fn bytecode_len_in_bytes(bytecode_hash: &H256) -> u32 {
+    u32::from(bytecode_len_in_words(bytecode_hash)) * 32
+}
+
 #[derive(Debug, thiserror::Error)]
 pub(crate) enum FailedToCompressBytecodeError {
     #[error("Number of unique 8-bytes bytecode chunks exceed the limit of 2^16 - 1")]
@@ -87,7 +137,10 @@
 }
 
 pub(crate) fn encode_call(bytecode: &CompressedBytecodeInfo) -> Vec<u8> {
-    let mut bytecode_hash = hash_bytecode(&bytecode.original).as_bytes().to_vec();
+    let mut bytecode_hash = BytecodeHash::for_bytecode(&bytecode.original)
+        .value()
+        .as_bytes()
+        .to_vec();
     let empty_cell = [0_u8; 32];
     bytecode_hash.extend_from_slice(&empty_cell);
diff --git a/core/lib/multivm/src/utils/deduplicator.rs b/core/lib/multivm/src/utils/deduplicator.rs
index e9a870e6901d..0cb4c3fa7cd8 100644
--- a/core/lib/multivm/src/utils/deduplicator.rs
+++ b/core/lib/multivm/src/utils/deduplicator.rs
@@ -1,10 +1,9 @@
 use std::collections::HashMap;
 
 use zksync_types::{
-    writes::compression::compress_with_best_strategy, StorageKey, StorageLogKind,
+    h256_to_u256, writes::compression::compress_with_best_strategy, StorageKey, StorageLogKind,
     StorageLogWithPreviousValue, H256,
 };
-use zksync_utils::h256_to_u256;
 
 use crate::interface::DeduplicatedWritesMetrics;
 
@@ -211,8 +210,7 @@ impl StorageWritesDeduplicator {
 
 #[cfg(test)]
 mod tests {
-    use zksync_types::{AccountTreeId, StorageLog, H160, U256};
-    use zksync_utils::u256_to_h256;
+    use zksync_types::{u256_to_h256, AccountTreeId, StorageLog, H160, U256};
 
     use super::*;
diff --git a/core/lib/multivm/src/utils/events.rs b/core/lib/multivm/src/utils/events.rs
index 9720cb779142..37124b822040 100644
--- a/core/lib/multivm/src/utils/events.rs
+++ b/core/lib/multivm/src/utils/events.rs
@@ -1,59 +1,10 @@
 use zksync_system_constants::L1_MESSENGER_ADDRESS;
 use zksync_types::{
     ethabi::{self, Token},
-    l2_to_l1_log::L2ToL1Log,
-    Address, H256, U256,
+    H256, U256,
 };
-use zksync_utils::{u256_to_bytes_be, u256_to_h256};
 
-use crate::interface::VmEvent;
-
-/// Corresponds to the following solidity event:
-/// ```solidity
-/// struct L2ToL1Log {
-///     uint8 l2ShardId;
-///     bool isService;
-///     uint16 txNumberInBlock;
-///     address sender;
-///     bytes32 key;
-///     bytes32 value;
-/// }
-/// ```
-#[derive(Debug, Default, Clone, PartialEq)]
-pub(crate) struct L1MessengerL2ToL1Log {
-    pub l2_shard_id: u8,
-    pub is_service: bool,
-    pub tx_number_in_block: u16,
-    pub sender: Address,
-    pub key: U256,
-    pub value: U256,
-}
-
-impl L1MessengerL2ToL1Log {
-    pub fn packed_encoding(&self) -> Vec<u8> {
-        let mut res: Vec<u8> = vec![];
-        res.push(self.l2_shard_id);
-        res.push(self.is_service as u8);
-        res.extend_from_slice(&self.tx_number_in_block.to_be_bytes());
-        res.extend_from_slice(self.sender.as_bytes());
-        res.extend(u256_to_bytes_be(&self.key));
-        res.extend(u256_to_bytes_be(&self.value));
-        res
-    }
-}
-
-impl From<L1MessengerL2ToL1Log> for L2ToL1Log {
-    fn from(log: L1MessengerL2ToL1Log) -> Self {
-        L2ToL1Log {
-            shard_id: log.l2_shard_id,
-            is_service: log.is_service,
-            tx_number_in_block: log.tx_number_in_block,
-            sender: log.sender,
-            key: u256_to_h256(log.key),
-            value: u256_to_h256(log.value),
-        }
-    }
-}
+use
crate::interface::{pubdata::L1MessengerL2ToL1Log, VmEvent}; #[derive(Debug, PartialEq)] pub(crate) struct L1MessengerBytecodePublicationRequest { @@ -142,7 +93,7 @@ mod tests { use zksync_system_constants::{ BOOTLOADER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L2_BASE_TOKEN_ADDRESS, }; - use zksync_types::L1BatchNumber; + use zksync_types::{u256_to_h256, Address, L1BatchNumber}; use super::*; diff --git a/core/lib/multivm/src/utils/mod.rs b/core/lib/multivm/src/utils/mod.rs index 5d8fba7a2acd..4332c0327ff1 100644 --- a/core/lib/multivm/src/utils/mod.rs +++ b/core/lib/multivm/src/utils/mod.rs @@ -53,7 +53,9 @@ pub fn derive_base_fee_and_gas_per_pubdata( VmVersion::Vm1_4_2 => crate::vm_1_4_2::utils::fee::derive_base_fee_and_gas_per_pubdata( batch_fee_input.into_pubdata_independent(), ), - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => { crate::vm_latest::utils::fee::derive_base_fee_and_gas_per_pubdata( batch_fee_input.into_pubdata_independent(), ) @@ -81,9 +83,9 @@ pub fn get_batch_base_fee(l1_batch_env: &L1BatchEnv, vm_version: VmVersion) -> u } VmVersion::Vm1_4_1 => crate::vm_1_4_1::utils::fee::get_batch_base_fee(l1_batch_env), VmVersion::Vm1_4_2 => crate::vm_1_4_2::utils::fee::get_batch_base_fee(l1_batch_env), - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::utils::fee::get_batch_base_fee(l1_batch_env) - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::utils::fee::get_batch_base_fee(l1_batch_env), } } @@ -209,9 +211,9 @@ pub fn derive_overhead( } VmVersion::Vm1_4_1 => crate::vm_1_4_1::utils::overhead::derive_overhead(encoded_len), VmVersion::Vm1_4_2 => crate::vm_1_4_2::utils::overhead::derive_overhead(encoded_len), - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::utils::overhead::derive_overhead(encoded_len) - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::utils::overhead::derive_overhead(encoded_len), } } @@ -237,14 +239,17 @@ pub fn get_bootloader_encoding_space(version: VmVersion) -> u32 { VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::BOOTLOADER_TX_ENCODING_SPACE, VmVersion::Vm1_5_0SmallBootloaderMemory => { crate::vm_latest::constants::get_bootloader_tx_encoding_space( - crate::vm_latest::MultiVMSubversion::SmallBootloaderMemory, + crate::vm_latest::MultiVmSubversion::SmallBootloaderMemory, ) } VmVersion::Vm1_5_0IncreasedBootloaderMemory => { crate::vm_latest::constants::get_bootloader_tx_encoding_space( - crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory, + crate::vm_latest::MultiVmSubversion::IncreasedBootloaderMemory, ) } + VmVersion::VmGateway => crate::vm_latest::constants::get_bootloader_tx_encoding_space( + crate::vm_latest::MultiVmSubversion::Gateway, + ), } } @@ -264,9 +269,9 @@ pub fn get_bootloader_max_txs_in_batch(version: VmVersion) -> usize { VmVersion::VmBoojumIntegration => crate::vm_boojum_integration::constants::MAX_TXS_IN_BLOCK, VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::MAX_TXS_IN_BATCH, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::MAX_TXS_IN_BATCH, - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - 
crate::vm_latest::constants::MAX_TXS_IN_BATCH - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::MAX_TXS_IN_BATCH, } } @@ -287,9 +292,9 @@ pub fn gas_bootloader_batch_tip_overhead(version: VmVersion) -> u32 { } VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::BOOTLOADER_BATCH_TIP_OVERHEAD, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::BOOTLOADER_BATCH_TIP_OVERHEAD, - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::BOOTLOADER_BATCH_TIP_OVERHEAD - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::BOOTLOADER_BATCH_TIP_OVERHEAD, } } @@ -310,7 +315,9 @@ pub fn circuit_statistics_bootloader_batch_tip_overhead(version: VmVersion) -> u VmVersion::Vm1_4_2 => { crate::vm_1_4_2::constants::BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as usize } - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => { crate::vm_latest::constants::BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as usize } } @@ -333,7 +340,9 @@ pub fn execution_metrics_bootloader_batch_tip_overhead(version: VmVersion) -> us VmVersion::Vm1_4_2 => { crate::vm_1_4_2::constants::BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as usize } - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => { crate::vm_latest::constants::BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as usize } } @@ -357,9 +366,9 @@ pub fn get_max_gas_per_pubdata_byte(version: VmVersion) -> u64 { } VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::MAX_GAS_PER_PUBDATA_BYTE, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::MAX_GAS_PER_PUBDATA_BYTE, - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::MAX_GAS_PER_PUBDATA_BYTE - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::MAX_GAS_PER_PUBDATA_BYTE, } } @@ -385,14 +394,17 @@ pub fn get_used_bootloader_memory_bytes(version: VmVersion) -> usize { VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::USED_BOOTLOADER_MEMORY_BYTES, VmVersion::Vm1_5_0SmallBootloaderMemory => { crate::vm_latest::constants::get_used_bootloader_memory_bytes( - crate::vm_latest::MultiVMSubversion::SmallBootloaderMemory, + crate::vm_latest::MultiVmSubversion::SmallBootloaderMemory, ) } VmVersion::Vm1_5_0IncreasedBootloaderMemory => { crate::vm_latest::constants::get_used_bootloader_memory_bytes( - crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory, + crate::vm_latest::MultiVmSubversion::IncreasedBootloaderMemory, ) } + VmVersion::VmGateway => crate::vm_latest::constants::get_used_bootloader_memory_bytes( + crate::vm_latest::MultiVmSubversion::Gateway, + ), } } @@ -418,14 +430,17 @@ pub fn get_used_bootloader_memory_words(version: VmVersion) -> usize { VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::USED_BOOTLOADER_MEMORY_WORDS, VmVersion::Vm1_5_0SmallBootloaderMemory => { crate::vm_latest::constants::get_used_bootloader_memory_bytes( - crate::vm_latest::MultiVMSubversion::SmallBootloaderMemory, + 
crate::vm_latest::MultiVmSubversion::SmallBootloaderMemory, ) } VmVersion::Vm1_5_0IncreasedBootloaderMemory => { crate::vm_latest::constants::get_used_bootloader_memory_bytes( - crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory, + crate::vm_latest::MultiVmSubversion::IncreasedBootloaderMemory, ) } + VmVersion::VmGateway => crate::vm_latest::constants::get_used_bootloader_memory_bytes( + crate::vm_latest::MultiVmSubversion::Gateway, + ), } } @@ -447,9 +462,9 @@ pub fn get_max_batch_gas_limit(version: VmVersion) -> u64 { } VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::BLOCK_GAS_LIMIT as u64, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::BLOCK_GAS_LIMIT as u64, - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::BATCH_GAS_LIMIT - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::BATCH_GAS_LIMIT, } } @@ -473,9 +488,9 @@ pub fn get_eth_call_gas_limit(version: VmVersion) -> u64 { } VmVersion::Vm1_4_1 => crate::vm_1_4_1::constants::ETH_CALL_GAS_LIMIT as u64, VmVersion::Vm1_4_2 => crate::vm_1_4_2::constants::ETH_CALL_GAS_LIMIT as u64, - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::ETH_CALL_GAS_LIMIT - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::ETH_CALL_GAS_LIMIT, } } @@ -496,9 +511,9 @@ pub fn get_max_batch_base_layer_circuits(version: VmVersion) -> usize { // We avoid providing `0` for the old versions to avoid potential errors when working with old versions. crate::vm_1_4_2::constants::MAX_BASE_LAYER_CIRCUITS } - VmVersion::Vm1_5_0SmallBootloaderMemory | VmVersion::Vm1_5_0IncreasedBootloaderMemory => { - crate::vm_latest::constants::MAX_BASE_LAYER_CIRCUITS - } + VmVersion::Vm1_5_0SmallBootloaderMemory + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => crate::vm_latest::constants::MAX_BASE_LAYER_CIRCUITS, } } diff --git a/core/lib/multivm/src/versions/README.md b/core/lib/multivm/src/versions/README.md deleted file mode 100644 index 01c575091974..000000000000 --- a/core/lib/multivm/src/versions/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# MultiVM dependencies - -This folder contains the old versions of the VM we have used in the past. The `multivm` crate uses them to dynamically -switch the version we use to be able to sync from the genesis. This is a temporary measure until a "native" solution is -implemented (i.e., the `vm` crate would itself know the changes between versions, and thus we will have only the -functional diff between versions, not several fully-fledged VMs). 
- -## Versions - -| Name | Protocol versions | Description | -| ---------------------- | ----------------- | --------------------------------------------------------------------- | -| vm_m5 | 0 - 3 | Release for the testnet launch | -| vm_m6 | 4 - 6 | Release for the mainnet launch | -| vm_1_3_2 | 7 - 12 | Release 1.3.2 of the crypto circuits | -| vm_virtual_blocks | 13 - 15 | Adding virtual blocks to help with block number / timestamp migration | -| vm_refunds_enhancement | 16 - 17 | Fixing issue related to refunds in VM | -| vm_boojum_integration | 18 - | New Proving system (boojum), vm version 1.4.0 | diff --git a/core/lib/multivm/src/versions/mod.rs b/core/lib/multivm/src/versions/mod.rs index bcb246cece46..b6523b3d474a 100644 --- a/core/lib/multivm/src/versions/mod.rs +++ b/core/lib/multivm/src/versions/mod.rs @@ -1,8 +1,8 @@ +#[cfg(test)] +mod shadow; mod shared; #[cfg(test)] mod testonly; -#[cfg(test)] -mod tests; pub mod vm_1_3_2; pub mod vm_1_4_1; pub mod vm_1_4_2; diff --git a/core/lib/multivm/src/versions/tests.rs b/core/lib/multivm/src/versions/shadow/mod.rs similarity index 88% rename from core/lib/multivm/src/versions/tests.rs rename to core/lib/multivm/src/versions/shadow/mod.rs index c2a04c155fec..a335d0fe5906 100644 --- a/core/lib/multivm/src/versions/tests.rs +++ b/core/lib/multivm/src/versions/shadow/mod.rs @@ -2,17 +2,11 @@ //! these tests are placed here. use assert_matches::assert_matches; -use ethabi::Contract; -use zksync_contracts::{ - get_loadnext_contract, load_contract, read_bytecode, - test_contracts::LoadnextContractExecutionParams, -}; -use zksync_test_account::{Account, TxType}; +use zksync_test_contracts::{Account, LoadnextContractExecutionParams, TestContract, TxType}; use zksync_types::{ block::L2BlockHasher, fee::Fee, AccountTreeId, Address, Execute, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, H256, U256, }; -use zksync_utils::bytecode::hash_bytecode; use crate::{ interface::{ @@ -22,12 +16,14 @@ use crate::{ }, utils::get_max_gas_per_pubdata_byte, versions::testonly::{ - default_l1_batch, default_system_env, make_account_rich, ContractToDeploy, + default_l1_batch, default_system_env, make_address_rich, ContractToDeploy, }, - vm_fast, - vm_latest::{self, HistoryEnabled}, + vm_fast, vm_latest, + vm_latest::HistoryEnabled, }; +mod tests; + type ReferenceVm = vm_latest::Vm, HistoryEnabled>; type ShadowedFastVm = crate::vm_instance::ShadowedFastVm; @@ -59,31 +55,29 @@ struct Harness { alice: Account, bob: Account, storage_contract: ContractToDeploy, - storage_contract_abi: Contract, + storage_contract_abi: &'static ethabi::Contract, current_block: L2BlockEnv, } impl Harness { - const STORAGE_CONTRACT_PATH: &'static str = - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json"; const STORAGE_CONTRACT_ADDRESS: Address = Address::repeat_byte(23); fn new(l1_batch_env: &L1BatchEnv) -> Self { Self { - alice: Account::random(), - bob: Account::random(), + alice: Account::from_seed(0), + bob: Account::from_seed(1), storage_contract: ContractToDeploy::new( - read_bytecode(Self::STORAGE_CONTRACT_PATH), + TestContract::storage_test().bytecode.to_vec(), Self::STORAGE_CONTRACT_ADDRESS, ), - storage_contract_abi: load_contract(Self::STORAGE_CONTRACT_PATH), + storage_contract_abi: &TestContract::storage_test().abi, current_block: l1_batch_env.first_l2_block, } } fn setup_storage(&self, storage: &mut InMemoryStorage) { - make_account_rich(storage, &self.alice); - make_account_rich(storage, &self.bob); + 
make_address_rich(storage, self.alice.address); + make_address_rich(storage, self.bob.address); self.storage_contract.insert(storage); let storage_contract_key = StorageKey::new( @@ -176,7 +170,7 @@ impl Harness { self.new_block(vm, &[out_of_gas_transfer.hash(), simple_write_tx.hash()]); let deploy_tx = self.alice.get_deploy_tx( - &get_loadnext_contract().bytecode, + TestContract::load_test().bytecode, Some(&[ethabi::Token::Uint(100.into())]), TxType::L2, ); @@ -196,7 +190,6 @@ impl Harness { assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); self.new_block(vm, &[deploy_tx.tx.hash(), load_test_tx.hash()]); - vm.finish_batch(); } } @@ -206,7 +199,7 @@ where { let system_env = default_system_env(); let l1_batch_env = default_l1_batch(L1BatchNumber(1)); - let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + let mut storage = InMemoryStorage::with_system_contracts(); let mut harness = Harness::new(&l1_batch_env); harness.setup_storage(&mut storage); @@ -230,7 +223,7 @@ fn sanity_check_harness_on_new_vm() { fn sanity_check_shadow_vm() { let system_env = default_system_env(); let l1_batch_env = default_l1_batch(L1BatchNumber(1)); - let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + let mut storage = InMemoryStorage::with_system_contracts(); let mut harness = Harness::new(&l1_batch_env); harness.setup_storage(&mut storage); @@ -257,7 +250,7 @@ fn shadow_vm_basics() { pretty_assertions::assert_eq!(replayed_dump, dump); // Check that the VM executes identically when reading from the original storage and one restored from the dump. - let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + let mut storage = InMemoryStorage::with_system_contracts(); harness.setup_storage(&mut storage); let storage = StorageView::new(storage).to_rc_ptr(); diff --git a/core/lib/multivm/src/versions/shadow/tests.rs b/core/lib/multivm/src/versions/shadow/tests.rs new file mode 100644 index 000000000000..4466d96a96b7 --- /dev/null +++ b/core/lib/multivm/src/versions/shadow/tests.rs @@ -0,0 +1,490 @@ +//! Unit tests from the `testonly` test suite. 
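The `TestedVm` impl below is pure dispatch: every operation runs on both the main and the shadow VM through the `get*`/`get_mut*` combinators, which let the shadow framework compare the two executions. A sketch of that pattern; `ShadowPair` is a hypothetical stand-in for the real `ShadowedFastVm` internals, not an API from this diff:

```rust
use std::fmt::Debug;

enum ShadowRefLike<'a, M, S> {
    Main(&'a M),
    Shadow(&'a S),
}

struct ShadowPair<M, S> {
    main: M,
    shadow: S,
}

impl<M, S> ShadowPair<M, S> {
    /// Runs `f` on both VMs and asserts that their answers agree.
    fn get<T: PartialEq + Debug>(
        &self,
        op: &str,
        f: impl Fn(ShadowRefLike<'_, M, S>) -> T,
    ) -> T {
        let main = f(ShadowRefLike::Main(&self.main));
        let shadow = f(ShadowRefLike::Shadow(&self.shadow));
        assert_eq!(main, shadow, "shadow VM diverged in `{op}`");
        main
    }
}
```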
+ +use std::{collections::HashSet, rc::Rc}; + +use zksync_types::{writes::StateDiffRecord, StorageKey, Transaction, H256, U256}; +use zksync_vm_interface::pubdata::PubdataBuilder; + +use super::ShadowedFastVm; +use crate::{ + interface::{ + utils::{ShadowMut, ShadowRef}, + CurrentExecutionState, L2BlockEnv, VmExecutionResultAndLogs, + }, + versions::testonly::TestedVm, +}; + +impl TestedVm for ShadowedFastVm { + type StateDump = (); + + fn dump_state(&self) -> Self::StateDump { + // Do nothing + } + + fn gas_remaining(&mut self) -> u32 { + self.get_mut("gas_remaining", |r| match r { + ShadowMut::Main(vm) => vm.gas_remaining(), + ShadowMut::Shadow(vm) => vm.gas_remaining(), + }) + } + + fn get_current_execution_state(&self) -> CurrentExecutionState { + self.get_custom("current_execution_state", |r| match r { + ShadowRef::Main(vm) => vm.get_current_execution_state(), + ShadowRef::Shadow(vm) => vm.get_current_execution_state(), + }) + } + + fn decommitted_hashes(&self) -> HashSet { + self.get("decommitted_hashes", |r| match r { + ShadowRef::Main(vm) => vm.decommitted_hashes(), + ShadowRef::Shadow(vm) => TestedVm::decommitted_hashes(vm), + }) + } + + fn finish_batch_with_state_diffs( + &mut self, + diffs: Vec, + pubdata_builder: Rc, + ) -> VmExecutionResultAndLogs { + self.get_custom_mut("finish_batch_with_state_diffs", |r| match r { + ShadowMut::Main(vm) => { + vm.finish_batch_with_state_diffs(diffs.clone(), pubdata_builder.clone()) + } + ShadowMut::Shadow(vm) => { + vm.finish_batch_with_state_diffs(diffs.clone(), pubdata_builder.clone()) + } + }) + } + + fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs { + self.get_custom_mut("finish_batch_without_pubdata", |r| match r { + ShadowMut::Main(vm) => vm.finish_batch_without_pubdata(), + ShadowMut::Shadow(vm) => vm.finish_batch_without_pubdata(), + }) + } + + fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) { + self.get_mut("insert_bytecodes", |r| match r { + ShadowMut::Main(vm) => vm.insert_bytecodes(bytecodes), + ShadowMut::Shadow(vm) => TestedVm::insert_bytecodes(vm, bytecodes), + }); + } + + fn known_bytecode_hashes(&self) -> HashSet { + self.get("known_bytecode_hashes", |r| match r { + ShadowRef::Main(vm) => vm.known_bytecode_hashes(), + ShadowRef::Shadow(vm) => vm.known_bytecode_hashes(), + }) + } + + fn manually_decommit(&mut self, code_hash: H256) -> bool { + self.get_mut("manually_decommit", |r| match r { + ShadowMut::Main(vm) => vm.manually_decommit(code_hash), + ShadowMut::Shadow(vm) => vm.manually_decommit(code_hash), + }) + } + + fn verify_required_bootloader_heap(&self, cells: &[(u32, U256)]) { + self.get("verify_required_bootloader_heap", |r| match r { + ShadowRef::Main(vm) => vm.verify_required_bootloader_heap(cells), + ShadowRef::Shadow(vm) => vm.verify_required_bootloader_heap(cells), + }); + } + + fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]) { + self.get_mut("manually_decommit", |r| match r { + ShadowMut::Main(vm) => vm.write_to_bootloader_heap(cells), + ShadowMut::Shadow(vm) => TestedVm::write_to_bootloader_heap(vm, cells), + }); + } + + fn read_storage(&mut self, key: StorageKey) -> U256 { + self.get_mut("read_storage", |r| match r { + ShadowMut::Main(vm) => vm.read_storage(key), + ShadowMut::Shadow(vm) => vm.read_storage(key), + }) + } + + fn last_l2_block_hash(&self) -> H256 { + self.get("last_l2_block_hash", |r| match r { + ShadowRef::Main(vm) => vm.last_l2_block_hash(), + ShadowRef::Shadow(vm) => vm.last_l2_block_hash(), + }) + } + + fn push_l2_block_unchecked(&mut self, block: 
L2BlockEnv) { + self.get_mut("push_l2_block_unchecked", |r| match r { + ShadowMut::Main(vm) => vm.push_l2_block_unchecked(block), + ShadowMut::Shadow(vm) => vm.push_l2_block_unchecked(block), + }); + } + + fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) { + self.get_mut("push_transaction_with_refund", |r| match r { + ShadowMut::Main(vm) => vm.push_transaction_with_refund(tx.clone(), refund), + ShadowMut::Shadow(vm) => vm.push_transaction_with_refund(tx.clone(), refund), + }); + } +} + +mod block_tip { + use crate::versions::testonly::block_tip::*; + + #[test] + fn dry_run_upper_bound() { + test_dry_run_upper_bound::(); + } +} + +mod bootloader { + use crate::versions::testonly::bootloader::*; + + #[test] + fn dummy_bootloader() { + test_dummy_bootloader::(); + } + + #[test] + fn bootloader_out_of_gas() { + test_bootloader_out_of_gas::(); + } +} + +mod bytecode_publishing { + use crate::versions::testonly::bytecode_publishing::*; + + #[test] + fn bytecode_publishing() { + test_bytecode_publishing::(); + } +} + +mod circuits { + use crate::versions::testonly::circuits::*; + + #[test] + fn circuits() { + test_circuits::(); + } +} + +mod code_oracle { + use crate::versions::testonly::code_oracle::*; + + #[test] + fn code_oracle() { + test_code_oracle::(); + } + + #[test] + fn code_oracle_big_bytecode() { + test_code_oracle_big_bytecode::(); + } + + #[test] + fn refunds_in_code_oracle() { + test_refunds_in_code_oracle::(); + } +} + +mod default_aa { + use crate::versions::testonly::default_aa::*; + + #[test] + fn default_aa_interaction() { + test_default_aa_interaction::(); + } +} + +mod evm_emulator { + use test_casing::{test_casing, Product}; + + use crate::versions::testonly::evm_emulator::*; + + #[test] + fn tracing_evm_contract_deployment() { + test_tracing_evm_contract_deployment::(); + } + + #[test] + fn mock_emulator_basics() { + test_mock_emulator_basics::(); + } + + #[test_casing(2, [false, true])] + #[test] + fn mock_emulator_with_payment(deploy_emulator: bool) { + test_mock_emulator_with_payment::(deploy_emulator); + } + + #[test_casing(4, Product(([false, true], [false, true])))] + #[test] + fn mock_emulator_with_recursion(deploy_emulator: bool, is_external: bool) { + test_mock_emulator_with_recursion::(deploy_emulator, is_external); + } + + #[test] + fn calling_to_mock_emulator_from_native_contract() { + test_calling_to_mock_emulator_from_native_contract::(); + } + + #[test] + fn mock_emulator_with_deployment() { + test_mock_emulator_with_deployment::(false); + } + + #[test] + fn mock_emulator_with_reverted_deployment() { + test_mock_emulator_with_deployment::(true); + } + + #[test] + fn mock_emulator_with_recursive_deployment() { + test_mock_emulator_with_recursive_deployment::(); + } + + #[test] + fn mock_emulator_with_partial_reverts() { + test_mock_emulator_with_partial_reverts::(); + } + + #[test] + fn mock_emulator_with_delegate_call() { + test_mock_emulator_with_delegate_call::(); + } + + #[test] + fn mock_emulator_with_static_call() { + test_mock_emulator_with_static_call::(); + } +} + +mod gas_limit { + use crate::versions::testonly::gas_limit::*; + + #[test] + fn tx_gas_limit_offset() { + test_tx_gas_limit_offset::(); + } +} + +mod get_used_contracts { + use crate::versions::testonly::get_used_contracts::*; + + #[test] + fn get_used_contracts() { + test_get_used_contracts::(); + } + + #[test] + fn get_used_contracts_with_far_call() { + test_get_used_contracts_with_far_call::(); + } + + #[test] + fn get_used_contracts_with_out_of_gas_far_call() { 
+ test_get_used_contracts_with_out_of_gas_far_call::(); + } +} + +mod is_write_initial { + use crate::versions::testonly::is_write_initial::*; + + #[test] + fn is_write_initial_behaviour() { + test_is_write_initial_behaviour::(); + } +} + +mod l1_tx_execution { + use crate::versions::testonly::l1_tx_execution::*; + + #[test] + fn l1_tx_execution() { + test_l1_tx_execution::(); + } + + #[test] + fn l1_tx_execution_high_gas_limit() { + test_l1_tx_execution_high_gas_limit::(); + } +} + +mod l2_blocks { + use crate::versions::testonly::l2_blocks::*; + + #[test] + fn l2_block_initialization_timestamp() { + test_l2_block_initialization_timestamp::(); + } + + #[test] + fn l2_block_initialization_number_non_zero() { + test_l2_block_initialization_number_non_zero::(); + } + + #[test] + fn l2_block_same_l2_block() { + test_l2_block_same_l2_block::(); + } + + #[test] + fn l2_block_new_l2_block() { + test_l2_block_new_l2_block::(); + } + + #[test] + fn l2_block_first_in_batch() { + test_l2_block_first_in_batch::(); + } +} + +mod nonce_holder { + use crate::versions::testonly::nonce_holder::*; + + #[test] + fn nonce_holder() { + test_nonce_holder::(); + } +} + +mod precompiles { + use crate::versions::testonly::precompiles::*; + + #[test] + fn keccak() { + test_keccak::(); + } + + #[test] + fn sha256() { + test_sha256::(); + } + + #[test] + fn ecrecover() { + test_ecrecover::(); + } +} + +mod refunds { + use crate::versions::testonly::refunds::*; + + #[test] + fn predetermined_refunded_gas() { + test_predetermined_refunded_gas::(); + } + + #[test] + fn negative_pubdata_for_transaction() { + test_negative_pubdata_for_transaction::(); + } +} + +mod require_eip712 { + use crate::versions::testonly::require_eip712::*; + + #[test] + fn require_eip712() { + test_require_eip712::(); + } +} + +mod rollbacks { + use crate::versions::testonly::rollbacks::*; + + #[test] + fn vm_rollbacks() { + test_vm_rollbacks::(); + } + + #[test] + fn vm_loadnext_rollbacks() { + test_vm_loadnext_rollbacks::(); + } + + #[test] + fn rollback_in_call_mode() { + test_rollback_in_call_mode::(); + } +} + +mod secp256r1 { + use crate::versions::testonly::secp256r1::*; + + #[test] + fn secp256r1() { + test_secp256r1::(); + } +} + +mod simple_execution { + use crate::versions::testonly::simple_execution::*; + + #[test] + fn estimate_fee() { + test_estimate_fee::(); + } + + #[test] + fn simple_execute() { + test_simple_execute::(); + } +} + +mod storage { + use crate::versions::testonly::storage::*; + + #[test] + fn storage_behavior() { + test_storage_behavior::(); + } + + #[test] + fn transient_storage_behavior() { + test_transient_storage_behavior::(); + } +} + +mod tracing_execution_error { + use crate::versions::testonly::tracing_execution_error::*; + + #[test] + fn tracing_of_execution_errors() { + test_tracing_of_execution_errors::(); + } +} + +mod transfer { + use crate::versions::testonly::transfer::*; + + #[test] + fn send_and_transfer() { + test_send_and_transfer::(); + } + + #[test] + fn reentrancy_protection_send_and_transfer() { + test_reentrancy_protection_send_and_transfer::(); + } +} + +mod upgrade { + use crate::versions::testonly::upgrade::*; + + #[test] + fn protocol_upgrade_is_first() { + test_protocol_upgrade_is_first::(); + } + + #[test] + fn force_deploy_upgrade() { + test_force_deploy_upgrade::(); + } + + #[test] + fn complex_upgrader() { + test_complex_upgrader::(); + } +} diff --git a/core/lib/multivm/src/versions/testonly.rs b/core/lib/multivm/src/versions/testonly.rs deleted file mode 100644 index 
51a4d0842d90..000000000000 --- a/core/lib/multivm/src/versions/testonly.rs +++ /dev/null @@ -1,93 +0,0 @@ -use zksync_contracts::BaseSystemContracts; -use zksync_test_account::Account; -use zksync_types::{ - block::L2BlockHasher, fee_model::BatchFeeInput, get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, utils::storage_key_for_eth_balance, Address, L1BatchNumber, - L2BlockNumber, L2ChainId, ProtocolVersionId, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{storage::InMemoryStorage, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, - vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, -}; - -pub(super) fn default_system_env() -> SystemEnv { - SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - chain_id: L2ChainId::from(270), - } -} - -pub(super) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(super) fn make_account_rich(storage: &mut InMemoryStorage, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage.set_value(key, u256_to_h256(U256::from(10_u64.pow(19)))); -} - -#[derive(Debug, Clone)] -pub(super) struct ContractToDeploy { - bytecode: Vec, - address: Address, - is_account: bool, -} - -impl ContractToDeploy { - pub fn new(bytecode: Vec, address: Address) -> Self { - Self { - bytecode, - address, - is_account: false, - } - } - - pub fn account(bytecode: Vec, address: Address) -> Self { - Self { - bytecode, - address, - is_account: true, - } - } - - pub fn insert(&self, storage: &mut InMemoryStorage) { - let deployer_code_key = get_code_key(&self.address); - storage.set_value(deployer_code_key, hash_bytecode(&self.bytecode)); - if self.is_account { - let is_account_key = get_is_account_key(&self.address); - storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - storage.store_factory_dep(hash_bytecode(&self.bytecode), self.bytecode.clone()); - } - - /// Inserts the contracts into the test environment, bypassing the deployer system contract. 
- pub fn insert_all(contracts: &[Self], storage: &mut InMemoryStorage) { - for contract in contracts { - contract.insert(storage); - } - } -} diff --git a/core/lib/multivm/src/versions/testonly/block_tip.rs b/core/lib/multivm/src/versions/testonly/block_tip.rs new file mode 100644 index 000000000000..efdf2e1b0cdf --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/block_tip.rs @@ -0,0 +1,395 @@ +use ethabi::Token; +use itertools::Itertools; +use zksync_contracts::load_sys_contract; +use zksync_system_constants::{ + CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, +}; +use zksync_test_contracts::TestContract; +use zksync_types::{ + bytecode::BytecodeHash, commitment::SerializeCommitment, fee_model::BatchFeeInput, + get_code_key, l2_to_l1_log::L2ToL1Log, u256_to_h256, writes::StateDiffRecord, Address, Execute, + H256, U256, +}; + +use super::{ + default_pubdata_builder, get_empty_storage, + tester::{TestedVm, VmTesterBuilder}, +}; +use crate::{ + interface::{InspectExecutionMode, L1BatchEnv, TxExecutionMode, VmInterfaceExt}, + versions::testonly::default_l1_batch, + vm_latest::constants::{ + BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, + BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, + MAX_VM_PUBDATA_PER_BATCH, + }, +}; + +#[derive(Debug, Clone, Default)] +struct L1MessengerTestData { + l2_to_l1_logs: usize, + messages: Vec>, + bytecodes: Vec>, + state_diffs: Vec, +} + +struct MimicCallInfo { + to: Address, + who_to_mimic: Address, + data: Vec, +} + +const CALLS_PER_TX: usize = 1_000; + +fn populate_mimic_calls(data: L1MessengerTestData) -> Vec> { + let complex_upgrade = TestContract::complex_upgrade(); + let l1_messenger = load_sys_contract("L1Messenger"); + + let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|i| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("sendL2ToL1Log") + .unwrap() + .encode_input(&[ + Token::Bool(false), + Token::FixedBytes(H256::from_low_u64_be(2 * i as u64).0.to_vec()), + Token::FixedBytes(H256::from_low_u64_be(2 * i as u64 + 1).0.to_vec()), + ]) + .unwrap(), + }); + let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("sendToL1") + .unwrap() + .encode_input(&[Token::Bytes(message.clone())]) + .unwrap(), + }); + let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { + to: L1_MESSENGER_ADDRESS, + who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, + data: l1_messenger + .function("requestBytecodeL1Publication") + .unwrap() + .encode_input(&[Token::FixedBytes( + BytecodeHash::for_bytecode(bytecode).value().0.to_vec(), + )]) + .unwrap(), + }); + + let encoded_calls = logs_mimic_calls + .chain(messages_mimic_calls) + .chain(bytecodes_mimic_calls) + .map(|call| { + Token::Tuple(vec![ + Token::Address(call.to), + Token::Address(call.who_to_mimic), + Token::Bytes(call.data), + ]) + }) + .chunks(CALLS_PER_TX) + .into_iter() + .map(|chunk| { + complex_upgrade + .function("mimicCalls") + .encode_input(&[Token::Array(chunk.collect_vec())]) + .unwrap() + }) + .collect_vec(); + + encoded_calls +} + +struct TestStatistics { + pub max_used_gas: u32, + pub circuit_statistics: u64, + pub execution_metrics_size: u64, +} + +struct StatisticsTagged { + pub statistics: TestStatistics, + pub tag: String, +} + +fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { + let mut 
storage = get_empty_storage(); + let complex_upgrade_code = TestContract::complex_upgrade().bytecode.to_vec(); + + // For this test we'll just put the bytecode onto the force deployer address + storage.set_value( + get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), + BytecodeHash::for_bytecode(&complex_upgrade_code).value(), + ); + storage.store_factory_dep( + BytecodeHash::for_bytecode(&complex_upgrade_code).value(), + complex_upgrade_code, + ); + + // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute + // the gas limit + + let batch_env = L1BatchEnv { + fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), + ..default_l1_batch(zksync_types::L1BatchNumber(1)) + }; + + let mut vm = VmTesterBuilder::new() + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_l1_batch_env(batch_env) + .build::(); + + let bytecodes: Vec<_> = test_data.bytecodes.iter().map(Vec::as_slice).collect(); + vm.vm.insert_bytecodes(&bytecodes); + + let txs_data = populate_mimic_calls(test_data.clone()); + let account = &mut vm.rich_accounts[0]; + + for (i, data) in txs_data.into_iter().enumerate() { + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), + calldata: data, + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction {i} wasn't successful for input: {:#?}", + test_data + ); + } + + // Now we count how much ergs were spent at the end of the batch + // It is assumed that the top level frame is the bootloader + let gas_before = vm.vm.gas_remaining(); + let result = vm + .vm + .finish_batch_with_state_diffs(test_data.state_diffs.clone(), default_pubdata_builder()); + assert!( + !result.result.is_failed(), + "Batch wasn't successful for input: {test_data:?}" + ); + let gas_after = vm.vm.gas_remaining(); + assert_eq!((gas_before - gas_after) as u64, result.statistics.gas_used); + + TestStatistics { + max_used_gas: gas_before - gas_after, + circuit_statistics: result.statistics.circuit_statistic.total() as u64, + execution_metrics_size: result.get_execution_metrics(None).size() as u64, + } +} + +fn generate_state_diffs( + repeated_writes: bool, + small_diff: bool, + number_of_state_diffs: usize, +) -> Vec { + (0..number_of_state_diffs) + .map(|i| { + let address = Address::from_low_u64_be(i as u64); + let key = U256::from(i); + let enumeration_index = if repeated_writes { i + 1 } else { 0 }; + + let (initial_value, final_value) = if small_diff { + // As small as it gets, one byte to denote zeroing out the value + (U256::from(1), U256::from(0)) + } else { + // As large as it gets + (U256::from(0), U256::from(2).pow(255.into())) + }; + + StateDiffRecord { + address, + key, + derived_key: u256_to_h256(i.into()).0, + enumeration_index: enumeration_index as u64, + initial_value, + final_value, + } + }) + .collect() +} + +// A valid zkEVM bytecode has odd number of 32 byte words +fn get_valid_bytecode_length(length: usize) -> usize { + // Firstly ensure that the length is divisible by 32 + let length_padded_to_32 = if length % 32 == 0 { + length + } else { + length + 32 - (length % 32) + }; + + // Then we ensure that the number returned by division by 32 is odd + if length_padded_to_32 % 64 == 0 { + length_padded_to_32 + 32 + } else { + length_padded_to_32 + } +} + +pub(crate) fn 
test_dry_run_upper_bound() { + // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.). + // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` + // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. + const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = + (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; + + // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. + // To get the upper bound, we'll try to do the following: + // 1. Max number of logs. + // 2. Lots of small L2->L1 messages / one large L2->L1 message. + // 3. Lots of small bytecodes / one large bytecode. + // 4. Lots of storage slot updates. + + let statistics = vec![ + // max logs + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, + ..Default::default() + }), + tag: "max_logs".to_string(), + }, + // max messages + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, + // so the max number of pubdata is bound by it + messages: vec![ + vec![0; 0]; + MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) + ], + ..Default::default() + }), + tag: "max_messages".to_string(), + }, + // long message + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it + messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], + ..Default::default() + }), + tag: "long_message".to_string(), + }, + // max bytecodes + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each bytecode must be at least 32 bytes long. 
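+                // (A 32-byte bytecode is a single 32-byte word, the smallest odd word count; cf. `get_valid_bytecode_length` above.)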
+ // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number + bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], + ..Default::default() + }), + tag: "max_bytecodes".to_string(), + }, + // long bytecode + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + bytecodes: vec![ + vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; + 1 + ], + ..Default::default() + }), + tag: "long_bytecode".to_string(), + }, + // lots of small repeated writes + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) + state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), + ..Default::default() + }), + tag: "small_repeated_writes".to_string(), + }, + // lots of big repeated writes + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value + state_diffs: generate_state_diffs( + true, + false, + MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37, + ), + ..Default::default() + }), + tag: "big_repeated_writes".to_string(), + }, + // lots of small initial writes + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each small initial write will take at least 32 bytes for derived key + 1 bytes encoding zeroing out + state_diffs: generate_state_diffs( + false, + true, + MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33, + ), + ..Default::default() + }), + tag: "small_initial_writes".to_string(), + }, + // lots of large initial writes + StatisticsTagged { + statistics: execute_test::(L1MessengerTestData { + // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value + state_diffs: generate_state_diffs( + false, + false, + MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65, + ), + ..Default::default() + }), + tag: "big_initial_writes".to_string(), + }, + ]; + + // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
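+    // Each assertion below multiplies the measured worst case by 3/2, i.e. every constant must hold at least a 1.5x margin.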
+ let max_used_gas = statistics + .iter() + .map(|s| (s.statistics.max_used_gas, s.tag.clone())) + .max() + .unwrap(); + assert!( + max_used_gas.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, + "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}", + max_used_gas.1, + max_used_gas.0, + BOOTLOADER_BATCH_TIP_OVERHEAD + ); + + let circuit_statistics = statistics + .iter() + .map(|s| (s.statistics.circuit_statistics, s.tag.clone())) + .max() + .unwrap(); + assert!( + circuit_statistics.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64, + "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}", + circuit_statistics.1, + circuit_statistics.0, + BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD + ); + + let execution_metrics_size = statistics + .iter() + .map(|s| (s.statistics.execution_metrics_size, s.tag.clone())) + .max() + .unwrap(); + assert!( + execution_metrics_size.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64, + "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}", + execution_metrics_size.1, + execution_metrics_size.0, + BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD + ); +} diff --git a/core/lib/multivm/src/versions/testonly/bootloader.rs b/core/lib/multivm/src/versions/testonly/bootloader.rs new file mode 100644 index 000000000000..4b9b63252d6a --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/bootloader.rs @@ -0,0 +1,44 @@ +use assert_matches::assert_matches; +use zksync_types::U256; + +use super::{get_bootloader, tester::VmTesterBuilder, TestedVm, BASE_SYSTEM_CONTRACTS}; +use crate::interface::{ExecutionResult, Halt, TxExecutionMode}; + +pub(crate) fn test_dummy_bootloader() { + let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + base_system_contracts.bootloader = get_bootloader("dummy"); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(base_system_contracts) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build::(); + + let result = vm.vm.finish_batch_without_pubdata(); + assert!(!result.result.is_failed()); + + let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); + vm.vm + .verify_required_bootloader_heap(&[(0, correct_first_cell)]); +} + +pub(crate) fn test_bootloader_out_of_gas() { + let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); + base_system_contracts.bootloader = get_bootloader("dummy"); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(base_system_contracts) + .with_bootloader_gas_limit(10) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build::(); + + let res = vm.vm.finish_batch_without_pubdata(); + + assert_matches!( + res.result, + ExecutionResult::Halt { + reason: Halt::BootloaderOutOfGas + } + ); +} diff --git a/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs b/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs new file mode 100644 index 000000000000..f60bc5594143 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs @@ -0,0 +1,44 @@ +use zksync_test_contracts::{TestContract, TxType}; + +use super::{default_pubdata_builder, tester::VmTesterBuilder, TestedVm}; +use crate::{ + interface::{InspectExecutionMode, TxExecutionMode, VmEvent, VmInterfaceExt}, + utils::bytecode, +}; + +pub(crate) fn 
test_bytecode_publishing() { + // In this test, we aim to ensure that the contents of the compressed bytecodes + // are included as part of the L2->L1 long messages + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let counter = TestContract::counter().bytecode; + let account = &mut vm.rich_accounts[0]; + + let compressed_bytecode = bytecode::compress(counter.to_vec()).unwrap().compressed; + + let tx = account.get_deploy_tx(counter, None, TxType::L2).tx; + assert_eq!(tx.execute.factory_deps.len(), 1); // The deployed bytecode is the only dependency + let push_result = vm.vm.push_transaction(tx); + assert_eq!(push_result.compressed_bytecodes.len(), 1); + assert_eq!(push_result.compressed_bytecodes[0].original, counter); + assert_eq!( + push_result.compressed_bytecodes[0].compressed, + compressed_bytecode + ); + + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + vm.vm.finish_batch(default_pubdata_builder()); + + let state = vm.vm.get_current_execution_state(); + let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events); + assert!( + long_messages.contains(&compressed_bytecode), + "Bytecode not published" + ); +} diff --git a/core/lib/multivm/src/versions/testonly/circuits.rs b/core/lib/multivm/src/versions/testonly/circuits.rs new file mode 100644 index 000000000000..de987a8912db --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/circuits.rs @@ -0,0 +1,73 @@ +use zksync_types::{Address, Execute, U256}; + +use super::tester::VmTesterBuilder; +use crate::{ + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, + versions::testonly::TestedVm, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; + +/// Checks that estimated number of circuits for simple transfer doesn't differ much +/// from hardcoded expected value. +pub(crate) fn test_circuits() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_rich_accounts(1) + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .build::(); + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(Address::repeat_byte(1)), + calldata: Vec::new(), + value: U256::from(1u8), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!res.result.is_failed(), "{res:#?}"); + + let s = res.statistics.circuit_statistic; + // Check `circuit_statistic`. 
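+    // Zero-valued circuits must match the expected value exactly; non-zero ones may deviate by at most 10% (checked in the loop below).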
+ const EXPECTED: [f32; 13] = [ + 1.34935, 0.15026, 1.66666, 0.00315, 1.0594, 0.00058, 0.00348, 0.00076, 0.11945, 0.14285, + 0.0, 0.0, 0.0, + ]; + let actual = [ + (s.main_vm, "main_vm"), + (s.ram_permutation, "ram_permutation"), + (s.storage_application, "storage_application"), + (s.storage_sorter, "storage_sorter"), + (s.code_decommitter, "code_decommitter"), + (s.code_decommitter_sorter, "code_decommitter_sorter"), + (s.log_demuxer, "log_demuxer"), + (s.events_sorter, "events_sorter"), + (s.keccak256, "keccak256"), + (s.ecrecover, "ecrecover"), + (s.sha256, "sha256"), + (s.secp256k1_verify, "secp256k1_verify"), + (s.transient_storage_checker, "transient_storage_checker"), + ]; + for ((actual, name), expected) in actual.iter().zip(EXPECTED) { + if expected == 0.0 { + assert_eq!( + *actual, expected, + "Check failed for {}, expected {}, actual {}", + name, expected, actual + ); + } else { + let diff = (actual - expected) / expected; + assert!( + diff.abs() < 0.1, + "Check failed for {}, expected {}, actual {}", + name, + expected, + actual + ); + } + } +} diff --git a/core/lib/multivm/src/versions/testonly/code_oracle.rs b/core/lib/multivm/src/versions/testonly/code_oracle.rs new file mode 100644 index 000000000000..e48b434403f2 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/code_oracle.rs @@ -0,0 +1,239 @@ +use ethabi::Token; +use zksync_test_contracts::TestContract; +use zksync_types::{ + bytecode::BytecodeHash, get_known_code_key, h256_to_u256, u256_to_h256, web3::keccak256, + Address, Execute, StorageLogWithPreviousValue, U256, +}; + +use super::{get_empty_storage, tester::VmTesterBuilder, TestedVm}; +use crate::{ + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, + versions::testonly::ContractToDeploy, +}; + +fn generate_large_bytecode() -> Vec { + // This is the maximal possible size of a zkEVM bytecode + vec![2u8; ((1 << 16) - 1) * 32] +} + +pub(crate) fn test_code_oracle() { + let precompiles_contract_address = Address::repeat_byte(1); + let precompile_contract_bytecode = TestContract::precompiles_test().bytecode.to_vec(); + + // Filling the zkevm bytecode + let normal_zkevm_bytecode = TestContract::counter().bytecode; + let normal_zkevm_bytecode_hash = BytecodeHash::for_bytecode(normal_zkevm_bytecode).value(); + let normal_zkevm_bytecode_keccak_hash = keccak256(normal_zkevm_bytecode); + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&normal_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ContractToDeploy::new( + precompile_contract_bytecode, + precompiles_contract_address, + )]) + .with_storage(storage) + .build::(); + + let precompile_contract = TestContract::precompiles_test(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle"); + + vm.vm.insert_bytecodes(&[normal_zkevm_bytecode]); + let account = &mut vm.rich_accounts[0]; + + // Firstly, let's ensure that the contract works. 
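+    // The call passes the versioned bytecode hash to decommit together with the keccak256 hash the decommitted bytecode is expected to match.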
+ let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(precompiles_contract_address), + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx1); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + + // Now, we ask for the same bytecode. We use to partially check whether the memory page with + // the decommitted bytecode gets erased (it shouldn't). + let tx2 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(precompiles_contract_address), + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx2); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); +} + +fn find_code_oracle_cost_log( + precompiles_contract_address: Address, + logs: &[StorageLogWithPreviousValue], +) -> &StorageLogWithPreviousValue { + logs.iter() + .find(|log| { + *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero() + }) + .expect("no code oracle cost log") +} + +pub(crate) fn test_code_oracle_big_bytecode() { + let precompiles_contract_address = Address::repeat_byte(1); + let precompile_contract_bytecode = TestContract::precompiles_test().bytecode.to_vec(); + + let big_zkevm_bytecode = generate_large_bytecode(); + let big_zkevm_bytecode_hash = BytecodeHash::for_bytecode(&big_zkevm_bytecode).value(); + let big_zkevm_bytecode_keccak_hash = keccak256(&big_zkevm_bytecode); + + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&big_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ContractToDeploy::new( + precompile_contract_bytecode, + precompiles_contract_address, + )]) + .with_storage(storage) + .build::(); + + let precompile_contract = TestContract::precompiles_test(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle"); + + vm.vm.insert_bytecodes(&[big_zkevm_bytecode.as_slice()]); + + let account = &mut vm.rich_accounts[0]; + + // Firstly, let's ensure that the contract works. 
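+    // Unlike `test_code_oracle`, the decommitted bytecode here has the maximum possible size ((1 << 16) - 1 words, generated above).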
+ let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(precompiles_contract_address), + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(big_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx1); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); +} + +pub(crate) fn test_refunds_in_code_oracle() { + let precompiles_contract_address = Address::repeat_byte(1); + + let normal_zkevm_bytecode = TestContract::counter().bytecode; + let normal_zkevm_bytecode_hash = BytecodeHash::for_bytecode(normal_zkevm_bytecode).value(); + let normal_zkevm_bytecode_keccak_hash = keccak256(normal_zkevm_bytecode); + let mut storage = get_empty_storage(); + storage.set_value( + get_known_code_key(&normal_zkevm_bytecode_hash), + u256_to_h256(U256::one()), + ); + + let precompile_contract = TestContract::precompiles_test(); + let call_code_oracle_function = precompile_contract.function("callCodeOracle"); + + // Execute code oracle twice with identical VM state that only differs in that the queried bytecode + // is already decommitted the second time. The second call must consume less gas (`decommit` doesn't charge additional gas + // for already decommitted codes). + let mut oracle_costs = vec![]; + for decommit in [false, true] { + let mut vm = VmTesterBuilder::new() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ContractToDeploy::new( + TestContract::precompiles_test().bytecode.to_vec(), + precompiles_contract_address, + )]) + .with_storage(storage.clone()) + .build::(); + + vm.vm.insert_bytecodes(&[normal_zkevm_bytecode]); + + let account = &mut vm.rich_accounts[0]; + if decommit { + let is_fresh = vm.vm.manually_decommit(normal_zkevm_bytecode_hash); + assert!(is_fresh); + } + + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(precompiles_contract_address), + calldata: call_code_oracle_function + .encode_input(&[ + Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), + Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), + ]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + let log = + find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs); + oracle_costs.push(log.log.value); + } + + // The refund is equal to `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words` + // in `CodeOracle.yul`. 
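+    // Hence the second (warm) call must be cheaper by exactly `4 * (bytecode length in bytes / 32)` ergs, as asserted below.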
+ let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]); + assert_eq!( + code_oracle_refund, + (4 * (normal_zkevm_bytecode.len() / 32)).into() + ); +} diff --git a/core/lib/multivm/src/versions/testonly/default_aa.rs b/core/lib/multivm/src/versions/testonly/default_aa.rs new file mode 100644 index 000000000000..9255854e8703 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/default_aa.rs @@ -0,0 +1,62 @@ +use zksync_test_contracts::{DeployContractsTx, TestContract, TxType}; +use zksync_types::{ + get_code_key, get_known_code_key, get_nonce_key, h256_to_u256, + system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, + utils::storage_key_for_eth_balance, + U256, +}; + +use super::{default_pubdata_builder, tester::VmTesterBuilder, TestedVm}; +use crate::{ + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, + vm_latest::utils::fee::get_batch_base_fee, +}; + +pub(crate) fn test_default_aa_interaction() { + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let counter = TestContract::counter().bytecode; + let account = &mut vm.rich_accounts[0]; + let DeployContractsTx { + tx, + bytecode_hash, + address, + } = account.get_deploy_tx(counter, None, TxType::L2); + let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.l1_batch_env); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!result.result.is_failed(), "Transaction wasn't successful"); + + vm.vm.finish_batch(default_pubdata_builder()); + vm.vm.get_current_execution_state(); + + // Both deployment and ordinary nonce should be incremented by one. + let account_nonce_key = get_nonce_key(&account.address); + let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; + + // The code hash of the deployed contract should be marked as republished. + let known_codes_key = get_known_code_key(&bytecode_hash); + + // The contract should be deployed successfully. 
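+    // I.e. the deployed bytecode hash must be recorded under the contract's code key.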
+ let account_code_key = get_code_key(&address); + + let operator_balance_key = storage_key_for_eth_balance(&vm.l1_batch_env.fee_account); + let expected_fee = maximal_fee + - U256::from(result.refunds.gas_refunded) + * U256::from(get_batch_base_fee(&vm.l1_batch_env)); + + let expected_slots = [ + (account_nonce_key, expected_nonce), + (known_codes_key, 1.into()), + (account_code_key, h256_to_u256(bytecode_hash)), + (operator_balance_key, expected_fee), + ]; + vm.vm.verify_required_storage(&expected_slots); +} diff --git a/core/lib/multivm/src/versions/testonly/evm_emulator.rs b/core/lib/multivm/src/versions/testonly/evm_emulator.rs new file mode 100644 index 000000000000..b979efe360db --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/evm_emulator.rs @@ -0,0 +1,640 @@ +use std::collections::HashMap; + +use assert_matches::assert_matches; +use ethabi::Token; +use rand::{rngs::StdRng, Rng, SeedableRng}; +use zksync_contracts::SystemContractCode; +use zksync_system_constants::{ + CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L2_BASE_TOKEN_ADDRESS, +}; +use zksync_test_contracts::{TestContract, TxType}; +use zksync_types::{ + bytecode::BytecodeHash, + get_code_key, get_known_code_key, h256_to_u256, + utils::{key_for_eth_balance, storage_key_for_eth_balance}, + AccountTreeId, Address, Execute, StorageKey, H256, U256, +}; + +use super::{default_system_env, TestedVm, VmTester, VmTesterBuilder}; +use crate::interface::{ + storage::InMemoryStorage, ExecutionResult, TxExecutionMode, VmExecutionResultAndLogs, + VmInterfaceExt, +}; + +fn override_system_contracts(storage: &mut InMemoryStorage) { + let mock_deployer = TestContract::mock_deployer().bytecode.to_vec(); + let mock_deployer_hash = BytecodeHash::for_bytecode(&mock_deployer).value(); + let mock_known_code_storage = TestContract::mock_known_code_storage().bytecode.to_vec(); + let mock_known_code_storage_hash = BytecodeHash::for_bytecode(&mock_known_code_storage).value(); + + storage.set_value(get_code_key(&CONTRACT_DEPLOYER_ADDRESS), mock_deployer_hash); + storage.set_value( + get_known_code_key(&mock_deployer_hash), + H256::from_low_u64_be(1), + ); + storage.set_value( + get_code_key(&KNOWN_CODES_STORAGE_ADDRESS), + mock_known_code_storage_hash, + ); + storage.set_value( + get_known_code_key(&mock_known_code_storage_hash), + H256::from_low_u64_be(1), + ); + storage.store_factory_dep(mock_deployer_hash, mock_deployer); + storage.store_factory_dep(mock_known_code_storage_hash, mock_known_code_storage); +} + +#[derive(Debug)] +struct EvmTestBuilder { + deploy_emulator: bool, + storage: InMemoryStorage, + evm_contract_addresses: Vec
, +} + +impl EvmTestBuilder { + fn new(deploy_emulator: bool, evm_contract_address: Address) -> Self { + Self { + deploy_emulator, + storage: InMemoryStorage::with_system_contracts(), + evm_contract_addresses: vec![evm_contract_address], + } + } + + fn with_mock_deployer(mut self) -> Self { + override_system_contracts(&mut self.storage); + self + } + + fn with_evm_address(mut self, address: Address) -> Self { + self.evm_contract_addresses.push(address); + self + } + + fn build(self) -> VmTester { + let mock_emulator = TestContract::mock_evm_emulator().bytecode.to_vec(); + let mut storage = self.storage; + let mut system_env = default_system_env(); + if self.deploy_emulator { + let evm_bytecode: Vec<_> = (0..32).collect(); + let evm_bytecode_hash = BytecodeHash::for_evm_bytecode(&evm_bytecode).value(); + storage.set_value( + get_known_code_key(&evm_bytecode_hash), + H256::from_low_u64_be(1), + ); + for evm_address in self.evm_contract_addresses { + storage.set_value(get_code_key(&evm_address), evm_bytecode_hash); + } + + system_env.base_system_smart_contracts.evm_emulator = Some(SystemContractCode { + hash: BytecodeHash::for_bytecode(&mock_emulator).value(), + code: mock_emulator, + }); + } else { + let emulator_hash = BytecodeHash::for_bytecode(&mock_emulator).value(); + storage.set_value(get_known_code_key(&emulator_hash), H256::from_low_u64_be(1)); + storage.store_factory_dep(emulator_hash, mock_emulator); + + for evm_address in self.evm_contract_addresses { + storage.set_value(get_code_key(&evm_address), emulator_hash); + // Set `isUserSpace` in the emulator storage to `true`, so that it skips emulator-specific checks + storage.set_value( + StorageKey::new(AccountTreeId::new(evm_address), H256::zero()), + H256::from_low_u64_be(1), + ); + } + } + + VmTesterBuilder::new() + .with_system_env(system_env) + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::() + } +} + +pub(crate) fn test_tracing_evm_contract_deployment() { + let mut storage = InMemoryStorage::with_system_contracts(); + override_system_contracts(&mut storage); + + let mut system_env = default_system_env(); + // The EVM emulator will not be accessed, so we set it to a dummy value. + system_env.base_system_smart_contracts.evm_emulator = + Some(system_env.base_system_smart_contracts.default_aa.clone()); + let mut vm = VmTesterBuilder::new() + .with_system_env(system_env) + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + let account = &mut vm.rich_accounts[0]; + + let args = [Token::Bytes((0..32).collect())]; + let evm_bytecode = ethabi::encode(&args); + let expected_bytecode_hash = BytecodeHash::for_evm_bytecode(&evm_bytecode).value(); + let execute = Execute::for_deploy(expected_bytecode_hash, vec![0; 32], &args); + let deploy_tx = account.get_l2_tx_for_execute(execute, None); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + // The EraVM contract also deployed in a transaction should be filtered out + assert_eq!( + vm_result.dynamic_factory_deps, + HashMap::from([(expected_bytecode_hash, evm_bytecode)]) + ); + + // "Deploy" a bytecode in another transaction and check that the first tx doesn't interfere with the returned `dynamic_factory_deps`. 
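+    // The reversed byte sequence yields a different bytecode, and hence a different expected bytecode hash.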
+ let args = [Token::Bytes((0..32).rev().collect())]; + let evm_bytecode = ethabi::encode(&args); + let expected_bytecode_hash = BytecodeHash::for_evm_bytecode(&evm_bytecode).value(); + let execute = Execute::for_deploy(expected_bytecode_hash, vec![0; 32], &args); + let deploy_tx = account.get_l2_tx_for_execute(execute, None); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + assert_eq!( + vm_result.dynamic_factory_deps, + HashMap::from([(expected_bytecode_hash, evm_bytecode)]) + ); +} + +pub(crate) fn test_mock_emulator_basics() { + let called_address = Address::repeat_byte(0x23); + let mut vm = EvmTestBuilder::new(true, called_address).build::(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(called_address), + calldata: vec![], + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +const RECIPIENT_ADDRESS: Address = Address::repeat_byte(0x12); + +/// `deploy_emulator = false` here and below tests the mock emulator as an ordinary contract (i.e., sanity-checks its logic). +pub(crate) fn test_mock_emulator_with_payment(deploy_emulator: bool) { + let mut vm = EvmTestBuilder::new(deploy_emulator, RECIPIENT_ADDRESS).build::(); + + let mut current_balance = U256::zero(); + for i in 1_u64..=5 { + let transferred_value = (1_000_000_000 * i).into(); + let vm_result = test_payment( + &mut vm, + &TestContract::mock_evm_emulator().abi, + &mut current_balance, + transferred_value, + ); + + let balance_storage_logs = vm_result.logs.storage_logs.iter().filter_map(|log| { + (*log.log.key.address() == L2_BASE_TOKEN_ADDRESS) + .then_some((*log.log.key.key(), h256_to_u256(log.log.value))) + }); + let balances: HashMap<_, _> = balance_storage_logs.collect(); + assert_eq!( + balances[&key_for_eth_balance(&RECIPIENT_ADDRESS)], + current_balance + ); + } +} + +fn test_payment( + vm: &mut VmTester, + mock_emulator_abi: ðabi::Contract, + balance: &mut U256, + transferred_value: U256, +) -> VmExecutionResultAndLogs { + *balance += transferred_value; + let test_payment_fn = mock_emulator_abi.function("testPayment").unwrap(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(RECIPIENT_ADDRESS), + calldata: test_payment_fn + .encode_input(&[Token::Uint(transferred_value), Token::Uint(*balance)]) + .unwrap(), + value: transferred_value, + factory_deps: vec![], + }, + None, + ); + + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); + vm_result +} + +pub(crate) fn test_mock_emulator_with_recursion( + deploy_emulator: bool, + is_external: bool, +) { + let mock_emulator_abi = &TestContract::mock_evm_emulator().abi; + let recipient_address = Address::repeat_byte(0x12); + let mut vm = EvmTestBuilder::new(deploy_emulator, recipient_address).build::(); + let account = &mut vm.rich_accounts[0]; + + let test_recursion_fn = mock_emulator_abi + .function(if is_external { + "testExternalRecursion" + } else { + "testRecursion" + }) + .unwrap(); + let mut expected_value = U256::one(); + let depth = 50_u32; + for i in 2..=depth { + expected_value *= i; + } + + let factory_deps = if is_external { + 
vec![TestContract::recursive_test().bytecode.to_vec()] + } else { + vec![] + }; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(recipient_address), + calldata: test_recursion_fn + .encode_input(&[Token::Uint(depth.into()), Token::Uint(expected_value)]) + .unwrap(), + value: 0.into(), + factory_deps, + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} + +pub(crate) fn test_calling_to_mock_emulator_from_native_contract() { + let recipient_address = Address::repeat_byte(0x12); + let mut vm = EvmTestBuilder::new(true, recipient_address).build::(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. + let deploy_tx = account.get_deploy_tx( + TestContract::recursive_test().bytecode, + Some(&[Token::Address(recipient_address)]), + TxType::L2, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + // Call from the native contract to the EVM emulator. + let test_fn = TestContract::recursive_test().function("recurse"); + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(deploy_tx.address), + calldata: test_fn.encode_input(&[Token::Uint(50.into())]).unwrap(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +pub(crate) fn test_mock_emulator_with_deployment(revert: bool) { + let contract_address = Address::repeat_byte(0xaa); + let mut vm = EvmTestBuilder::new(true, contract_address) + .with_mock_deployer() + .build::(); + let account = &mut vm.rich_accounts[0]; + + let mock_emulator_abi = &TestContract::mock_evm_emulator().abi; + let new_evm_bytecode = vec![0xfe; 96]; + let new_evm_bytecode_hash = BytecodeHash::for_evm_bytecode(&new_evm_bytecode).value(); + + let test_fn = mock_emulator_abi.function("testDeploymentAndCall").unwrap(); + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(contract_address), + calldata: test_fn + .encode_input(&[ + Token::FixedBytes(new_evm_bytecode_hash.0.into()), + Token::Bytes(new_evm_bytecode.clone()), + Token::Bool(revert), + ]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + + assert_eq!(vm_result.result.is_failed(), revert, "{vm_result:?}"); + let expected_dynamic_deps = if revert { + HashMap::new() + } else { + HashMap::from([(new_evm_bytecode_hash, new_evm_bytecode)]) + }; + assert_eq!(vm_result.dynamic_factory_deps, expected_dynamic_deps); + + // Test that a following transaction can decommit / call EVM contracts deployed in the previous transaction. 
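+    // If the deployment above was reverted, the call must instead fail with a "contract code length" revert (asserted below).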
+ let test_fn = mock_emulator_abi + .function("testCallToPreviousDeployment") + .unwrap(); + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(contract_address), + calldata: test_fn.encode_input(&[]).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + + if revert { + assert_matches!( + &vm_result.result, + ExecutionResult::Revert { output } + if output.to_string().contains("contract code length") + ); + } else { + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); + } + assert!(vm_result.dynamic_factory_deps.is_empty(), "{vm_result:?}"); +} + +fn encode_deployment(hash: H256, bytecode: Vec) -> Token { + assert_eq!(bytecode.len(), 32); + Token::Tuple(vec![ + Token::FixedBytes(hash.0.to_vec()), + Token::FixedBytes(bytecode), + ]) +} + +pub(crate) fn test_mock_emulator_with_recursive_deployment() { + let contract_address = Address::repeat_byte(0xaa); + let mut vm = EvmTestBuilder::new(true, contract_address) + .with_mock_deployer() + .build::(); + let account = &mut vm.rich_accounts[0]; + + let mock_emulator_abi = &TestContract::mock_evm_emulator().abi; + let bytecodes: HashMap<_, _> = (0_u8..10) + .map(|byte| { + let bytecode = vec![byte; 32]; + (BytecodeHash::for_evm_bytecode(&bytecode).value(), bytecode) + }) + .collect(); + let test_fn = mock_emulator_abi + .function("testRecursiveDeployment") + .unwrap(); + let deployments: Vec<_> = bytecodes + .iter() + .map(|(hash, code)| encode_deployment(*hash, code.clone())) + .collect(); + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(contract_address), + calldata: test_fn.encode_input(&[Token::Array(deployments)]).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); + assert_eq!(vm_result.dynamic_factory_deps, bytecodes); +} + +pub(crate) fn test_mock_emulator_with_partial_reverts() { + for seed in [1, 10, 100, 1_000] { + println!("Testing with RNG seed {seed}"); + let mut rng = StdRng::seed_from_u64(seed); + test_mock_emulator_with_partial_reverts_and_rng::(&mut rng); + } +} + +fn test_mock_emulator_with_partial_reverts_and_rng(rng: &mut impl Rng) { + let contract_address = Address::repeat_byte(0xaa); + let mut vm = EvmTestBuilder::new(true, contract_address) + .with_mock_deployer() + .build::(); + let account = &mut vm.rich_accounts[0]; + + let mock_emulator_abi = &TestContract::mock_evm_emulator().abi; + let all_bytecodes: HashMap<_, _> = (0_u8..10) + .map(|_| { + let bytecode = vec![rng.gen(); 32]; + (BytecodeHash::for_evm_bytecode(&bytecode).value(), bytecode) + }) + .collect(); + let should_revert: Vec<_> = (0..10).map(|_| rng.gen::()).collect(); + + let test_fn = mock_emulator_abi + .function("testDeploymentWithPartialRevert") + .unwrap(); + let deployments: Vec<_> = all_bytecodes + .iter() + .map(|(hash, code)| encode_deployment(*hash, code.clone())) + .collect(); + let revert_tokens: Vec<_> = should_revert.iter().copied().map(Token::Bool).collect(); + + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(contract_address), + calldata: test_fn + .encode_input(&[Token::Array(deployments), Token::Array(revert_tokens)]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + + let (_, vm_result) = vm + .vm + 
.execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); + + let dynamic_deps = &vm_result.dynamic_factory_deps; + assert_eq!( + dynamic_deps.len(), + should_revert + .iter() + .map(|flag| !flag as usize) + .sum::(), + "{dynamic_deps:?}" + ); + for ((bytecode_hash, bytecode), &should_revert) in all_bytecodes.iter().zip(&should_revert) { + assert_eq!( + dynamic_deps.get(bytecode_hash), + (!should_revert).then_some(bytecode), + "hash={bytecode_hash:?}, deps={dynamic_deps:?}" + ); + } +} + +pub(crate) fn test_mock_emulator_with_delegate_call() { + let evm_contract_address = Address::repeat_byte(0xaa); + let other_evm_contract_address = Address::repeat_byte(0xbb); + let mut builder = EvmTestBuilder::new(true, evm_contract_address); + builder.storage.set_value( + storage_key_for_eth_balance(&evm_contract_address), + H256::from_low_u64_be(1_000_000), + ); + builder.storage.set_value( + storage_key_for_eth_balance(&other_evm_contract_address), + H256::from_low_u64_be(2_000_000), + ); + let mut vm = builder + .with_evm_address(other_evm_contract_address) + .build::(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. + let deploy_tx = + account.get_deploy_tx(TestContract::increment_test().bytecode, None, TxType::L2); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + let test_fn = TestContract::increment_test().function("testDelegateCall"); + // Delegate to the native contract from EVM. + test_delegate_call(&mut vm, test_fn, evm_contract_address, deploy_tx.address); + // Delegate to EVM from the native contract. + test_delegate_call(&mut vm, test_fn, deploy_tx.address, evm_contract_address); + // Delegate to EVM from EVM. + test_delegate_call( + &mut vm, + test_fn, + evm_contract_address, + other_evm_contract_address, + ); +} + +fn test_delegate_call( + vm: &mut VmTester, + test_fn: ðabi::Function, + from: Address, + to: Address, +) { + let account = &mut vm.rich_accounts[0]; + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(from), + calldata: test_fn.encode_input(&[Token::Address(to)]).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} + +pub(crate) fn test_mock_emulator_with_static_call() { + let evm_contract_address = Address::repeat_byte(0xaa); + let other_evm_contract_address = Address::repeat_byte(0xbb); + let mut builder = EvmTestBuilder::new(true, evm_contract_address); + builder.storage.set_value( + storage_key_for_eth_balance(&evm_contract_address), + H256::from_low_u64_be(1_000_000), + ); + builder.storage.set_value( + storage_key_for_eth_balance(&other_evm_contract_address), + H256::from_low_u64_be(2_000_000), + ); + // Set differing read values for tested contracts. The slot index is defined in the contract. 
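+    // These values (100 and 200; the native contract reads 0) are what `testStaticCall` is expected to observe later in the test.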
+ let value_slot = H256::from_low_u64_be(0x123); + builder.storage.set_value( + StorageKey::new(AccountTreeId::new(evm_contract_address), value_slot), + H256::from_low_u64_be(100), + ); + builder.storage.set_value( + StorageKey::new(AccountTreeId::new(other_evm_contract_address), value_slot), + H256::from_low_u64_be(200), + ); + let mut vm = builder + .with_evm_address(other_evm_contract_address) + .build::(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. + let deploy_tx = + account.get_deploy_tx(TestContract::increment_test().bytecode, None, TxType::L2); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + let test_fn = TestContract::increment_test().function("testStaticCall"); + // Call to the native contract from EVM. + test_static_call(&mut vm, test_fn, evm_contract_address, deploy_tx.address, 0); + // Call to EVM from the native contract. + test_static_call( + &mut vm, + test_fn, + deploy_tx.address, + evm_contract_address, + 100, + ); + // Call to EVM from EVM. + test_static_call( + &mut vm, + test_fn, + evm_contract_address, + other_evm_contract_address, + 200, + ); +} + +fn test_static_call( + vm: &mut VmTester, + test_fn: ðabi::Function, + from: Address, + to: Address, + expected_value: u64, +) { + let account = &mut vm.rich_accounts[0]; + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(from), + calldata: test_fn + .encode_input(&[Token::Address(to), Token::Uint(expected_value.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} diff --git a/core/lib/multivm/src/versions/testonly/gas_limit.rs b/core/lib/multivm/src/versions/testonly/gas_limit.rs new file mode 100644 index 000000000000..789bfb97b217 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/gas_limit.rs @@ -0,0 +1,34 @@ +use zksync_test_contracts::Account; +use zksync_types::{fee::Fee, Execute}; + +use super::{tester::VmTesterBuilder, TestedVm}; +use crate::{ + interface::TxExecutionMode, + vm_latest::constants::{TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, +}; + +/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
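+/// A transaction with a distinctive gas limit is pushed, and the matching bootloader heap slot is then inspected.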
+pub(crate) fn test_tx_gas_limit_offset<VM: TestedVm>() {
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .build::<VM>();
+
+    let gas_limit = 9999.into();
+    let tx = vm.rich_accounts[0].get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(Default::default()),
+            ..Default::default()
+        },
+        Some(Fee {
+            gas_limit,
+            ..Account::default_fee()
+        }),
+    );
+
+    vm.vm.push_transaction(tx);
+
+    let slot = (TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET) as u32;
+    vm.vm.verify_required_bootloader_heap(&[(slot, gas_limit)]);
+}
diff --git a/core/lib/multivm/src/versions/testonly/get_used_contracts.rs b/core/lib/multivm/src/versions/testonly/get_used_contracts.rs
new file mode 100644
index 000000000000..7bfce535b44d
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/get_used_contracts.rs
@@ -0,0 +1,216 @@
+use std::iter;
+
+use assert_matches::assert_matches;
+use ethabi::Token;
+use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode};
+use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS;
+use zksync_test_contracts::{Account, TestContract, TxType};
+use zksync_types::{
+    bytecode::BytecodeHash, h256_to_u256, AccountTreeId, Address, Execute, StorageKey, H256, U256,
+};
+
+use super::{
+    tester::{VmTester, VmTesterBuilder},
+    TestedVm,
+};
+use crate::{
+    interface::{
+        ExecutionResult, InspectExecutionMode, TxExecutionMode, VmExecutionResultAndLogs,
+        VmInterfaceExt,
+    },
+    versions::testonly::ContractToDeploy,
+};
+
+pub(crate) fn test_get_used_contracts<VM: TestedVm>() {
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .build::<VM>();
+
+    assert!(vm.vm.known_bytecode_hashes().is_empty());
+
+    // Create, push and execute a transaction with non-empty factory deps; it should succeed,
+    // so that `get_decommitted_hashes()` is updated.
+    let contract_code = TestContract::counter().bytecode;
+    let account = &mut vm.rich_accounts[0];
+    let tx = account.get_deploy_tx(contract_code, None, TxType::L1 { serial_id: 0 });
+    vm.vm.push_transaction(tx.tx.clone());
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!result.result.is_failed());
+
+    assert!(vm
+        .vm
+        .decommitted_hashes()
+        .contains(&h256_to_u256(tx.bytecode_hash)));
+
+    // Note: `Default_AA` will be in the list of used contracts if an L2 tx is used
+    assert_eq!(vm.vm.decommitted_hashes(), vm.vm.known_bytecode_hashes());
+
+    // Create, push and execute a transaction with non-empty factory deps that fails
+    // (`known_bytecodes` will be updated, but we expect `get_decommitted_hashes()` to not be updated)
+
+    let calldata = [1, 2, 3];
+    let big_calldata: Vec<u8> = calldata
+        .iter()
+        .cycle()
+        .take(calldata.len() * 1024)
+        .cloned()
+        .collect();
+    let account2 = Account::from_seed(u32::MAX);
+    assert_ne!(account2.address, account.address);
+    let tx2 = account2.get_l1_tx(
+        Execute {
+            contract_address: Some(CONTRACT_DEPLOYER_ADDRESS),
+            calldata: big_calldata,
+            value: Default::default(),
+            factory_deps: vec![vec![1; 32]],
+        },
+        1,
+    );
+
+    vm.vm.push_transaction(tx2.clone());
+
+    let res2 = vm.vm.execute(InspectExecutionMode::OneTx);
+
+    assert!(res2.result.is_failed());
+
+    for factory_dep in tx2.execute.factory_deps {
+        let hash_to_u256 = BytecodeHash::for_bytecode(&factory_dep).value_u256();
+        assert!(vm.vm.known_bytecode_hashes().contains(&hash_to_u256));
+
assert!(!vm.vm.decommitted_hashes().contains(&hash_to_u256)); + } +} + +/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial +/// decommitment cost (>10,000 gas). +fn inflated_counter_bytecode() -> Vec { + let mut counter_bytecode = TestContract::counter().bytecode.to_vec(); + counter_bytecode.extend( + iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) + .take(10_000) + .flatten(), + ); + counter_bytecode +} + +#[derive(Debug)] +struct ProxyCounterData { + proxy_counter_address: Address, + counter_bytecode_hash: U256, +} + +fn execute_proxy_counter( + gas: u32, +) -> (VmTester, ProxyCounterData, VmExecutionResultAndLogs) { + let counter_bytecode = inflated_counter_bytecode(); + let counter_bytecode_hash = BytecodeHash::for_bytecode(&counter_bytecode).value_u256(); + let counter_address = Address::repeat_byte(0x23); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_custom_contracts(vec![ContractToDeploy::new( + counter_bytecode, + counter_address, + )]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let account = &mut vm.rich_accounts[0]; + let deploy_tx = account.get_deploy_tx( + TestContract::proxy_counter().bytecode, + Some(&[Token::Address(counter_address)]), + TxType::L2, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let decommitted_hashes = vm.vm.decommitted_hashes(); + assert!( + !decommitted_hashes.contains(&counter_bytecode_hash), + "{decommitted_hashes:?}" + ); + + let increment = TestContract::proxy_counter().function("increment"); + let increment_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(deploy_tx.address), + calldata: increment + .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(increment_tx, true); + compression_result.unwrap(); + let data = ProxyCounterData { + proxy_counter_address: deploy_tx.address, + counter_bytecode_hash, + }; + (vm, data, exec_result) +} + +pub(crate) fn test_get_used_contracts_with_far_call() { + let (vm, data, exec_result) = execute_proxy_counter::(100_000); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + let decommitted_hashes = vm.vm.decommitted_hashes(); + assert!( + decommitted_hashes.contains(&data.counter_bytecode_hash), + "{decommitted_hashes:?}" + ); +} + +pub(crate) fn test_get_used_contracts_with_out_of_gas_far_call() { + let (mut vm, data, exec_result) = execute_proxy_counter::(10_000); + assert_matches!(exec_result.result, ExecutionResult::Revert { .. }); + let decommitted_hashes = vm.vm.decommitted_hashes(); + assert!( + decommitted_hashes.contains(&data.counter_bytecode_hash), + "{decommitted_hashes:?}" + ); + + // Execute another transaction with a successful far call and check that it's still charged for decommitment. 
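+    // `u64::MAX` gas is requested for the far call below, so it cannot run out of gas this time.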
+ let account = &mut vm.rich_accounts[0]; + let increment = TestContract::proxy_counter().function("increment"); + let increment_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(data.proxy_counter_address), + calldata: increment + .encode_input(&[Token::Uint(1.into()), Token::Uint(u64::MAX.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(increment_tx, true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let proxy_counter_cost_key = StorageKey::new( + AccountTreeId::new(data.proxy_counter_address), + H256::from_low_u64_be(1), + ); + let far_call_cost_log = exec_result + .logs + .storage_logs + .iter() + .find(|log| log.log.key == proxy_counter_cost_key) + .expect("no cost log"); + assert!( + far_call_cost_log.previous_value.is_zero(), + "{far_call_cost_log:?}" + ); + let far_call_cost = h256_to_u256(far_call_cost_log.log.value); + assert!(far_call_cost > 10_000.into(), "{far_call_cost}"); +} diff --git a/core/lib/multivm/src/versions/testonly/is_write_initial.rs b/core/lib/multivm/src/versions/testonly/is_write_initial.rs new file mode 100644 index 000000000000..9eb986549c52 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/is_write_initial.rs @@ -0,0 +1,41 @@ +use zksync_test_contracts::{TestContract, TxType}; +use zksync_types::get_nonce_key; + +use super::{tester::VmTesterBuilder, TestedVm}; +use crate::interface::{ + storage::ReadStorage, InspectExecutionMode, TxExecutionMode, VmInterfaceExt, +}; + +pub(crate) fn test_is_write_initial_behaviour() { + // In this test, we check result of `is_write_initial` at different stages. + // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't + // messed up it with the repeated writes during the one batch execution. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + let account = &mut vm.rich_accounts[0]; + + let nonce_key = get_nonce_key(&account.address); + // Check that the next write to the nonce key will be initial. + assert!(vm + .storage + .as_ref() + .borrow_mut() + .is_write_initial(&nonce_key)); + + let tx = account + .get_deploy_tx(TestContract::counter().bytecode, None, TxType::L2) + .tx; + + vm.vm.push_transaction(tx); + vm.vm.execute(InspectExecutionMode::OneTx); + + // Check that `is_write_initial` still returns true for the nonce key. 
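+    // The nonce was written during execution, but within the same batch the write is only pending, so the underlying storage still treats it as initial.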
+ assert!(vm + .storage + .as_ref() + .borrow_mut() + .is_write_initial(&nonce_key)); +} diff --git a/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs new file mode 100644 index 000000000000..4a39611dfd3c --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs @@ -0,0 +1,225 @@ +use assert_matches::assert_matches; +use ethabi::Token; +use zksync_contracts::l1_messenger_contract; +use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; +use zksync_test_contracts::{TestContract, TxType}; +use zksync_types::{ + get_code_key, get_known_code_key, h256_to_u256, + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + u256_to_h256, Address, Execute, ExecuteTransactionCommon, U256, +}; + +use super::{tester::VmTesterBuilder, ContractToDeploy, TestedVm, BASE_SYSTEM_CONTRACTS}; +use crate::{ + interface::{ + ExecutionResult, InspectExecutionMode, TxExecutionMode, VmInterfaceExt, VmRevertReason, + }, + utils::StorageWritesDeduplicator, +}; + +pub(crate) fn test_l1_tx_execution() { + // In this test, we try to execute a contract deployment from L1 + // Here instead of marking code hash via the bootloader means, we will be + // using L1->L2 communication, the same it would likely be done during the priority mode. + + // There are always at least 9 initial writes here, because we pay fees from l1: + // - `totalSupply` of ETH token + // - balance of the refund recipient + // - balance of the bootloader + // - `tx_rolling` hash + // - `gasPerPubdataByte` + // - `basePubdataSpent` + // - rolling hash of L2->L1 logs + // - transaction number in block counter + // - L2->L1 log counter in `L1Messenger` + + // TODO(PLA-537): right now we are using 5 slots instead of 9 due to 0 fee for transaction. + let basic_initial_writes = 5; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let account = &mut vm.rich_accounts[0]; + let deploy_tx = account.get_deploy_tx( + TestContract::counter().bytecode, + None, + TxType::L1 { serial_id: 1 }, + ); + let tx_hash = deploy_tx.tx.hash(); + + let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { + shard_id: 0, + is_service: true, + tx_number_in_block: 0, + sender: BOOTLOADER_ADDRESS, + key: tx_hash, + value: u256_to_h256(U256::from(1u32)), + }] + .into_iter() + .map(UserL2ToL1Log) + .collect(); + + vm.vm.push_transaction(deploy_tx.tx.clone()); + + let res = vm.vm.execute(InspectExecutionMode::OneTx); + + // The code hash of the deployed contract should be marked as republished. + let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); + + // The contract should be deployed successfully. 
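+    // Both slots are checked via `verify_required_storage` below.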
+ let account_code_key = get_code_key(&deploy_tx.address); + + assert!(!res.result.is_failed()); + + vm.vm.verify_required_storage(&[ + (known_codes_key, U256::from(1)), + (account_code_key, h256_to_u256(deploy_tx.bytecode_hash)), + ]); + assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); + + let tx = account.get_test_contract_transaction( + deploy_tx.address, + true, + None, + false, + TxType::L1 { serial_id: 0 }, + ); + vm.vm.push_transaction(tx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); + let storage_logs = res.logs.storage_logs; + let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); + + // Tx panicked + assert_eq!(res.initial_storage_writes, basic_initial_writes); + + let tx = account.get_test_contract_transaction( + deploy_tx.address, + false, + None, + false, + TxType::L1 { serial_id: 0 }, + ); + vm.vm.push_transaction(tx.clone()); + let res = vm.vm.execute(InspectExecutionMode::OneTx); + let storage_logs = res.logs.storage_logs; + let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); + // We changed one slot inside contract. + assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); + + // No repeated writes + let repeated_writes = res.repeated_storage_writes; + assert_eq!(res.repeated_storage_writes, 0); + + vm.vm.push_transaction(tx); + let storage_logs = vm.vm.execute(InspectExecutionMode::OneTx).logs.storage_logs; + let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); + // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated. + // But now the base pubdata spent has changed too. + assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); + assert_eq!(res.repeated_storage_writes, repeated_writes); + + let tx = account.get_test_contract_transaction( + deploy_tx.address, + false, + Some(10.into()), + false, + TxType::L1 { serial_id: 1 }, + ); + vm.vm.push_transaction(tx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + // Method is not payable tx should fail + assert!(result.result.is_failed(), "The transaction should fail"); + + let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); + assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); + assert_eq!(res.repeated_storage_writes, 1); +} + +pub(crate) fn test_l1_tx_execution_high_gas_limit() { + // In this test, we try to execute an L1->L2 transaction with a high gas limit. + // Usually priority transactions with dangerously gas limit should even pass the checks on the L1, + // however, they might pass during the transition period to the new fee model, so we check that we can safely process those. 
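+    // The 300M gas limit set below is far beyond what the L1 checks would normally let through.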
+ + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let account = &mut vm.rich_accounts[0]; + + let l1_messenger = l1_messenger_contract(); + + let contract_function = l1_messenger.function("sendToL1").unwrap(); + let params = [ + // Even a message of size 100k should not be able to be sent by a priority transaction + Token::Bytes(vec![0u8; 100_000]), + ]; + let calldata = contract_function.encode_input(¶ms).unwrap(); + + let mut tx = account.get_l1_tx( + Execute { + contract_address: Some(L1_MESSENGER_ADDRESS), + value: 0.into(), + factory_deps: vec![], + calldata, + }, + 0, + ); + + if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { + // Using some large gas limit + data.gas_limit = 300_000_000.into(); + } else { + unreachable!() + }; + + vm.vm.push_transaction(tx); + + let res = vm.vm.execute(InspectExecutionMode::OneTx); + + assert!(res.result.is_failed(), "The transaction should've failed"); +} + +pub(crate) fn test_l1_tx_execution_gas_estimation_with_low_gas() { + let counter_contract = TestContract::counter().bytecode.to_vec(); + let counter_address = Address::repeat_byte(0x11); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) + .with_execution_mode(TxExecutionMode::EstimateFee) + .with_custom_contracts(vec![ContractToDeploy::new( + counter_contract, + counter_address, + )]) + .with_rich_accounts(1) + .build::(); + + let account = &mut vm.rich_accounts[0]; + let mut tx = account.get_test_contract_transaction( + counter_address, + false, + None, + false, + TxType::L1 { serial_id: 0 }, + ); + let ExecuteTransactionCommon::L1(data) = &mut tx.common_data else { + unreachable!(); + }; + // This gas limit is chosen so that transaction starts getting executed by the bootloader, but then runs out of gas + // before its execution result is posted. + data.gas_limit = 15_000.into(); + + vm.vm.push_transaction(tx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); + assert_matches!( + &res.result, + ExecutionResult::Revert { output: VmRevertReason::General { msg, .. } } + if msg.contains("reverted with empty reason") + ); +} diff --git a/core/lib/multivm/src/versions/testonly/l2_blocks.rs b/core/lib/multivm/src/versions/testonly/l2_blocks.rs new file mode 100644 index 000000000000..0dfe600b73be --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/l2_blocks.rs @@ -0,0 +1,416 @@ +//! +//! Tests for the bootloader +//! The description for each of the tests can be found in the corresponding `.yul` file. +//! 
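+
+// Editor's note (sketch): several tests in this file poke "block info" words directly.
+// In the system context, an L2 block / batch info word packs the block number and timestamp
+// into a single U256 (number in the high bits, timestamp in the low bits);
+// `pack_block_info` / `unpack_block_info` from `zksync_types::block` implement this
+// round trip, e.g.:
+//
+//     let packed = pack_block_info(5, 1_700_000_000);
+//     assert_eq!(unpack_block_info(packed), (5, 1_700_000_000));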
+ +use assert_matches::assert_matches; +use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; +use zksync_types::{ + block::{pack_block_info, L2BlockHasher}, + h256_to_u256, u256_to_h256, AccountTreeId, Address, Execute, ExecuteTransactionCommon, + L1BatchNumber, L1TxCommonData, L2BlockNumber, ProtocolVersionId, StorageKey, Transaction, H256, + SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, + U256, +}; + +use super::{default_l1_batch, get_empty_storage, tester::VmTesterBuilder, TestedVm}; +use crate::{ + interface::{ + storage::StorageView, ExecutionResult, Halt, InspectExecutionMode, L2BlockEnv, + TxExecutionMode, VmInterfaceExt, + }, + vm_latest::{ + constants::{TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO}, + utils::l2_blocks::get_l2_block_hash_key, + }, +}; + +fn get_l1_noop() -> Transaction { + Transaction { + common_data: ExecuteTransactionCommon::L1(L1TxCommonData { + sender: Address::repeat_byte(1), + gas_limit: U256::from(2000000u32), + gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Default::default() + }), + execute: Execute { + contract_address: Some(Address::repeat_byte(0xc0)), + calldata: vec![], + value: U256::zero(), + factory_deps: vec![], + }, + received_timestamp_ms: 0, + raw_bytes: None, + } +} + +pub(crate) fn test_l2_block_initialization_timestamp() { + // This test checks that the L2 block initialization works correctly. + // Here we check that the first block must have timestamp that is greater or equal to the timestamp + // of the current batch. + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + // Override the timestamp of the current L2 block to be 0. + vm.vm.push_l2_block_unchecked(L2BlockEnv { + number: 1, + timestamp: 0, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 1, + }); + let l1_tx = get_l1_noop(); + + vm.vm.push_transaction(l1_tx); + let res = vm.vm.execute(InspectExecutionMode::OneTx); + + assert_matches!( + res.result, + ExecutionResult::Halt { reason: Halt::FailedToSetL2Block(msg) } + if msg.contains("timestamp") + ); +} + +pub(crate) fn test_l2_block_initialization_number_non_zero() { + // This test checks that the L2 block initialization works correctly. + // Here we check that the first L2 block number can not be zero. 
+ + let l1_batch = default_l1_batch(L1BatchNumber(1)); + let first_l2_block = L2BlockEnv { + number: 0, + timestamp: l1_batch.timestamp, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 1, + }; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_l1_batch_env(l1_batch) + .with_rich_accounts(1) + .build::(); + + let l1_tx = get_l1_noop(); + + vm.vm.push_transaction(l1_tx); + + set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block); + + let res = vm.vm.execute(InspectExecutionMode::OneTx); + + assert_eq!( + res.result, + ExecutionResult::Halt { + reason: Halt::FailedToSetL2Block( + "L2 block number is never expected to be zero".to_string() + ) + } + ); +} + +fn test_same_l2_block( + expected_error: Option, + override_timestamp: Option, + override_prev_block_hash: Option, +) { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.timestamp = 1; + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_l1_batch_env(l1_batch) + .with_rich_accounts(1) + .build::(); + + let l1_tx = get_l1_noop(); + vm.vm.push_transaction(l1_tx.clone()); + let res = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!res.result.is_failed()); + + let mut current_l2_block = vm.l1_batch_env.first_l2_block; + + if let Some(timestamp) = override_timestamp { + current_l2_block.timestamp = timestamp; + } + if let Some(prev_block_hash) = override_prev_block_hash { + current_l2_block.prev_block_hash = prev_block_hash; + } + + if (None, None) == (override_timestamp, override_prev_block_hash) { + current_l2_block.max_virtual_blocks_to_create = 0; + } + + vm.vm.push_transaction(l1_tx); + set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block); + + let result = vm.vm.execute(InspectExecutionMode::OneTx); + + if let Some(err) = expected_error { + assert_eq!(result.result, ExecutionResult::Halt { reason: err }); + } else { + assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); + } +} + +pub(crate) fn test_l2_block_same_l2_block() { + // This test aims to test the case when there are multiple transactions inside the same L2 block. 
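+    // (Editor's note: `test_same_l2_block` above re-submits the first block's info to
+    // continue the block; when neither the timestamp nor the previous block hash is
+    // overridden, it sets `max_virtual_blocks_to_create = 0`, which marks a continuation
+    // of the current L2 block rather than the start of a new one.)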
+ + // Case 1: Incorrect timestamp + test_same_l2_block::( + Some(Halt::FailedToSetL2Block( + "The timestamp of the same L2 block must be same".to_string(), + )), + Some(0), + None, + ); + + // Case 2: Incorrect previous block hash + test_same_l2_block::( + Some(Halt::FailedToSetL2Block( + "The previous hash of the same L2 block must be same".to_string(), + )), + None, + Some(H256::zero()), + ); + + // Case 3: Correct continuation of the same L2 block + test_same_l2_block::(None, None, None); +} + +fn test_new_l2_block( + first_l2_block: L2BlockEnv, + overriden_second_block_number: Option, + overriden_second_block_timestamp: Option, + overriden_second_block_prev_block_hash: Option, + expected_error: Option, +) { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.timestamp = 1; + l1_batch.first_l2_block = first_l2_block; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + let l1_tx = get_l1_noop(); + + // Firstly we execute the first transaction + vm.vm.push_transaction(l1_tx.clone()); + vm.vm.execute(InspectExecutionMode::OneTx); + + let mut second_l2_block = vm.l1_batch_env.first_l2_block; + second_l2_block.number += 1; + second_l2_block.timestamp += 1; + second_l2_block.prev_block_hash = vm.vm.last_l2_block_hash(); + + if let Some(block_number) = overriden_second_block_number { + second_l2_block.number = block_number; + } + if let Some(timestamp) = overriden_second_block_timestamp { + second_l2_block.timestamp = timestamp; + } + if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { + second_l2_block.prev_block_hash = prev_block_hash; + } + + vm.vm.push_l2_block_unchecked(second_l2_block); + vm.vm.push_transaction(l1_tx); + + let result = vm.vm.execute(InspectExecutionMode::OneTx); + if let Some(err) = expected_error { + assert_eq!(result.result, ExecutionResult::Halt { reason: err }); + } else { + assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); + } +} + +pub(crate) fn test_l2_block_new_l2_block() { + // This test is aimed to cover potential issue + + let correct_first_block = L2BlockEnv { + number: 1, + timestamp: 1, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 1, + }; + + // Case 1: Block number increasing by more than 1 + test_new_l2_block::( + correct_first_block, + Some(3), + None, + None, + Some(Halt::FailedToSetL2Block( + "Invalid new L2 block number".to_string(), + )), + ); + + // Case 2: Timestamp not increasing + test_new_l2_block::( + correct_first_block, + None, + Some(1), + None, + Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), + ); + + // Case 3: Incorrect previous block hash + test_new_l2_block::( + correct_first_block, + None, + None, + Some(H256::zero()), + Some(Halt::FailedToSetL2Block( + "The current L2 block hash is incorrect".to_string(), + )), + ); + + // Case 4: Correct new block + test_new_l2_block::(correct_first_block, None, None, None, None); +} + +#[allow(clippy::too_many_arguments)] +fn test_first_in_batch( + miniblock_timestamp: u64, + miniblock_number: u32, + pending_txs_hash: H256, + batch_timestamp: u64, + new_batch_timestamp: u64, + batch_number: u32, + proposed_block: L2BlockEnv, + expected_error: Option, +) { + let mut l1_batch = default_l1_batch(L1BatchNumber(1)); + l1_batch.number += 1; + 
l1_batch.timestamp = new_batch_timestamp; + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + let l1_tx = get_l1_noop(); + + // Setting the values provided. + let miniblock_info_slot = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + let pending_txs_hash_slot = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, + ); + let batch_info_slot = StorageKey::new( + AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), + SYSTEM_CONTEXT_BLOCK_INFO_POSITION, + ); + let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); + + let mut storage = get_empty_storage(); + storage.set_value( + miniblock_info_slot, + u256_to_h256(pack_block_info( + miniblock_number as u64, + miniblock_timestamp, + )), + ); + storage.set_value(pending_txs_hash_slot, pending_txs_hash); + storage.set_value( + batch_info_slot, + u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), + ); + storage.set_value( + prev_block_hash_position, + L2BlockHasher::legacy_hash(L2BlockNumber(miniblock_number - 1)), + ); + // Replace the storage entirely. It's not enough to write to the underlying storage (since read values are already cached + // in the storage view). + *vm.storage.borrow_mut() = StorageView::new(storage); + + // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. + // And then override it with the user-provided value + + let last_l2_block = vm.l1_batch_env.first_l2_block; + let new_l2_block = L2BlockEnv { + number: last_l2_block.number + 1, + timestamp: last_l2_block.timestamp + 1, + prev_block_hash: vm.vm.last_l2_block_hash(), + max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, + }; + + vm.vm.push_l2_block_unchecked(new_l2_block); + vm.vm.push_transaction(l1_tx); + set_manual_l2_block_info(&mut vm.vm, 0, proposed_block); + + let result = vm.vm.execute(InspectExecutionMode::OneTx); + if let Some(err) = expected_error { + assert_eq!(result.result, ExecutionResult::Halt { reason: err }); + } else { + assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); + } +} + +pub(crate) fn test_l2_block_first_in_batch() { + let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); + let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 1, prev_block_hash) + .finalize(ProtocolVersionId::latest()); + test_first_in_batch::( + 1, + 1, + H256::zero(), + 1, + 2, + 1, + L2BlockEnv { + number: 2, + timestamp: 2, + prev_block_hash, + max_virtual_blocks_to_create: 1, + }, + None, + ); + + let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); + let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 8, prev_block_hash) + .finalize(ProtocolVersionId::latest()); + test_first_in_batch::( + 8, + 1, + H256::zero(), + 5, + 12, + 1, + L2BlockEnv { + number: 2, + timestamp: 9, + prev_block_hash, + max_virtual_blocks_to_create: 1, + }, + Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), + ); +} + +fn set_manual_l2_block_info(vm: &mut impl TestedVm, tx_number: usize, block_info: L2BlockEnv) { + let fictive_miniblock_position = + TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; + 
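+    // Editor's note: each transaction owns `TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO` words of
+    // operator-provided L2 block info in the bootloader heap; the four writes below fill
+    // them in the order [number, timestamp, prev_block_hash, max_virtual_blocks_to_create]
+    // for the slot region belonging to `tx_number`.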
vm.write_to_bootloader_heap(&[
+        (fictive_miniblock_position, block_info.number.into()),
+        (fictive_miniblock_position + 1, block_info.timestamp.into()),
+        (
+            fictive_miniblock_position + 2,
+            h256_to_u256(block_info.prev_block_hash),
+        ),
+        (
+            fictive_miniblock_position + 3,
+            block_info.max_virtual_blocks_to_create.into(),
+        ),
+    ])
+}
diff --git a/core/lib/multivm/src/versions/testonly/mod.rs b/core/lib/multivm/src/versions/testonly/mod.rs
new file mode 100644
index 000000000000..38a09049b15a
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/mod.rs
@@ -0,0 +1,184 @@
+//! Reusable tests and tooling for low-level VM testing.
+//!
+//! # How it works
+//!
+//! - [`TestedVm`] defines test-specific VM extensions. It's currently implemented for the latest legacy VM
+//!   (`vm_latest`) and the fast VM (`vm_fast`).
+//! - Submodules of this module define test functions generic over `TestedVm`. Specific VM versions implement `TestedVm`
+//!   and can create tests based on these test functions with a minimal amount of boilerplate code.
+//! - Tests use [`VmTester`] built using [`VmTesterBuilder`] to create a VM instance. This allows setting up storage for the VM,
+//!   custom [`SystemEnv`] / [`L1BatchEnv`], deployed contracts, pre-funded accounts etc.
+
+use std::{collections::HashSet, rc::Rc};
+
+use once_cell::sync::Lazy;
+use zksync_contracts::{
+    read_bootloader_code, read_zbin_bytecode, BaseSystemContracts, SystemContractCode,
+};
+use zksync_types::{
+    block::L2BlockHasher, bytecode::BytecodeHash, fee_model::BatchFeeInput, get_code_key,
+    get_is_account_key, h256_to_u256, u256_to_h256, utils::storage_key_for_eth_balance, Address,
+    L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, U256,
+};
+use zksync_vm_interface::{
+    pubdata::PubdataBuilder, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode,
+};
+
+pub(super) use self::tester::{TestedVm, VmTester, VmTesterBuilder};
+use crate::{
+    interface::storage::InMemoryStorage, pubdata_builders::RollupPubdataBuilder,
+    vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
+};
+
+pub(super) mod block_tip;
+pub(super) mod bootloader;
+pub(super) mod bytecode_publishing;
+pub(super) mod circuits;
+pub(super) mod code_oracle;
+pub(super) mod default_aa;
+pub(super) mod evm_emulator;
+pub(super) mod gas_limit;
+pub(super) mod get_used_contracts;
+pub(super) mod is_write_initial;
+pub(super) mod l1_tx_execution;
+pub(super) mod l2_blocks;
+pub(super) mod nonce_holder;
+pub(super) mod precompiles;
+pub(super) mod refunds;
+pub(super) mod require_eip712;
+pub(super) mod rollbacks;
+pub(super) mod secp256r1;
+pub(super) mod simple_execution;
+pub(super) mod storage;
+mod tester;
+pub(super) mod tracing_execution_error;
+pub(super) mod transfer;
+pub(super) mod upgrade;
+
+static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> =
+    Lazy::new(BaseSystemContracts::load_from_disk);
+
+fn get_empty_storage() -> InMemoryStorage {
+    InMemoryStorage::with_system_contracts()
+}
+
+pub(crate) fn read_max_depth_contract() -> Vec<u8> {
+    read_zbin_bytecode(
+        "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin",
+    )
+}
+
+pub(crate) fn get_bootloader(test: &str) -> SystemContractCode {
+    let bootloader_code = read_bootloader_code(test);
+    let bootloader_hash = BytecodeHash::for_bytecode(&bootloader_code).value();
+    SystemContractCode {
+        code: bootloader_code,
+        hash: bootloader_hash,
+    }
+}
+
+pub(crate) fn filter_out_base_system_contracts(all_bytecode_hashes: &mut HashSet<U256>) {
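+    // Removes the hashes of the base system contracts (the default AA and, if configured,
+    // the EVM emulator), so that callers can assert on user-deployed bytecodes only.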
all_bytecode_hashes.remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); + if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator { + all_bytecode_hashes.remove(&h256_to_u256(evm_emulator.hash)); + } +} + +pub(super) fn default_system_env() -> SystemEnv { + SystemEnv { + zk_porter_available: false, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: BaseSystemContracts::playground(), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + chain_id: L2ChainId::from(270), + } +} + +pub(super) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { + // Add a bias to the timestamp to make it more realistic / "random". + let timestamp = 1_700_000_000 + u64::from(number.0); + L1BatchEnv { + previous_batch_hash: None, + number, + timestamp, + fee_input: BatchFeeInput::l1_pegged( + 50_000_000_000, // 50 gwei + 250_000_000, // 0.25 gwei + ), + fee_account: Address::repeat_byte(1), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 1, + timestamp, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 100, + }, + } +} + +pub(super) fn default_pubdata_builder() -> Rc { + Rc::new(RollupPubdataBuilder::new(Address::zero())) +} + +pub(super) fn make_address_rich(storage: &mut InMemoryStorage, address: Address) { + let key = storage_key_for_eth_balance(&address); + storage.set_value(key, u256_to_h256(U256::from(10_u64.pow(19)))); +} + +#[derive(Debug, Clone)] +pub(super) struct ContractToDeploy { + bytecode: Vec, + address: Address, + is_account: bool, + is_funded: bool, +} + +impl ContractToDeploy { + pub fn new(bytecode: Vec, address: Address) -> Self { + Self { + bytecode, + address, + is_account: false, + is_funded: false, + } + } + + pub fn account(bytecode: Vec, address: Address) -> Self { + Self { + bytecode, + address, + is_account: true, + is_funded: false, + } + } + + #[must_use] + pub fn funded(mut self) -> Self { + self.is_funded = true; + self + } + + pub fn insert(&self, storage: &mut InMemoryStorage) { + let deployer_code_key = get_code_key(&self.address); + let bytecode_hash = BytecodeHash::for_bytecode(&self.bytecode).value(); + storage.set_value(deployer_code_key, bytecode_hash); + if self.is_account { + let is_account_key = get_is_account_key(&self.address); + storage.set_value(is_account_key, u256_to_h256(1_u32.into())); + } + storage.store_factory_dep(bytecode_hash, self.bytecode.clone()); + + if self.is_funded { + make_address_rich(storage, self.address); + } + } + + /// Inserts the contracts into the test environment, bypassing the deployer system contract. 
+ pub fn insert_all(contracts: &[Self], storage: &mut InMemoryStorage) { + for contract in contracts { + contract.insert(storage); + } + } +} diff --git a/core/lib/multivm/src/versions/testonly/nonce_holder.rs b/core/lib/multivm/src/versions/testonly/nonce_holder.rs new file mode 100644 index 000000000000..41d5202fbf15 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/nonce_holder.rs @@ -0,0 +1,200 @@ +use zksync_test_contracts::{Account, TestContract}; +use zksync_types::{Execute, ExecuteTransactionCommon, Nonce}; + +use super::{tester::VmTesterBuilder, ContractToDeploy, TestedVm}; +use crate::interface::{ + ExecutionResult, Halt, InspectExecutionMode, TxExecutionMode, TxRevertReason, VmInterfaceExt, + VmRevertReason, +}; + +pub enum NonceHolderTestMode { + SetValueUnderNonce, + IncreaseMinNonceBy5, + IncreaseMinNonceTooMuch, + LeaveNonceUnused, + IncreaseMinNonceBy1, + SwitchToArbitraryOrdering, +} + +impl From for u8 { + fn from(mode: NonceHolderTestMode) -> u8 { + match mode { + NonceHolderTestMode::SetValueUnderNonce => 0, + NonceHolderTestMode::IncreaseMinNonceBy5 => 1, + NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, + NonceHolderTestMode::LeaveNonceUnused => 3, + NonceHolderTestMode::IncreaseMinNonceBy1 => 4, + NonceHolderTestMode::SwitchToArbitraryOrdering => 5, + } + } +} + +fn run_nonce_test( + vm: &mut impl TestedVm, + account: &mut Account, + nonce: u32, + test_mode: NonceHolderTestMode, + error_message: Option, + comment: &'static str, +) { + vm.make_snapshot(); + let mut transaction = account.get_l2_tx_for_execute_with_nonce( + Execute { + contract_address: Some(account.address), + calldata: vec![12], + value: Default::default(), + factory_deps: vec![], + }, + None, + Nonce(nonce), + ); + let ExecuteTransactionCommon::L2(tx_data) = &mut transaction.common_data else { + unreachable!(); + }; + tx_data.signature = vec![test_mode.into()]; + vm.push_transaction(transaction); + let result = vm.execute(InspectExecutionMode::OneTx); + + if let Some(msg) = error_message { + let expected_error = + TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { + msg, + data: vec![], + })); + let ExecutionResult::Halt { reason } = &result.result else { + panic!("Expected revert, got {:?}", result.result); + }; + assert_eq!(reason.to_string(), expected_error.to_string(), "{comment}"); + vm.rollback_to_the_latest_snapshot(); + } else { + assert!(!result.result.is_failed(), "{}", comment); + vm.pop_snapshot_no_rollback(); + } +} + +pub(crate) fn test_nonce_holder() { + let builder = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1); + let account_address = builder.rich_account(0).address; + let mut vm = builder + .with_custom_contracts(vec![ContractToDeploy::account( + TestContract::nonce_holder().bytecode.to_vec(), + account_address, + )]) + .build::(); + let account = &mut vm.rich_accounts[0]; + let hex_addr = hex::encode(account.address.to_fixed_bytes()); + + // Test 1: trying to set value under non sequential nonce value. 
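+    // (Editor's note: in all cases below, `run_nonce_test` smuggles the requested
+    // `NonceHolderTestMode` to the account through the first byte of the transaction
+    // signature, see `tx_data.signature = vec![test_mode.into()]` above, so the cases
+    // differ only in the nonce used and the mode byte.)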
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        1u32,
+        NonceHolderTestMode::SetValueUnderNonce,
+        Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()),
+        "Allowed to set a value under a non-sequential nonce",
+    );
+
+    // Test 2: increase min nonce by 1 with sequential nonce ordering:
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        0u32,
+        NonceHolderTestMode::IncreaseMinNonceBy1,
+        None,
+        "Failed to increment nonce by 1 for a sequential account",
+    );
+
+    // Test 3: correctly set a value under a nonce with sequential nonce ordering:
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        1u32,
+        NonceHolderTestMode::SetValueUnderNonce,
+        None,
+        "Failed to set a value under a sequential nonce",
+    );
+
+    // Test 4: migrate to arbitrary nonce ordering:
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        2u32,
+        NonceHolderTestMode::SwitchToArbitraryOrdering,
+        None,
+        "Failed to switch to arbitrary ordering",
+    );
+
+    // Test 5: increase min nonce by 5
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        6u32,
+        NonceHolderTestMode::IncreaseMinNonceBy5,
+        None,
+        "Failed to increase min nonce by 5",
+    );
+
+    // Test 6: since the nonces in range [6,10] are no longer allowed, the
+    // tx with nonce 10 should not be allowed
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        10u32,
+        NonceHolderTestMode::IncreaseMinNonceBy5,
+        Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")),
+        "Allowed to reuse a nonce below the minimal one",
+    );
+
+    // Test 7: we should be able to use the unused nonce 13
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        13u32,
+        NonceHolderTestMode::SetValueUnderNonce,
+        None,
+        "Did not allow to use the unused nonce 13",
+    );
+
+    // Test 8: we should not be able to reuse nonce 13
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        13u32,
+        NonceHolderTestMode::IncreaseMinNonceBy5,
+        Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")),
+        "Allowed to reuse the same nonce twice",
+    );
+
+    // Test 9: we should be able to simply use nonce 14, while bumping the minimal nonce by 5
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        14u32,
+        NonceHolderTestMode::IncreaseMinNonceBy5,
+        None,
+        "Did not allow to use a bumped nonce",
+    );
+
+    // Test 10: do not allow bumping the min nonce by too much
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        16u32,
+        NonceHolderTestMode::IncreaseMinNonceTooMuch,
+        Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()),
+        "Allowed to increment the min nonce by too much",
+    );
+
+    // Test 11: do not allow leaving a nonce unused
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        16u32,
+        NonceHolderTestMode::LeaveNonceUnused,
+        Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")),
+        "Allowed to leave a nonce unused",
+    );
+}
diff --git a/core/lib/multivm/src/versions/testonly/precompiles.rs b/core/lib/multivm/src/versions/testonly/precompiles.rs
new file mode 100644
index 000000000000..e525bd627646
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/precompiles.rs
@@ -0,0 +1,111 @@
+use circuit_sequencer_api_1_5_0::geometry_config::get_geometry_config;
+use zksync_test_contracts::TestContract;
+use
zksync_types::{Address, Execute}; + +use super::{tester::VmTesterBuilder, TestedVm}; +use crate::{ + interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}, + versions::testonly::ContractToDeploy, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; + +pub(crate) fn test_keccak() { + // Execute special transaction and check that at least 1000 keccak calls were made. + let contract = TestContract::precompiles_test().bytecode.to_vec(); + let address = Address::repeat_byte(1); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_rich_accounts(1) + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) + .build::(); + + // calldata for `doKeccak(1000)`. + let keccak1000_calldata = + "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(address), + calldata: hex::decode(keccak1000_calldata).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx); + + let exec_result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let keccak_count = exec_result.statistics.circuit_statistic.keccak256 + * get_geometry_config().cycles_per_keccak256_circuit as f32; + assert!(keccak_count >= 1000.0, "{keccak_count}"); +} + +pub(crate) fn test_sha256() { + // Execute special transaction and check that at least 1000 `sha256` calls were made. + let contract = TestContract::precompiles_test().bytecode.to_vec(); + let address = Address::repeat_byte(1); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_rich_accounts(1) + .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) + .build::(); + + // calldata for `doSha256(1000)`. + let sha1000_calldata = + "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; + + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(address), + calldata: hex::decode(sha1000_calldata).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(tx); + + let exec_result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let sha_count = exec_result.statistics.circuit_statistic.sha256 + * get_geometry_config().cycles_per_sha256_circuit as f32; + assert!(sha_count >= 1000.0, "{sha_count}"); +} + +pub(crate) fn test_ecrecover() { + // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
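+    // Editor's note: like the keccak/sha256 tests above, the call count is recovered from
+    // circuit statistics: `circuit_statistic.ecrecover` measures fractional circuit usage,
+    // so multiplying by `cycles_per_ecrecover_circuit` yields the number of precompile
+    // cycles, which is then compared against 1.0 with a small tolerance.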
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_rich_accounts(1)
+        .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT)
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .build::<VM>();
+
+    let account = &mut vm.rich_accounts[0];
+    let tx = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(account.address),
+            calldata: vec![],
+            value: 0.into(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+    vm.vm.push_transaction(tx);
+
+    let exec_result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!exec_result.result.is_failed(), "{exec_result:#?}");
+
+    let ecrecover_count = exec_result.statistics.circuit_statistic.ecrecover
+        * get_geometry_config().cycles_per_ecrecover_circuit as f32;
+    assert!((ecrecover_count - 1.0).abs() < 1e-4, "{ecrecover_count}");
+}
diff --git a/core/lib/multivm/src/versions/testonly/refunds.rs b/core/lib/multivm/src/versions/testonly/refunds.rs
new file mode 100644
index 000000000000..384a3edb7dbd
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/refunds.rs
@@ -0,0 +1,222 @@
+use ethabi::Token;
+use zksync_test_contracts::{TestContract, TxType};
+use zksync_types::{Address, Execute, U256};
+
+use super::{default_pubdata_builder, tester::VmTesterBuilder, ContractToDeploy, TestedVm};
+use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt};
+
+pub(crate) fn test_predetermined_refunded_gas<VM: TestedVm>() {
+    // In this test, we compare the execution of the bootloader with and without a predefined
+    // refunded gas value.
+
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .build::<VM>();
+    let l1_batch = vm.l1_batch_env.clone();
+
+    let account = &mut vm.rich_accounts[0];
+
+    let tx = account
+        .get_deploy_tx(TestContract::counter().bytecode, None, TxType::L2)
+        .tx;
+    vm.vm.push_transaction(tx.clone());
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+
+    assert!(!result.result.is_failed());
+
+    // If the refund provided by the operator or the final refund is 0, the operator's refund
+    // has no impact at all, and this test does not make much sense.
+    assert!(
+        result.refunds.operator_suggested_refund > 0,
+        "The operator's refund is 0"
+    );
+    assert!(result.refunds.gas_refunded > 0, "The final refund is 0");
+
+    let result_without_predefined_refunds = vm
+        .vm
+        .finish_batch(default_pubdata_builder())
+        .block_tip_execution_result;
+    let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state();
+    assert!(!result_without_predefined_refunds.result.is_failed());
+
+    // Here we want to provide the same refund from the operator and check that it's the correct one.
+    // We execute the whole block without the refund tracer, because the refund tracer would eventually override the provided refund.
+ // But the overall result should be the same + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch.clone()) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + assert_eq!(account.address(), vm.rich_accounts[0].address()); + + vm.vm + .push_transaction_with_refund(tx.clone(), result.refunds.gas_refunded); + + let result_with_predefined_refunds = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; + let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); + + assert!(!result_with_predefined_refunds.result.is_failed()); + + // We need to sort these lists as those are flattened from HashMaps + current_state_with_predefined_refunds + .used_contract_hashes + .sort(); + current_state_without_predefined_refunds + .used_contract_hashes + .sort(); + + assert_eq!( + current_state_with_predefined_refunds.events, + current_state_without_predefined_refunds.events + ); + + assert_eq!( + current_state_with_predefined_refunds.user_l2_to_l1_logs, + current_state_without_predefined_refunds.user_l2_to_l1_logs + ); + + assert_eq!( + current_state_with_predefined_refunds.system_logs, + current_state_without_predefined_refunds.system_logs + ); + + assert_eq!( + current_state_with_predefined_refunds.deduplicated_storage_logs, + current_state_without_predefined_refunds.deduplicated_storage_logs + ); + assert_eq!( + current_state_with_predefined_refunds.used_contract_hashes, + current_state_without_predefined_refunds.used_contract_hashes + ); + + // In this test we put the different refund from the operator. + // We still can't use the refund tracer, because it will override the refund. + // But we can check that the logs and events have changed. 
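+    // Editor's note: the assertions below pin down exactly which artifacts are
+    // refund-sensitive: event and system-log contents change with the refund (hence the
+    // `assert_ne!`s), while user L2->L1 logs, used contract hashes, and the number of
+    // deduplicated storage logs stay the same (hence the `assert_eq!`s).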
+ let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_l1_batch_env(l1_batch) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + assert_eq!(account.address(), vm.rich_accounts[0].address()); + + let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; + vm.vm + .push_transaction_with_refund(tx, changed_operator_suggested_refund); + let result = vm + .vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; + let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); + + assert!(!result.result.is_failed()); + current_state_with_changed_predefined_refunds + .used_contract_hashes + .sort(); + current_state_without_predefined_refunds + .used_contract_hashes + .sort(); + + assert_eq!( + current_state_with_changed_predefined_refunds.events.len(), + current_state_without_predefined_refunds.events.len() + ); + + assert_ne!( + current_state_with_changed_predefined_refunds.events, + current_state_without_predefined_refunds.events + ); + + assert_eq!( + current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, + current_state_without_predefined_refunds.user_l2_to_l1_logs + ); + + assert_ne!( + current_state_with_changed_predefined_refunds.system_logs, + current_state_without_predefined_refunds.system_logs + ); + + assert_eq!( + current_state_with_changed_predefined_refunds + .deduplicated_storage_logs + .len(), + current_state_without_predefined_refunds + .deduplicated_storage_logs + .len() + ); + + assert_ne!( + current_state_with_changed_predefined_refunds.deduplicated_storage_logs, + current_state_without_predefined_refunds.deduplicated_storage_logs + ); + assert_eq!( + current_state_with_changed_predefined_refunds.used_contract_hashes, + current_state_without_predefined_refunds.used_contract_hashes + ); +} + +pub(crate) fn test_negative_pubdata_for_transaction() { + let expensive_contract_address = Address::repeat_byte(1); + let expensive_contract = TestContract::expensive(); + let expensive_function = expensive_contract.function("expensive"); + let cleanup_function = expensive_contract.function("cleanUp"); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ContractToDeploy::new( + TestContract::expensive().bytecode.to_vec(), + expensive_contract_address, + )]) + .build::(); + + let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute { + contract_address: Some(expensive_contract_address), + calldata: expensive_function + .encode_input(&[Token::Uint(10.into())]) + .unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(expensive_tx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + + // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
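+    // Editor's note: the two assertions after this execution check that, with a negative
+    // pubdata delta, the operator-suggested refund is positive and is granted in full,
+    // i.e. `gas_refunded == operator_suggested_refund`.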
+ let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( + Execute { + contract_address: Some(expensive_contract_address), + calldata: cleanup_function.encode_input(&[]).unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ); + vm.vm.push_transaction(clean_up_tx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!( + !result.result.is_failed(), + "Transaction wasn't successful: {result:#?}" + ); + assert!(result.refunds.operator_suggested_refund > 0); + assert_eq!( + result.refunds.gas_refunded, + result.refunds.operator_suggested_refund + ); +} diff --git a/core/lib/multivm/src/versions/testonly/require_eip712.rs b/core/lib/multivm/src/versions/testonly/require_eip712.rs new file mode 100644 index 000000000000..7a934c570aea --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/require_eip712.rs @@ -0,0 +1,145 @@ +use ethabi::Token; +use zksync_eth_signer::TransactionParameters; +use zksync_test_contracts::TestContract; +use zksync_types::{ + fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, Address, Eip712Domain, Execute, + L2ChainId, Nonce, Transaction, U256, +}; + +use super::{tester::VmTesterBuilder, ContractToDeploy, TestedVm}; +use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; + +/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy +/// and EIP712 transactions. +/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. +pub(crate) fn test_require_eip712() { + // Use 3 accounts: + // - `private_address` - EOA account, where we have the key + // - `account_address` - AA account, where the contract is deployed + // - beneficiary - an EOA account, where we'll try to transfer the tokens. + let aa_address = Address::repeat_byte(0x10); + let beneficiary_address = Address::repeat_byte(0x20); + + let bytecode = TestContract::many_owners().bytecode.to_vec(); + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_custom_contracts(vec![ + ContractToDeploy::account(bytecode, aa_address).funded() + ]) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + assert_eq!(vm.get_eth_balance(beneficiary_address), U256::from(0)); + let chain_id: u32 = 270; + let mut private_account = vm.rich_accounts[0].clone(); + + // First, let's set the owners of the AA account to the `private_address`. + // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). + let set_owners_function = TestContract::many_owners().function("setOwners"); + let encoded_input = set_owners_function + .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) + .unwrap(); + + let tx = private_account.get_l2_tx_for_execute( + Execute { + contract_address: Some(aa_address), + calldata: encoded_input, + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.push_transaction(tx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!result.result.is_failed()); + + let private_account_balance = vm.get_eth_balance(private_account.address); + + // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). + // Normally this would not work - unless the operator is malicious. 
+ let aa_raw_tx = TransactionParameters { + nonce: U256::from(0), + to: Some(beneficiary_address), + gas: U256::from(100000000), + gas_price: Some(U256::from(10000000)), + value: U256::from(888000088), + data: vec![], + chain_id: 270, + transaction_type: None, + access_list: None, + max_fee_per_gas: U256::from(1000000000), + max_priority_fee_per_gas: U256::from(1000000000), + max_fee_per_blob_gas: None, + blob_versioned_hashes: None, + }; + + let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); + let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); + + let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000, false).unwrap(); + l2_tx.set_input(aa_tx, hash); + // Pretend that operator is malicious and sets the initiator to the AA account. + l2_tx.common_data.initiator_address = aa_address; + let transaction: Transaction = l2_tx.into(); + + vm.vm.push_transaction(transaction); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!result.result.is_failed()); + + assert_eq!( + vm.get_eth_balance(beneficiary_address), + U256::from(888000088) + ); + // Make sure that the tokens were transferred from the AA account. + assert_eq!( + private_account_balance, + vm.get_eth_balance(private_account.address) + ); + + // // Now send the 'classic' EIP712 transaction + let tx_712 = L2Tx::new( + Some(beneficiary_address), + vec![], + Nonce(1), + Fee { + gas_limit: U256::from(1000000000), + max_fee_per_gas: U256::from(1000000000), + max_priority_fee_per_gas: U256::from(1000000000), + gas_per_pubdata_limit: U256::from(1000000000), + }, + aa_address, + U256::from(28374938), + vec![], + Default::default(), + ); + + let mut transaction_request: TransactionRequest = tx_712.into(); + transaction_request.chain_id = Some(chain_id.into()); + + let domain = Eip712Domain::new(L2ChainId::from(chain_id)); + let signature = private_account + .get_pk_signer() + .sign_typed_data(&domain, &transaction_request) + .unwrap(); + let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); + + let (aa_txn_request, aa_hash) = + TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); + + let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000, false).unwrap(); + l2_tx.set_input(encoded_tx, aa_hash); + + let transaction: Transaction = l2_tx.into(); + vm.vm.push_transaction(transaction); + vm.vm.execute(InspectExecutionMode::OneTx); + + assert_eq!( + vm.get_eth_balance(beneficiary_address), + U256::from(916375026) + ); + assert_eq!( + private_account_balance, + vm.get_eth_balance(private_account.address) + ); +} diff --git a/core/lib/multivm/src/versions/testonly/rollbacks.rs b/core/lib/multivm/src/versions/testonly/rollbacks.rs new file mode 100644 index 000000000000..9a825c08d49b --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/rollbacks.rs @@ -0,0 +1,214 @@ +use std::collections::HashMap; + +use assert_matches::assert_matches; +use ethabi::Token; +use zksync_test_contracts::{ + DeployContractsTx, LoadnextContractExecutionParams, TestContract, TxType, +}; +use zksync_types::{Address, Execute, Nonce, U256}; + +use super::{ + tester::{TransactionTestInfo, TxModifier, VmTesterBuilder}, + ContractToDeploy, TestedVm, +}; +use crate::interface::{storage::ReadStorage, ExecutionResult, TxExecutionMode, VmInterfaceExt}; + +pub(crate) fn test_vm_rollbacks() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + + 
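+    // Editor's note: the strategy of this test is to run the same three deploy
+    // transactions twice: once cleanly, and once interleaved with transactions that are
+    // expected to be rejected (wrong signature length, magic value, signature, or nonce)
+    // and rolled back; the resulting batch states must match exactly.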
let mut account = vm.rich_accounts[0].clone(); + let counter = TestContract::counter().bytecode; + let tx_0 = account.get_deploy_tx(counter, None, TxType::L2).tx; + let tx_1 = account.get_deploy_tx(counter, None, TxType::L2).tx; + let tx_2 = account.get_deploy_tx(counter, None, TxType::L2).tx; + + let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(tx_0.clone(), false), + TransactionTestInfo::new_processed(tx_1.clone(), false), + TransactionTestInfo::new_processed(tx_2.clone(), false), + ]); + + // reset vm + vm.reset_with_empty_storage(); + + let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), + TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), + // The correct nonce is 0, this tx will fail + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), + ), + // This tx will succeed + TransactionTestInfo::new_processed(tx_0.clone(), false), + // The correct nonce is 1, this tx will fail + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), + // The correct nonce is 1, this tx will fail + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), + ), + // This tx will succeed + TransactionTestInfo::new_processed(tx_1, false), + // The correct nonce is 2, this tx will fail + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), + // This tx will succeed + TransactionTestInfo::new_processed(tx_2.clone(), false), + // This tx will fail + TransactionTestInfo::new_rejected( + tx_2.clone(), + TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), + ), + TransactionTestInfo::new_rejected( + tx_0.clone(), + TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), + ), + ]); + + pretty_assertions::assert_eq!(result_without_rollbacks, result_with_rollbacks); +} + +pub(crate) fn test_vm_loadnext_rollbacks() { + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + let mut account = vm.rich_accounts[0].clone(); + + let loadnext_contract = TestContract::load_test(); + let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; + let DeployContractsTx { + tx: loadnext_deploy_tx, + address, + .. 
+ } = account.get_deploy_tx_with_factory_deps( + loadnext_contract.bytecode, + Some(loadnext_constructor_data), + loadnext_contract.factory_deps(), + TxType::L2, + ); + + let loadnext_tx_1 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(address), + calldata: LoadnextContractExecutionParams { + reads: 100, + initial_writes: 100, + repeated_writes: 100, + events: 100, + hashes: 500, + recursive_calls: 10, + deploys: 60, + } + .to_bytes(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + + let loadnext_tx_2 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(address), + calldata: LoadnextContractExecutionParams { + reads: 100, + initial_writes: 100, + repeated_writes: 100, + events: 100, + hashes: 500, + recursive_calls: 10, + deploys: 60, + } + .to_bytes(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + + let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), + ]); + + // reset vm + vm.reset_with_empty_storage(); + + let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ + TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), + TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), + TransactionTestInfo::new_rejected( + loadnext_deploy_tx.clone(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), + loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), + ), + TransactionTestInfo::new_processed(loadnext_tx_1, false), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), + TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), + TransactionTestInfo::new_rejected( + loadnext_deploy_tx.clone(), + TxModifier::NonceReused( + loadnext_deploy_tx.initiator_account(), + loadnext_deploy_tx.nonce().unwrap(), + ) + .into(), + ), + TransactionTestInfo::new_processed(loadnext_tx_2, false), + ]); + + assert_eq!(result_without_rollbacks, result_with_rollbacks); +} + +pub(crate) fn test_rollback_in_call_mode() { + let counter_bytecode = TestContract::counter().bytecode.to_vec(); + let counter_address = Address::repeat_byte(1); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::EthCall) + .with_custom_contracts(vec![ContractToDeploy::new( + counter_bytecode, + counter_address, + )]) + .with_rich_accounts(1) + .build::(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_test_contract_transaction(counter_address, true, None, false, TxType::L2); + + let (compression_result, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + compression_result.unwrap(); + assert_matches!( + vm_result.result, + ExecutionResult::Revert { output } + if output.to_string().contains("This method always reverts") + ); + + let storage_logs = &vm_result.logs.storage_logs; + let deduplicated_logs = storage_logs + .iter() + .filter_map(|log| log.log.is_write().then_some((log.log.key, log.log.value))); + let deduplicated_logs: HashMap<_, _> = deduplicated_logs.collect(); + // Check that all storage changes are reverted + let mut storage = vm.storage.borrow_mut(); + for (key, value) in deduplicated_logs { + assert_eq!(storage.inner_mut().read_value(&key), value); + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs 
b/core/lib/multivm/src/versions/testonly/secp256r1.rs similarity index 86% rename from core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs rename to core/lib/multivm/src/versions/testonly/secp256r1.rs index 55ca372c4a9f..8a6077ab522f 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs +++ b/core/lib/multivm/src/versions/testonly/secp256r1.rs @@ -1,23 +1,19 @@ use zk_evm_1_5_0::zkevm_opcode_defs::p256; use zksync_system_constants::P256VERIFY_PRECOMPILE_ADDRESS; -use zksync_types::{web3::keccak256, Execute, H256, U256}; -use zksync_utils::h256_to_u256; +use zksync_types::{h256_to_u256, web3::keccak256, Execute, H256, U256}; -use crate::{ - interface::{ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_fast::tests::tester::VmTesterBuilder, -}; +use super::{tester::VmTesterBuilder, TestedVm}; +use crate::interface::{ExecutionResult, InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; -#[test] -fn test_sekp256r1() { +pub(crate) fn test_secp256r1() { // In this test, we aim to test whether a simple account interaction (without any fee logic) // will work. The account will try to deploy a simple contract from integration tests. let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_execution_mode(TxExecutionMode::EthCall) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::(); let account = &mut vm.rich_accounts[0]; @@ -58,7 +54,7 @@ fn test_sekp256r1() { vm.vm.push_transaction(tx); - let execution_result = vm.vm.execute(VmExecutionMode::Batch); + let execution_result = vm.vm.execute(InspectExecutionMode::OneTx); let ExecutionResult::Success { output } = execution_result.result else { panic!("batch failed") diff --git a/core/lib/multivm/src/versions/testonly/simple_execution.rs b/core/lib/multivm/src/versions/testonly/simple_execution.rs new file mode 100644 index 000000000000..13dd7d617d82 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/simple_execution.rs @@ -0,0 +1,77 @@ +use assert_matches::assert_matches; +use zksync_test_contracts::TxType; + +use super::{default_pubdata_builder, tester::VmTesterBuilder, TestedVm}; +use crate::interface::{ExecutionResult, InspectExecutionMode, VmInterfaceExt}; + +pub(crate) fn test_estimate_fee() { + let mut vm_tester = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_rich_accounts(1) + .build::(); + + vm_tester.deploy_test_contract(); + let account = &mut vm_tester.rich_accounts[0]; + + let tx = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + false, + Default::default(), + false, + TxType::L2, + ); + + vm_tester.vm.push_transaction(tx); + + let result = vm_tester.vm.execute(InspectExecutionMode::OneTx); + assert_matches!(result.result, ExecutionResult::Success { .. 
}); +} + +pub(crate) fn test_simple_execute() { + let mut vm_tester = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_rich_accounts(1) + .build::(); + + vm_tester.deploy_test_contract(); + + let account = &mut vm_tester.rich_accounts[0]; + + let tx1 = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + false, + Default::default(), + false, + TxType::L1 { serial_id: 1 }, + ); + + let tx2 = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + true, + Default::default(), + false, + TxType::L1 { serial_id: 1 }, + ); + + let tx3 = account.get_test_contract_transaction( + vm_tester.test_contract.unwrap(), + false, + Default::default(), + false, + TxType::L1 { serial_id: 1 }, + ); + let vm = &mut vm_tester.vm; + vm.push_transaction(tx1); + vm.push_transaction(tx2); + vm.push_transaction(tx3); + let tx = vm.execute(InspectExecutionMode::OneTx); + assert_matches!(tx.result, ExecutionResult::Success { .. }); + let tx = vm.execute(InspectExecutionMode::OneTx); + assert_matches!(tx.result, ExecutionResult::Revert { .. }); + let tx = vm.execute(InspectExecutionMode::OneTx); + assert_matches!(tx.result, ExecutionResult::Success { .. }); + let block_tip = vm + .finish_batch(default_pubdata_builder()) + .block_tip_execution_result; + assert_matches!(block_tip.result, ExecutionResult::Success { .. }); +} diff --git a/core/lib/multivm/src/versions/testonly/storage.rs b/core/lib/multivm/src/versions/testonly/storage.rs new file mode 100644 index 000000000000..d57acc37944a --- /dev/null +++ b/core/lib/multivm/src/versions/testonly/storage.rs @@ -0,0 +1,109 @@ +use ethabi::Token; +use zksync_test_contracts::TestContract; +use zksync_types::{Address, Execute, U256}; + +use super::{tester::VmTesterBuilder, ContractToDeploy, TestedVm}; +use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt}; + +fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Vec) -> u32 { + let bytecode = TestContract::storage_test().bytecode.to_vec(); + let test_contract_address = Address::repeat_byte(1); + + // In this test, we aim to test whether a simple account interaction (without any fee logic) + // will work. The account will try to deploy a simple contract from integration tests. + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .with_custom_contracts(vec![ContractToDeploy::new(bytecode, test_contract_address)]) + .build::(); + + let account = &mut vm.rich_accounts[0]; + + let tx1 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(test_contract_address), + calldata: first_tx_calldata, + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + + let tx2 = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(test_contract_address), + calldata: second_tx_calldata, + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + + vm.vm.make_snapshot(); + vm.vm.push_transaction(tx1); + let result = vm.vm.execute(InspectExecutionMode::OneTx); + assert!(!result.result.is_failed(), "First tx failed"); + vm.vm.pop_snapshot_no_rollback(); + + // We rollback once because transient storage and rollbacks are a tricky combination. 
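+    // Editor's note: executing `tx2`, rolling it back, and re-executing it verifies that
+    // a rollback also reverts transient storage, so the second run starts from the same
+    // clean transient state.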
+    vm.vm.make_snapshot();
+    vm.vm.push_transaction(tx2.clone());
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!result.result.is_failed(), "Second tx failed");
+    vm.vm.rollback_to_the_latest_snapshot();
+
+    vm.vm.make_snapshot();
+    vm.vm.push_transaction(tx2);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!result.result.is_failed(), "Second tx failed on second run");
+
+    result.statistics.pubdata_published
+}
+
+fn test_storage_one_tx<VM: TestedVm>(second_tx_calldata: Vec<u8>) -> u32 {
+    test_storage::<VM>(vec![], second_tx_calldata)
+}
+
+pub(crate) fn test_storage_behavior<VM: TestedVm>() {
+    let contract = TestContract::storage_test();
+
+    // In all of the tests below we provide the first tx to ensure that the tracers will not include
+    // the statistics from the start of the bootloader and will only include those for the transaction itself.
+
+    let base_pubdata = test_storage_one_tx::<VM>(vec![]);
+    let simple_test_pubdata =
+        test_storage_one_tx::<VM>(contract.function("simpleWrite").encode_input(&[]).unwrap());
+    let resetting_write_pubdata = test_storage_one_tx::<VM>(
+        contract
+            .function("resettingWrite")
+            .encode_input(&[])
+            .unwrap(),
+    );
+    let resetting_write_via_revert_pubdata = test_storage_one_tx::<VM>(
+        contract
+            .function("resettingWriteViaRevert")
+            .encode_input(&[])
+            .unwrap(),
+    );
+
+    assert_eq!(simple_test_pubdata - base_pubdata, 65);
+    assert_eq!(resetting_write_pubdata - base_pubdata, 34);
+    assert_eq!(resetting_write_via_revert_pubdata - base_pubdata, 34);
+}
+
+pub(crate) fn test_transient_storage_behavior<VM: TestedVm>() {
+    let contract = TestContract::storage_test();
+
+    let first_tstore_test = contract
+        .function("testTransientStore")
+        .encode_input(&[])
+        .unwrap();
+    // Second transaction checks that, as expected, the transient storage is cleared after the first transaction.
+    let second_tstore_test = contract
+        .function("assertTValue")
+        .encode_input(&[Token::Uint(U256::zero())])
+        .unwrap();
+
+    test_storage::<VM>(first_tstore_test, second_tstore_test);
+}
diff --git a/core/lib/multivm/src/versions/testonly/tester/mod.rs b/core/lib/multivm/src/versions/testonly/tester/mod.rs
new file mode 100644
index 000000000000..32499e409d82
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/tester/mod.rs
@@ -0,0 +1,231 @@
+use std::{collections::HashSet, fmt, rc::Rc};
+
+use zksync_contracts::BaseSystemContracts;
+use zksync_test_contracts::{Account, TestContract, TxType};
+use zksync_types::{
+    utils::{deployed_address_create, storage_key_for_eth_balance},
+    writes::StateDiffRecord,
+    Address, L1BatchNumber, StorageKey, Transaction, H256, U256,
+};
+use zksync_vm_interface::{
+    pubdata::PubdataBuilder, CurrentExecutionState, InspectExecutionMode, VmExecutionResultAndLogs,
+    VmInterfaceHistoryEnabled,
+};
+
+pub(crate) use self::transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier};
+use super::get_empty_storage;
+use crate::{
+    interface::{
+        storage::{InMemoryStorage, StoragePtr, StorageView},
+        L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmFactory, VmInterfaceExt,
+    },
+    versions::testonly::{
+        default_l1_batch, default_system_env, make_address_rich, ContractToDeploy,
+    },
+};
+
+mod transaction_test_info;
+
+/// VM tester that provides prefunded accounts, storage handle etc.
+#[derive(Debug)]
+pub(crate) struct VmTester<VM> {
+    pub(crate) vm: VM,
+    pub(crate) system_env: SystemEnv,
+    pub(crate) l1_batch_env: L1BatchEnv,
+    pub(crate) storage: StoragePtr<StorageView<InMemoryStorage>>,
+    pub(crate) test_contract: Option<Address>,
+    pub(crate) rich_accounts: Vec<Account>,
+}
+
+impl<VM: TestedVm> VmTester<VM> {
+    pub(crate) fn deploy_test_contract(&mut self) {
+        let contract = TestContract::counter().bytecode;
+        let account = &mut self.rich_accounts[0];
+        let tx = account.get_deploy_tx(contract, None, TxType::L2).tx;
+        let nonce = tx.nonce().unwrap().0.into();
+        self.vm.push_transaction(tx);
+        self.vm.execute(InspectExecutionMode::OneTx);
+        let deployed_address = deployed_address_create(account.address, nonce);
+        self.test_contract = Some(deployed_address);
+    }
+
+    pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 {
+        self.vm.read_storage(storage_key_for_eth_balance(&address))
+    }
+
+    pub(crate) fn reset_with_empty_storage(&mut self) {
+        let mut storage = get_empty_storage();
+        for account in &self.rich_accounts {
+            make_address_rich(&mut storage, account.address);
+        }
+
+        let storage = StorageView::new(storage).to_rc_ptr();
+        self.storage = storage.clone();
+        self.vm = VM::new(self.l1_batch_env.clone(), self.system_env.clone(), storage);
+    }
+}
+
+/// Builder for [`VmTester`].
+#[derive(Debug)]
+pub(crate) struct VmTesterBuilder {
+    storage: Option<InMemoryStorage>,
+    l1_batch_env: Option<L1BatchEnv>,
+    system_env: SystemEnv,
+    rich_accounts: Vec<Account>,
+    custom_contracts: Vec<ContractToDeploy>,
+}
+
+impl VmTesterBuilder {
+    pub(crate) fn new() -> Self {
+        Self {
+            storage: None,
+            l1_batch_env: None,
+            system_env: default_system_env(),
+            rich_accounts: vec![],
+            custom_contracts: vec![],
+        }
+    }
+
+    pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self {
+        self.system_env = system_env;
+        self
+    }
+
+    pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self {
+        self.l1_batch_env = Some(l1_batch_env);
+        self
+    }
+
+    pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self {
+        self.storage = Some(storage);
+        self
+    }
+
+    pub(crate) fn with_base_system_smart_contracts(
+        mut self,
+        base_system_smart_contracts: BaseSystemContracts,
+    ) -> Self {
+        self.system_env.base_system_smart_contracts = base_system_smart_contracts;
+        self
+    }
+
+    pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self {
+        self.system_env.bootloader_gas_limit = gas_limit;
+        self
+    }
+
+    pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self {
+        self.system_env.execution_mode = execution_mode;
+        self
+    }
+
+    pub(crate) fn with_empty_in_memory_storage(mut self) -> Self {
+        self.storage = Some(get_empty_storage());
+        self
+    }
+
+    /// Creates the specified number of pre-funded accounts.
+    pub(crate) fn with_rich_accounts(mut self, number: u32) -> Self {
+        for i in 0..number {
+            self.rich_accounts.push(Account::from_seed(i));
+        }
+        self
+    }
+
+    pub(crate) fn rich_account(&self, index: usize) -> &Account {
+        &self.rich_accounts[index]
+    }
+
+    pub(crate) fn with_custom_contracts(mut self, contracts: Vec<ContractToDeploy>) -> Self {
+        self.custom_contracts = contracts;
+        self
+    }
+
+    pub(crate) fn build<VM>(self) -> VmTester<VM>
+    where
+        VM: VmFactory<StorageView<InMemoryStorage>>,
+    {
+        let l1_batch_env = self
+            .l1_batch_env
+            .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1)));
+
+        let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage);
+        ContractToDeploy::insert_all(&self.custom_contracts, &mut raw_storage);
+        let storage = StorageView::new(raw_storage).to_rc_ptr();
+        for account in &self.rich_accounts {
+            make_address_rich(storage.borrow_mut().inner_mut(), account.address);
+        }
+
+        let vm = VM::new(
+            l1_batch_env.clone(),
+            self.system_env.clone(),
+            storage.clone(),
+        );
+        VmTester {
+            vm,
+            system_env: self.system_env,
+            l1_batch_env,
+            storage,
+            test_contract: None,
+            rich_accounts: self.rich_accounts.clone(),
+        }
+    }
+}
+
+/// Test extensions for VM.
+pub(crate) trait TestedVm:
+    VmFactory<StorageView<InMemoryStorage>> + VmInterfaceHistoryEnabled
+{
+    type StateDump: fmt::Debug + PartialEq;
+
+    fn dump_state(&self) -> Self::StateDump;
+
+    fn gas_remaining(&mut self) -> u32;
+
+    fn get_current_execution_state(&self) -> CurrentExecutionState;
+
+    /// Unlike [`Self::known_bytecode_hashes()`], the output should only include successfully decommitted bytecodes.
+    fn decommitted_hashes(&self) -> HashSet<U256>;
+
+    fn finish_batch_with_state_diffs(
+        &mut self,
+        diffs: Vec<StateDiffRecord>,
+        pubdata_builder: Rc<dyn PubdataBuilder>,
+    ) -> VmExecutionResultAndLogs;
+
+    fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs;
+
+    fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]);
+
+    /// Includes bytecodes that have failed to decommit. Should exclude base system contract bytecodes (default AA / EVM emulator).
+    fn known_bytecode_hashes(&self) -> HashSet<U256>;
+
+    /// Returns `true` iff the decommit is fresh.
+    fn manually_decommit(&mut self, code_hash: H256) -> bool;
+
+    fn verify_required_bootloader_heap(&self, cells: &[(u32, U256)]);
+
+    fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]);
+
+    /// Reads storage accounting for changes made during the VM run.
+    fn read_storage(&mut self, key: StorageKey) -> U256;
+
+    fn verify_required_storage(&mut self, cells: &[(StorageKey, U256)]) {
+        for &(key, expected_value) in cells {
+            assert_eq!(
+                self.read_storage(key),
+                expected_value,
+                "Unexpected storage value at {key:?}"
+            );
+        }
+    }
+
+    /// Returns the current hash of the latest L2 block.
+    fn last_l2_block_hash(&self) -> H256;
+
+    /// Same as `start_new_l2_block`, but should skip consistency checks (to verify they are performed by the bootloader).
+    fn push_l2_block_unchecked(&mut self, block: L2BlockEnv);
+
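+    // (Illustrative assumption, not spelled out in this file: each VM version is
+    // expected to implement `TestedVm` in its own test module, e.g. roughly
+    //     impl TestedVm for Vm<StorageView<InMemoryStorage>, HistoryEnabled> { ... }
+    // with `StateDump` capturing the full internal VM state so the rollback tests
+    // can compare before/after states via `PartialEq`.)
+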
+    /// Pushes a transaction with predefined refund value.
+    fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64);
+}
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs
similarity index 86%
rename from core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs
rename to core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs
index e2155c02b7e1..b9373e331c30 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs
+++ b/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs
@@ -1,12 +1,12 @@
 use zksync_types::{ExecuteTransactionCommon, Nonce, Transaction, H160};
 
+use super::{TestedVm, VmTester};
 use crate::{
     interface::{
-        CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode,
-        VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled,
-        VmRevertReason,
+        CurrentExecutionState, ExecutionResult, Halt, InspectExecutionMode, TxRevertReason,
+        VmExecutionResultAndLogs, VmInterfaceExt, VmRevertReason,
     },
-    vm_latest::{tests::tester::vm_tester::VmTester, HistoryEnabled},
+    versions::testonly::default_pubdata_builder,
 };
 
 #[derive(Debug, Clone)]
@@ -176,7 +176,7 @@ impl TransactionTestInfo {
     }
 }
 
-impl VmTester<HistoryEnabled> {
+impl<VM: TestedVm> VmTester<VM> {
     pub(crate) fn execute_and_verify_txs(
         &mut self,
         txs: &[TransactionTestInfo],
@@ -184,7 +184,7 @@ impl VmTester<HistoryEnabled> {
         for tx_test_info in txs {
             self.execute_tx_and_verify(tx_test_info.clone());
         }
-        self.vm.execute(VmExecutionMode::Batch);
+        self.vm.finish_batch(default_pubdata_builder());
         let mut state = self.vm.get_current_execution_state();
         state.used_contract_hashes.sort();
         state
@@ -194,19 +194,29 @@ impl VmTester<HistoryEnabled> {
         &mut self,
         tx_test_info: TransactionTestInfo,
     ) -> VmExecutionResultAndLogs {
-        let inner_state_before = self.vm.dump_inner_state();
-        self.vm.make_snapshot();
-        self.vm.push_transaction(tx_test_info.tx.clone());
-        let result = self.vm.execute(VmExecutionMode::OneTx);
-        tx_test_info.verify_result(&result);
-        if tx_test_info.should_rollback() {
-            self.vm.rollback_to_the_latest_snapshot();
-            let inner_state_after = self.vm.dump_inner_state();
-            assert_eq!(
-                inner_state_before, inner_state_after,
-                "Inner state before and after rollback should be equal"
-            );
-        }
-        result
+        execute_tx_and_verify(&mut self.vm, tx_test_info)
+    }
+}
+
+fn execute_tx_and_verify(
+    vm: &mut impl TestedVm,
+    tx_test_info: TransactionTestInfo,
+) -> VmExecutionResultAndLogs {
+    let inner_state_before = vm.dump_state();
+    vm.make_snapshot();
+    vm.push_transaction(tx_test_info.tx.clone());
+    let result = vm.execute(InspectExecutionMode::OneTx);
+    tx_test_info.verify_result(&result);
+    if tx_test_info.should_rollback() {
+        vm.rollback_to_the_latest_snapshot();
+        let inner_state_after = vm.dump_state();
+        pretty_assertions::assert_eq!(
+            inner_state_before,
+            inner_state_after,
+            "Inner state before and after rollback should be equal"
+        );
+    } else {
+        vm.pop_snapshot_no_rollback();
     }
+    result
 }
diff --git a/core/lib/multivm/src/versions/testonly/tracing_execution_error.rs b/core/lib/multivm/src/versions/testonly/tracing_execution_error.rs
new file mode 100644
index 000000000000..14b4cb4873bb
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/tracing_execution_error.rs
@@ -0,0 +1,50 @@
+use zksync_test_contracts::TestContract;
+use zksync_types::{Address, Execute};
+
+use super::{tester::VmTesterBuilder, ContractToDeploy, TestedVm, BASE_SYSTEM_CONTRACTS};
+use crate::{
+    interface::{TxExecutionMode, TxRevertReason, VmRevertReason},
+    versions::testonly::tester::{ExpectedError, TransactionTestInfo},
+};
+
+pub(crate) fn test_tracing_of_execution_errors<VM: TestedVm>() {
+    let contract_address = Address::repeat_byte(1);
+    let bytecode = TestContract::reverts_test().bytecode.to_vec();
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone())
+        .with_custom_contracts(vec![ContractToDeploy::new(bytecode, contract_address)])
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .build::<VM>();
+
+    let account = &mut vm.rich_accounts[0];
+
+    let require_fn = TestContract::reverts_test().function("require_short");
+    let tx = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(contract_address),
+            calldata: require_fn.encode_input(&[]).unwrap(),
+            value: Default::default(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    vm.execute_tx_and_verify(TransactionTestInfo::new_rejected(
+        tx,
+        ExpectedError {
+            revert_reason: TxRevertReason::TxReverted(VmRevertReason::General {
+                msg: "short".to_string(),
+                data: vec![
+                    8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, 0, 0,
+                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                ],
+            }),
+            modifier: None,
+        },
+    ));
+}
diff --git a/core/lib/multivm/src/versions/testonly/transfer.rs b/core/lib/multivm/src/versions/testonly/transfer.rs
new file mode 100644
index 000000000000..1f504b382882
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/transfer.rs
@@ -0,0 +1,194 @@
+use ethabi::Token;
+use zksync_test_contracts::TestContract;
+use zksync_types::{u256_to_h256, utils::storage_key_for_eth_balance, Address, Execute, U256};
+
+use super::{
+    default_pubdata_builder, get_empty_storage, tester::VmTesterBuilder, ContractToDeploy, TestedVm,
+};
+use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt};
+
+enum TestOptions {
+    Send(U256),
+    Transfer(U256),
+}
+
+fn test_send_or_transfer<VM: TestedVm>(test_option: TestOptions) {
+    let test_contract = TestContract::transfer_test();
+    let test_contract_address = Address::repeat_byte(1);
+    let recipient_address = Address::repeat_byte(2);
+
+    let (value, calldata) = match test_option {
+        TestOptions::Send(value) => (
+            value,
+            test_contract
+                .function("send")
+                .encode_input(&[Token::Address(recipient_address), Token::Uint(value)])
+                .unwrap(),
+        ),
+        TestOptions::Transfer(value) => (
+            value,
+            test_contract
+                .function("transfer")
+                .encode_input(&[Token::Address(recipient_address), Token::Uint(value)])
+                .unwrap(),
+        ),
+    };
+
+    let mut storage = get_empty_storage();
+    storage.set_value(
+        storage_key_for_eth_balance(&test_contract_address),
+        u256_to_h256(value),
+    );
+
+    let mut vm = VmTesterBuilder::new()
+        .with_storage(storage)
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .with_custom_contracts(vec![
+            ContractToDeploy::new(
+                TestContract::transfer_test().bytecode.to_vec(),
+                test_contract_address,
+            ),
+            ContractToDeploy::new(
+                TestContract::transfer_recipient().bytecode.to_vec(),
+                recipient_address,
+            ),
+        ])
+        .build::<VM>();
+
+    let account = &mut vm.rich_accounts[0];
+    let tx = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(test_contract_address),
+            calldata,
+            value: U256::zero(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    vm.vm.push_transaction(tx);
+    let tx_result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(
+        !tx_result.result.is_failed(),
+        "Transaction wasn't successful"
+    );
+
+    let batch_result = vm
+        .vm
+        .finish_batch(default_pubdata_builder())
+        .block_tip_execution_result;
+    assert!(!batch_result.result.is_failed(), "Batch wasn't successful");
+
+    let new_recipient_balance = vm.get_eth_balance(recipient_address);
+    assert_eq!(new_recipient_balance, value);
+}
+
+pub(crate) fn test_send_and_transfer<VM: TestedVm>() {
+    test_send_or_transfer::<VM>(TestOptions::Send(U256::zero()));
+    test_send_or_transfer::<VM>(TestOptions::Send(U256::from(10).pow(18.into())));
+    test_send_or_transfer::<VM>(TestOptions::Transfer(U256::zero()));
+    test_send_or_transfer::<VM>(TestOptions::Transfer(U256::from(10).pow(18.into())));
+}
+
+fn test_reentrancy_protection_send_or_transfer<VM: TestedVm>(test_option: TestOptions) {
+    let test_contract = TestContract::transfer_test();
+    let reentrant_recipient_contract = TestContract::reentrant_recipient();
+    let test_contract_address = Address::repeat_byte(1);
+    let reentrant_recipient_address = Address::repeat_byte(2);
+
+    let (value, calldata) = match test_option {
+        TestOptions::Send(value) => (
+            value,
+            test_contract
+                .function("send")
+                .encode_input(&[
+                    Token::Address(reentrant_recipient_address),
+                    Token::Uint(value),
+                ])
+                .unwrap(),
+        ),
+        TestOptions::Transfer(value) => (
+            value,
+            test_contract
+                .function("transfer")
+                .encode_input(&[
+                    Token::Address(reentrant_recipient_address),
+                    Token::Uint(value),
+                ])
+                .unwrap(),
+        ),
+    };
+
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .with_custom_contracts(vec![
+            ContractToDeploy::new(
+                TestContract::transfer_test().bytecode.to_vec(),
+                test_contract_address,
+            ),
+            ContractToDeploy::new(
+                TestContract::reentrant_recipient().bytecode.to_vec(),
+                reentrant_recipient_address,
+            ),
+        ])
+        .build::<VM>();
+
+    // First transaction, the job of which is to warm up the slots for balance of the recipient as well as its storage variable.
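+    // (Rationale, as an assumption: the reentrancy check below should fail because of
+    // the small gas stipend attached to `.send`/`.transfer`, not because the
+    // recipient's balance and storage slots are cold; `setX` touches those slots first
+    // so the second transaction measures only the stipend-limited call.)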
+    let account = &mut vm.rich_accounts[0];
+    let tx1 = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(reentrant_recipient_address),
+            calldata: reentrant_recipient_contract
+                .function("setX")
+                .encode_input(&[])
+                .unwrap(),
+            value: U256::from(1),
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    vm.vm.push_transaction(tx1);
+    let tx1_result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(
+        !tx1_result.result.is_failed(),
+        "Transaction 1 wasn't successful"
+    );
+
+    let tx2 = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(test_contract_address),
+            calldata,
+            value,
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    vm.vm.push_transaction(tx2);
+    let tx2_result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(
+        tx2_result.result.is_failed(),
+        "Transaction 2 should have failed, but it succeeded"
+    );
+
+    let batch_result = vm
+        .vm
+        .finish_batch(default_pubdata_builder())
+        .block_tip_execution_result;
+    assert!(!batch_result.result.is_failed(), "Batch wasn't successful");
+}
+
+pub(crate) fn test_reentrancy_protection_send_and_transfer<VM: TestedVm>() {
+    test_reentrancy_protection_send_or_transfer::<VM>(TestOptions::Send(U256::zero()));
+    test_reentrancy_protection_send_or_transfer::<VM>(TestOptions::Send(
+        U256::from(10).pow(18.into()),
+    ));
+    test_reentrancy_protection_send_or_transfer::<VM>(TestOptions::Transfer(U256::zero()));
+    test_reentrancy_protection_send_or_transfer::<VM>(TestOptions::Transfer(
+        U256::from(10).pow(18.into()),
+    ));
+}
diff --git a/core/lib/multivm/src/versions/testonly/upgrade.rs b/core/lib/multivm/src/versions/testonly/upgrade.rs
new file mode 100644
index 000000000000..323abf280c7f
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/upgrade.rs
@@ -0,0 +1,322 @@
+use zksync_contracts::{deployer_contract, load_sys_contract};
+use zksync_test_contracts::{TestContract, TxType};
+use zksync_types::{
+    bytecode::BytecodeHash,
+    ethabi::{Contract, Token},
+    get_code_key, get_known_code_key, h256_to_u256,
+    protocol_upgrade::ProtocolUpgradeTxCommonData,
+    u256_to_h256, Address, Execute, ExecuteTransactionCommon, Transaction,
+    COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H256,
+    REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256,
+};
+
+use super::{get_empty_storage, tester::VmTesterBuilder, TestedVm};
+use crate::interface::{
+    ExecutionResult, Halt, InspectExecutionMode, TxExecutionMode, VmInterfaceExt,
+};
+
+/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader:
+/// - This transaction must be the only one in block
+/// - If present, this transaction must be the first one in block
+pub(crate) fn test_protocol_upgrade_is_first<VM: TestedVm>() {
+    let mut storage = get_empty_storage();
+    let bytecode_hash = BytecodeHash::for_bytecode(TestContract::counter().bytecode).value();
+    storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into()));
+
+    let mut vm = VmTesterBuilder::new()
+        .with_storage(storage)
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .build::<VM>();
+
+    // Here we just use some random transaction of protocol upgrade type:
+    let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment {
+        // The bytecode hash to put on an address
+        bytecode_hash,
+        // The address on which to deploy the bytecode hash to
+        address: Address::repeat_byte(1),
+        // Whether to run the constructor on the force deployment
+        call_constructor: false,
+        // The value with which to initialize a contract
+        value: U256::zero(),
+        // The constructor calldata
+        input: vec![],
+    }]);
+
+    // Another random upgrade transaction
+    let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment {
+        // The bytecode hash to put on an address
+        bytecode_hash,
+        // The address on which to deploy the bytecode hash to
+        address: Address::repeat_byte(2),
+        // Whether to run the constructor on the force deployment
+        call_constructor: false,
+        // The value with which to initialize a contract
+        value: U256::zero(),
+        // The constructor calldata
+        input: vec![],
+    }]);
+
+    let normal_l1_transaction = vm.rich_accounts[0]
+        .get_deploy_tx(
+            TestContract::counter().bytecode,
+            None,
+            TxType::L1 { serial_id: 0 },
+        )
+        .tx;
+
+    let expected_error =
+        Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string());
+
+    vm.vm.make_snapshot();
+    // Test 1: there must be only one system transaction in block
+    vm.vm.push_transaction(protocol_upgrade_transaction.clone());
+    vm.vm.push_transaction(normal_l1_transaction.clone());
+    vm.vm.push_transaction(another_protocol_upgrade_transaction);
+
+    vm.vm.execute(InspectExecutionMode::OneTx);
+    vm.vm.execute(InspectExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert_eq!(
+        result.result,
+        ExecutionResult::Halt {
+            reason: expected_error.clone()
+        }
+    );
+
+    // Test 2: the protocol upgrade tx must be the first one in block
+    vm.vm.rollback_to_the_latest_snapshot();
+    vm.vm.make_snapshot();
+    vm.vm.push_transaction(normal_l1_transaction.clone());
+    vm.vm.push_transaction(protocol_upgrade_transaction.clone());
+
+    vm.vm.execute(InspectExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert_eq!(
+        result.result,
+        ExecutionResult::Halt {
+            reason: expected_error
+        }
+    );
+
+    vm.vm.rollback_to_the_latest_snapshot();
+    vm.vm.make_snapshot();
+    vm.vm.push_transaction(protocol_upgrade_transaction);
+    vm.vm.push_transaction(normal_l1_transaction);
+
+    vm.vm.execute(InspectExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!result.result.is_failed());
+}
+
+/// In this test we try to test how force deployments could be done via protocol upgrade transactions.
+pub(crate) fn test_force_deploy_upgrade<VM: TestedVm>() {
+    let mut storage = get_empty_storage();
+    let bytecode_hash = BytecodeHash::for_bytecode(TestContract::counter().bytecode).value();
+    let known_code_key = get_known_code_key(&bytecode_hash);
+    // It is generally expected that all the keys will be set as known prior to the protocol upgrade.
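+    // (Background assumption: "known" refers to the KnownCodesStorage system contract;
+    // a bytecode hash marked as known there can be force-deployed without publishing
+    // its preimage, which is why the test seeds `get_known_code_key(&bytecode_hash)`
+    // with 1 up front.)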
+    storage.set_value(known_code_key, u256_to_h256(1.into()));
+
+    let mut vm = VmTesterBuilder::new()
+        .with_storage(storage)
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .build::<VM>();
+
+    let address_to_deploy = Address::repeat_byte(1);
+    // Here we just use some random transaction of protocol upgrade type:
+    let transaction = get_forced_deploy_tx(&[ForceDeployment {
+        // The bytecode hash to put on an address
+        bytecode_hash,
+        // The address on which to deploy the bytecode hash to
+        address: address_to_deploy,
+        // Whether to run the constructor on the force deployment
+        call_constructor: false,
+        // The value with which to initialize a contract
+        value: U256::zero(),
+        // The constructor calldata
+        input: vec![],
+    }]);
+
+    vm.vm.push_transaction(transaction);
+
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(
+        !result.result.is_failed(),
+        "The force upgrade was not successful"
+    );
+
+    let expected_slots = [(
+        get_code_key(&address_to_deploy),
+        h256_to_u256(bytecode_hash),
+    )];
+    // Verify that the bytecode has been set correctly
+    vm.vm.verify_required_storage(&expected_slots);
+}
+
+/// Here we show how the work with the complex upgrader could be done.
+pub(crate) fn test_complex_upgrader<VM: TestedVm>() {
+    let mut storage = get_empty_storage();
+    let upgrade_bytecode = TestContract::complex_upgrade().bytecode.to_vec();
+    let bytecode_hash = BytecodeHash::for_bytecode(&upgrade_bytecode).value();
+    let msg_sender_test_bytecode = TestContract::msg_sender_test().bytecode.to_vec();
+    let msg_sender_test_hash = BytecodeHash::for_bytecode(&msg_sender_test_bytecode).value();
+    // Let's assume that the bytecode for the implementation of the complex upgrade
+    // is already deployed in some address in user space
+    let upgrade_impl = Address::repeat_byte(1);
+    let account_code_key = get_code_key(&upgrade_impl);
+    storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into()));
+    storage.set_value(
+        get_known_code_key(&msg_sender_test_hash),
+        u256_to_h256(1.into()),
+    );
+    storage.set_value(account_code_key, bytecode_hash);
+    storage.store_factory_dep(bytecode_hash, upgrade_bytecode);
+    storage.store_factory_dep(msg_sender_test_hash, msg_sender_test_bytecode);
+
+    let mut vm = VmTesterBuilder::new()
+        .with_storage(storage)
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .build::<VM>();
+
+    let address_to_deploy1 = Address::repeat_byte(0xfe);
+    let address_to_deploy2 = Address::repeat_byte(0xff);
+
+    let transaction = get_complex_upgrade_tx(
+        upgrade_impl,
+        address_to_deploy1,
+        address_to_deploy2,
+        bytecode_hash,
+    );
+
+    vm.vm.push_transaction(transaction);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(
+        !result.result.is_failed(),
+        "The force upgrade was not successful"
+    );
+
+    let expected_slots = [
+        (
+            get_code_key(&address_to_deploy1),
+            h256_to_u256(bytecode_hash),
+        ),
+        (
+            get_code_key(&address_to_deploy2),
+            h256_to_u256(bytecode_hash),
+        ),
+    ];
+    // Verify that the bytecode has been set correctly
+    vm.vm.verify_required_storage(&expected_slots);
+}
+
+#[derive(Debug, Clone)]
+struct ForceDeployment {
+    // The bytecode hash to put on an address
+    bytecode_hash: H256,
+    // The address on which to deploy the bytecode hash to
+    address: Address,
+    // Whether to run the constructor on the force deployment
+    call_constructor: bool,
+    // The value with which to initialize a contract
+    value: U256,
+    // The constructor calldata
+    input: Vec<u8>,
+}
+
+fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction {
+    let deployer = deployer_contract();
+    let contract_function = deployer.function("forceDeployOnAddresses").unwrap();
+
+    let encoded_deployments: Vec<_> = deployment
+        .iter()
+        .map(|deployment| {
+            Token::Tuple(vec![
+                Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()),
+                Token::Address(deployment.address),
+                Token::Bool(deployment.call_constructor),
+                Token::Uint(deployment.value),
+                Token::Bytes(deployment.input.clone()),
+            ])
+        })
+        .collect();
+
+    let params = [Token::Array(encoded_deployments)];
+
+    let calldata = contract_function
+        .encode_input(&params)
+        .expect("failed to encode parameters");
+
+    let execute = Execute {
+        contract_address: Some(CONTRACT_DEPLOYER_ADDRESS),
+        calldata,
+        factory_deps: vec![],
+        value: U256::zero(),
+    };
+
+    Transaction {
+        common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData {
+            sender: CONTRACT_FORCE_DEPLOYER_ADDRESS,
+            gas_limit: U256::from(200_000_000u32),
+            gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(),
+            ..Default::default()
+        }),
+        execute,
+        received_timestamp_ms: 0,
+        raw_bytes: None,
+    }
+}
+
+// Returns the transaction that performs a complex protocol upgrade.
+// The first param is the address of the implementation of the complex upgrade
+// in user-space, while the next 3 params are params of the implementation itself
+// For the explanation for the parameters, please refer to the contract source code.
+fn get_complex_upgrade_tx(
+    implementation_address: Address,
+    address1: Address,
+    address2: Address,
+    bytecode_hash: H256,
+) -> Transaction {
+    let impl_contract = TestContract::complex_upgrade();
+    let impl_function = impl_contract.function("someComplexUpgrade");
+    let impl_calldata = impl_function
+        .encode_input(&[
+            Token::Address(address1),
+            Token::Address(address2),
+            Token::FixedBytes(bytecode_hash.as_bytes().to_vec()),
+        ])
+        .unwrap();
+
+    let complex_upgrader = get_complex_upgrader_abi();
+    let upgrade_function = complex_upgrader.function("upgrade").unwrap();
+    let complex_upgrader_calldata = upgrade_function
+        .encode_input(&[
+            Token::Address(implementation_address),
+            Token::Bytes(impl_calldata),
+        ])
+        .unwrap();
+
+    let execute = Execute {
+        contract_address: Some(COMPLEX_UPGRADER_ADDRESS),
+        calldata: complex_upgrader_calldata,
+        factory_deps: vec![],
+        value: U256::zero(),
+    };
+
+    Transaction {
+        common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData {
+            sender: CONTRACT_FORCE_DEPLOYER_ADDRESS,
+            gas_limit: U256::from(200_000_000u32),
+            gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(),
+            ..Default::default()
+        }),
+        execute,
+        received_timestamp_ms: 0,
+        raw_bytes: None,
+    }
+}
+
+fn get_complex_upgrader_abi() -> Contract {
+    load_sys_contract("ComplexUpgrader")
+}
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/events.rs b/core/lib/multivm/src/versions/vm_1_3_2/events.rs
index 7b1f03c8ac99..0e62312185a2 100644
--- a/core/lib/multivm/src/versions/vm_1_3_2/events.rs
+++ b/core/lib/multivm/src/versions/vm_1_3_2/events.rs
@@ -1,8 +1,7 @@
 use zk_evm_1_3_3::{ethereum_types::Address, reference_impls::event_sink::EventMessage};
-use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256};
-use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address};
+use zksync_types::{h256_to_address, L1BatchNumber, EVENT_WRITER_ADDRESS, H256};
 
-use crate::interface::VmEvent;
+use crate::{interface::VmEvent,
utils::bytecode::be_chunks_to_h256_words}; #[derive(Clone)] pub struct SolidityLikeEvent { @@ -135,7 +134,7 @@ pub fn merge_events(events: Vec) -> Vec { .filter(|e| e.address == EVENT_WRITER_ADDRESS) .map(|event| { // The events writer events where the first topic is the actual address of the event and the rest of the topics are real topics - let address = h256_to_account_address(&H256(event.topics[0])); + let address = h256_to_address(&H256(event.topics[0])); let topics = event.topics.into_iter().skip(1).collect(); SolidityLikeEvent { diff --git a/core/lib/multivm/src/versions/vm_1_3_2/history_recorder.rs b/core/lib/multivm/src/versions/vm_1_3_2/history_recorder.rs index 2912fad2841d..bfd33b4b355e 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/history_recorder.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/history_recorder.rs @@ -1,8 +1,7 @@ use std::{collections::HashMap, fmt::Debug, hash::Hash}; use zk_evm_1_3_3::{aux_structures::Timestamp, vm_state::PrimitiveValue, zkevm_opcode_defs}; -use zksync_types::{StorageKey, U256}; -use zksync_utils::{h256_to_u256, u256_to_h256}; +use zksync_types::{h256_to_u256, u256_to_h256, StorageKey, U256}; use crate::interface::storage::{StoragePtr, WriteStorage}; diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/decommitter.rs index e9a85f8ba4b1..779fc126e72c 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/decommitter.rs @@ -6,12 +6,12 @@ use zk_evm_1_3_3::{ DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, }, }; -use zksync_types::U256; -use zksync_utils::{bytecode::bytecode_len_in_words, bytes_to_be_words, u256_to_h256}; +use zksync_types::{u256_to_h256, U256}; use super::OracleWithHistory; use crate::{ interface::storage::{StoragePtr, WriteStorage}, + utils::bytecode::{bytecode_len_in_words, bytes_to_be_words}, vm_1_3_2::history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory}, }; @@ -59,7 +59,7 @@ impl DecommitterOracle .load_factory_dep(u256_to_h256(hash)) .expect("Trying to decode unexisting hash"); - let value = bytes_to_be_words(value); + let value = bytes_to_be_words(&value); self.known_bytecodes.insert(hash, value.clone(), timestamp); value } diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/storage.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/storage.rs index ac4cc3df1706..e3614cbd471c 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/storage.rs @@ -6,10 +6,9 @@ use zk_evm_1_3_3::{ zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use zksync_types::{ - utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogKind, - BOOTLOADER_ADDRESS, U256, + u256_to_h256, utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, + StorageLogKind, BOOTLOADER_ADDRESS, U256, }; -use zksync_utils::u256_to_h256; use super::OracleWithHistory; use crate::{ diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs index 86ed02365a94..ef2d4f0b5769 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs @@ -9,8 +9,7 @@ use zksync_system_constants::{ ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, 
L1_MESSENGER_ADDRESS, SHA256_PRECOMPILE_ADDRESS, }; -use zksync_types::U256; -use zksync_utils::u256_to_h256; +use zksync_types::{u256_to_h256, U256}; use crate::vm_1_3_2::{ history_recorder::HistoryMode, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/validation.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/validation.rs index f52b6b8940db..fbb6795d89a3 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/validation.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/validation.rs @@ -11,13 +11,14 @@ use zksync_system_constants::{ KECCAK256_PRECOMPILE_ADDRESS, L2_BASE_TOKEN_ADDRESS, MSG_VALUE_SIMULATOR_ADDRESS, SYSTEM_CONTEXT_ADDRESS, }; -use zksync_types::{get_code_key, web3::keccak256, AccountTreeId, Address, StorageKey, H256, U256}; -use zksync_utils::{ - be_bytes_to_safe_address, h256_to_account_address, u256_to_account_address, u256_to_h256, +use zksync_types::{ + get_code_key, h256_to_address, u256_to_address, u256_to_h256, web3::keccak256, AccountTreeId, + Address, StorageKey, H256, U256, }; use crate::{ interface::storage::{StoragePtr, WriteStorage}, + utils::bytecode::be_bytes_to_safe_address, vm_1_3_2::{ errors::VmRevertReasonParsingResult, history_recorder::HistoryMode, @@ -242,7 +243,7 @@ impl ValidationTracer { // The user is allowed to touch its own slots or slots semantically related to him. let valid_users_slot = address == self.user_address - || u256_to_account_address(&key) == self.user_address + || u256_to_address(&key) == self.user_address || self.auxilary_allowed_slots.contains(&u256_to_h256(key)); if valid_users_slot { return true; @@ -309,7 +310,7 @@ impl ValidationTracer { let packed_abi = data.src0_value.value; let call_destination_value = data.src1_value.value; - let called_address = u256_to_account_address(&call_destination_value); + let called_address = u256_to_address(&call_destination_value); let far_call_abi = FarCallABI::from_u256(packed_abi); if called_address == KECCAK256_PRECOMPILE_ADDRESS @@ -376,7 +377,7 @@ impl ValidationTracer { let value = self.storage.borrow_mut().read_value(&storage_key); return Ok(NewTrustedValidationItems { - new_trusted_addresses: vec![h256_to_account_address(&value)], + new_trusted_addresses: vec![h256_to_address(&value)], ..Default::default() }); } diff --git a/core/lib/multivm/src/versions/vm_1_3_2/pubdata_utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/pubdata_utils.rs index d88ee70991bc..3c10bd8c48be 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/pubdata_utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/pubdata_utils.rs @@ -3,10 +3,10 @@ use std::collections::HashMap; use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zk_evm_1_3_3::aux_structures::Timestamp; use zksync_types::{StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; -use zksync_utils::bytecode::bytecode_len_in_bytes; use crate::{ interface::{storage::WriteStorage, VmEvent}, + utils::bytecode::bytecode_len_in_bytes, vm_1_3_2::{history_recorder::HistoryMode, oracles::storage::storage_key_of_log, VmInstance}, }; @@ -30,9 +30,7 @@ impl VmInstance { let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() - .map(|bytecodehash| { - bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD - }) + .map(|bytecode_hash| bytecode_len_in_bytes(bytecode_hash) + PUBLISH_BYTECODE_OVERHEAD) .sum(); storage_writes_pubdata_published diff --git a/core/lib/multivm/src/versions/vm_1_3_2/refunds.rs 
b/core/lib/multivm/src/versions/vm_1_3_2/refunds.rs index 163992516d27..b0d70c3522c4 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/refunds.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/refunds.rs @@ -1,6 +1,5 @@ use zk_evm_1_3_3::aux_structures::Timestamp; -use zksync_types::U256; -use zksync_utils::ceil_div_u256; +use zksync_types::{ceil_div_u256, U256}; use crate::{ interface::storage::WriteStorage, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs index 34c70e0f9c45..ac6ce7fcdfcf 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs @@ -12,13 +12,13 @@ use itertools::Itertools; use zk_evm_1_3_3::{aux_structures::Timestamp, vm_state::VmLocalState}; use zksync_contracts::deployer_contract; use zksync_types::{ + address_to_h256, + bytecode::BytecodeHash, ethabi::{Address, Token}, + h256_to_address, u256_to_h256, web3::keccak256, Execute, Nonce, StorageKey, StorageValue, CONTRACT_DEPLOYER_ADDRESS, H256, U256, }; -use zksync_utils::{ - address_to_h256, bytecode::hash_bytecode, h256_to_account_address, u256_to_h256, -}; use crate::interface::storage::WriteStorage; /// The tests here help us with the testing the VM @@ -145,7 +145,7 @@ pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute { let params = [ Token::FixedBytes(vec![0u8; 32]), - Token::FixedBytes(hash_bytecode(code).0.to_vec()), + Token::FixedBytes(BytecodeHash::for_bytecode(code).value().0.to_vec()), Token::Bytes(calldata.to_vec()), ]; let calldata = contract_function @@ -174,7 +174,7 @@ pub fn get_create_zksync_address(sender_address: Address, sender_nonce: Nonce) - let hash = keccak256(&digest); - h256_to_account_address(&H256(hash)) + h256_to_address(&H256(hash)) } pub fn verify_required_storage( diff --git a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs index 0285320daa30..c2dfe97ed076 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs @@ -1,19 +1,23 @@ use zk_evm_1_3_3::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; use zksync_types::{ + address_to_h256, + bytecode::BytecodeHash, + ceil_div_u256, ethabi::{encode, Address, Token}, fee::encoding_len, + h256_to_u256, l1::is_l1_tx_type, l2::TransactionType, ExecuteTransactionCommon, Transaction, MAX_L2_TX_GAS_LIMIT, U256, }; -use zksync_utils::{ - address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, ceil_div_u256, h256_to_u256, -}; use super::vm_with_bootloader::MAX_TXS_IN_BLOCK; -use crate::vm_1_3_2::vm_with_bootloader::{ - BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, - MAX_GAS_PER_PUBDATA_BYTE, +use crate::{ + utils::bytecode::bytes_to_be_words, + vm_1_3_2::vm_with_bootloader::{ + BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, + MAX_GAS_PER_PUBDATA_BYTE, + }, }; // This structure represents the data that is used by @@ -191,16 +195,13 @@ impl TransactionData { let factory_deps_hashes = self .factory_deps .iter() - .map(|dep| h256_to_u256(hash_bytecode(dep))) + .map(|dep| BytecodeHash::for_bytecode(dep).value_u256()) .collect(); self.abi_encode_with_custom_factory_deps(factory_deps_hashes) } pub fn into_tokens(self) -> Vec { - let bytes = self.abi_encode(); - assert!(bytes.len() % 32 == 0); - - bytes_to_be_words(bytes) + bytes_to_be_words(&self.abi_encode()) } pub(crate) fn 
effective_gas_price_per_pubdata(&self, block_gas_price_per_pubdata: u32) -> u32 { diff --git a/core/lib/multivm/src/versions/vm_1_3_2/utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/utils.rs index 7870b1ff7443..5c72ba204d89 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/utils.rs @@ -7,8 +7,7 @@ use zk_evm_1_3_3::{ }; use zksync_contracts::BaseSystemContracts; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; -use zksync_types::{Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; -use zksync_utils::h256_to_u256; +use zksync_types::{h256_to_u256, Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; use crate::{ interface::storage::WriteStorage, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index 89196788a762..05902b736fbd 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -1,14 +1,14 @@ -use std::collections::HashSet; +use std::{collections::HashSet, rc::Rc}; -use zksync_types::Transaction; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; +use zksync_types::{bytecode::BytecodeHash, h256_to_u256, Transaction}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, - L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, + L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::old::TracerDispatcher, @@ -44,19 +44,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn push_transaction(&mut self, tx: Transaction) { - crate::vm_1_3_2::vm_with_bootloader::push_transaction_to_bootloader_memory( - &mut self.vm, - &tx, - self.system_env.execution_mode.glue_into(), - None, - ) + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + let compressed_bytecodes = + crate::vm_1_3_2::vm_with_bootloader::push_transaction_to_bootloader_memory( + &mut self.vm, + &tx, + self.system_env.execution_mode.glue_into(), + None, + ); + PushTransactionResult { + compressed_bytecodes: compressed_bytecodes.into(), + } } fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { if let Some(storage_invocations) = tracer.storage_invocations { self.vm @@ -65,7 +69,7 @@ impl VmInterface for Vm { } match execution_mode { - VmExecutionMode::OneTx => { + InspectExecutionMode::OneTx => { match self.system_env.execution_mode { TxExecutionMode::VerifyExecute => { let enable_call_tracer = tracer @@ -88,8 +92,7 @@ impl VmInterface for Vm { .glue_into(), } } - VmExecutionMode::Batch => self.finish_batch().block_tip_execution_result, - VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), + InspectExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), } } @@ -115,7 +118,7 @@ impl VmInterface for Vm { let mut deps_hashes = HashSet::with_capacity(deps.len()); let mut bytecode_hashes = vec![]; let filtered_deps = deps.iter().filter_map(|bytecode| { - let bytecode_hash = hash_bytecode(bytecode); + let bytecode_hash = BytecodeHash::for_bytecode(bytecode).value(); let is_known = 
!deps_hashes.insert(bytecode_hash) || self.vm.is_bytecode_known(&bytecode_hash); @@ -179,7 +182,7 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { self.vm .execute_till_block_end( crate::vm_1_3_2::vm_with_bootloader::BootloaderJobType::BlockPostprocessing, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs index d1acdf7708e8..ca9ba097d472 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs @@ -15,16 +15,14 @@ use zk_evm_1_3_3::{ use zksync_contracts::BaseSystemContracts; use zksync_system_constants::MAX_L2_TX_GAS_LIMIT; use zksync_types::{ - fee_model::L1PeggedBatchFeeModelInput, l1::is_l1_tx_type, Address, Transaction, - BOOTLOADER_ADDRESS, L1_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, -}; -use zksync_utils::{ - address_to_u256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, misc::ceil_div, + address_to_u256, bytecode::BytecodeHash, fee_model::L1PeggedBatchFeeModelInput, h256_to_u256, + l1::is_l1_tx_type, Address, Transaction, BOOTLOADER_ADDRESS, L1_GAS_PER_PUBDATA_BYTE, + MAX_NEW_FACTORY_DEPS, U256, }; use crate::{ interface::{storage::WriteStorage, CompressedBytecodeInfo, L1BatchEnv}, - utils::bytecode, + utils::{bytecode, bytecode::bytes_to_be_words}, vm_1_3_2::{ bootloader_state::BootloaderState, history_recorder::HistoryMode, @@ -84,8 +82,11 @@ pub(crate) fn eth_price_per_pubdata_byte(l1_gas_price: u64) -> u64 { pub fn base_fee_to_gas_per_pubdata(l1_gas_price: u64, base_fee: u64) -> u64 { let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price); - - ceil_div(eth_price_per_pubdata_byte, base_fee) + if eth_price_per_pubdata_byte == 0 { + 0 + } else { + eth_price_per_pubdata_byte.div_ceil(base_fee) + } } pub(crate) fn derive_base_fee_and_gas_per_pubdata( @@ -102,7 +103,7 @@ pub(crate) fn derive_base_fee_and_gas_per_pubdata( // publish enough public data while compensating us for it. 
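     // (Worked example with made-up numbers: if eth_price_per_pubdata_byte = 1_000 and
     // MAX_GAS_PER_PUBDATA_BYTE = 20, the pubdata-driven floor is ceil(1_000 / 20) = 50,
     // so base_fee = max(fair_l2_gas_price, 50).)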
let base_fee = std::cmp::max( fair_l2_gas_price, - ceil_div(eth_price_per_pubdata_byte, MAX_GAS_PER_PUBDATA_BYTE), + eth_price_per_pubdata_byte.div_ceil(MAX_GAS_PER_PUBDATA_BYTE), ); ( @@ -391,7 +392,7 @@ pub fn init_vm_inner( oracle_tools.decommittment_processor.populate( vec![( h256_to_u256(base_system_contract.default_aa.hash), - base_system_contract.default_aa.code.clone(), + bytes_to_be_words(&base_system_contract.default_aa.code), )], Timestamp(0), ); @@ -399,7 +400,7 @@ pub fn init_vm_inner( oracle_tools.memory.populate( vec![( BOOTLOADER_CODE_PAGE, - base_system_contract.bootloader.code.clone(), + bytes_to_be_words(&base_system_contract.bootloader.code), )], Timestamp(0), ); @@ -442,7 +443,7 @@ pub fn get_bootloader_memory( let mut previous_compressed: usize = 0; let mut already_included_txs_size = 0; for (tx_index_in_block, tx) in txs.into_iter().enumerate() { - let compressed_bytecodes = predefined_compressed_bytecodes[tx_index_in_block].clone(); + let compressed_bytecodes = &predefined_compressed_bytecodes[tx_index_in_block]; let mut total_compressed_len_words = 0; for i in compressed_bytecodes.iter() { @@ -475,7 +476,7 @@ pub fn push_transaction_to_bootloader_memory( tx: &Transaction, execution_mode: TxExecutionMode, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { let tx: TransactionData = tx.clone().into(); let block_gas_per_pubdata_byte = vm.block_context.context.block_gas_price_per_pubdata(); let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); @@ -485,7 +486,7 @@ pub fn push_transaction_to_bootloader_memory( execution_mode, overhead, explicit_compressed_bytecodes, - ); + ) } pub fn push_raw_transaction_to_bootloader_memory( @@ -494,7 +495,7 @@ pub fn push_raw_transaction_to_bootloader_memory>, -) { +) -> Vec { let tx_index_in_block = vm.bootloader_state.free_tx_index(); let already_included_txs_size = vm.bootloader_state.free_tx_offset(); @@ -517,7 +518,7 @@ pub fn push_raw_transaction_to_bootloader_memory, + compressed_bytecodes: &[CompressedBytecodeInfo], ) -> Vec<(usize, U256)> { let overhead_gas = tx.overhead_gas(block_gas_per_pubdata); let trusted_gas_limit = tx.trusted_gas_limit(block_gas_per_pubdata); @@ -604,7 +606,7 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( predefined_overhead: u32, trusted_gas_limit: u32, previous_compressed_bytecode_size: usize, - compressed_bytecodes: Vec, + compressed_bytecodes: &[CompressedBytecodeInfo], ) -> Vec<(usize, U256)> { let mut memory: Vec<(usize, U256)> = Vec::default(); let bootloader_description_offset = @@ -640,11 +642,11 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( COMPRESSED_BYTECODES_OFFSET + 1 + previous_compressed_bytecode_size; let memory_addition: Vec<_> = compressed_bytecodes - .into_iter() - .flat_map(|x| bytecode::encode_call(&x)) + .iter() + .flat_map(bytecode::encode_call) .collect(); - let memory_addition = bytes_to_be_words(memory_addition); + let memory_addition = bytes_to_be_words(&memory_addition); memory.extend( (compressed_bytecodes_offset..compressed_bytecodes_offset + memory_addition.len()) @@ -727,11 +729,8 @@ fn formal_calldata_abi() -> PrimitiveValue { } pub(crate) fn bytecode_to_factory_dep(bytecode: Vec) -> (U256, Vec) { - let bytecode_hash = hash_bytecode(&bytecode); - let bytecode_hash = U256::from_big_endian(bytecode_hash.as_bytes()); - - let bytecode_words = bytes_to_be_words(bytecode); - + let bytecode_hash = BytecodeHash::for_bytecode(&bytecode).value_u256(); + let bytecode_words = bytes_to_be_words(&bytecode); (bytecode_hash, bytecode_words) } diff 
--git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/l2_block.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/l2_block.rs index d3c428ab282b..a5157e323408 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/l2_block.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/l2_block.rs @@ -1,7 +1,6 @@ use std::cmp::Ordering; -use zksync_types::{L2BlockNumber, H256}; -use zksync_utils::concat_and_hash; +use zksync_types::{web3::keccak256_concat, L2BlockNumber, H256}; use crate::{ interface::{L2Block, L2BlockEnv}, @@ -53,7 +52,7 @@ impl BootloaderL2Block { } fn update_rolling_hash(&mut self, tx_hash: H256) { - self.txs_rolling_hash = concat_and_hash(self.txs_rolling_hash, tx_hash) + self.txs_rolling_hash = keccak256_concat(self.txs_rolling_hash, tx_hash) } pub(crate) fn interim_version(&self) -> BootloaderL2Block { diff --git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs index 1acf75b27e1b..33b15e68005b 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs @@ -1,5 +1,4 @@ -use zksync_types::{ethabi, U256}; -use zksync_utils::{bytes_to_be_words, h256_to_u256}; +use zksync_types::{ethabi, h256_to_u256, U256}; use super::tx::BootloaderTx; use crate::{ @@ -25,8 +24,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( .iter() .flat_map(bytecode::encode_call) .collect(); - - bytes_to_be_words(memory_addition) + bytecode::bytes_to_be_words(&memory_addition) } #[allow(clippy::too_many_arguments)] diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs index 5f24f2465a32..0278e239522b 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/bytecode.rs @@ -1,6 +1,5 @@ use itertools::Itertools; -use zksync_types::U256; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; +use zksync_types::{bytecode::BytecodeHash, U256}; use crate::{ interface::{ @@ -25,18 +24,15 @@ impl Vm { .storage .get_ptr() .borrow_mut() - .is_bytecode_known(&hash_bytecode(&info.original)) + .is_bytecode_known(&BytecodeHash::for_bytecode(&info.original).value()) }) } } /// Converts bytecode to tokens and hashes it. 
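 /// (Expected shape, per the conventions used in this module: the returned `U256` is the
 /// versioned bytecode hash, and the accompanying word vector is the bytecode split into
 /// big-endian 32-byte words, e.g. a 64-byte bytecode yields 2 words.)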
pub(crate) fn bytecode_to_factory_dep(bytecode: Vec) -> (U256, Vec) { - let bytecode_hash = hash_bytecode(&bytecode); - let bytecode_hash = U256::from_big_endian(bytecode_hash.as_bytes()); - - let bytecode_words = bytes_to_be_words(bytecode); - + let bytecode_hash = BytecodeHash::for_bytecode(&bytecode).value_u256(); + let bytecode_words = bytecode::bytes_to_be_words(&bytecode); (bytecode_hash, bytecode_words) } @@ -49,7 +45,11 @@ pub(crate) fn compress_bytecodes( .enumerate() .sorted_by_key(|(_idx, dep)| *dep) .dedup_by(|x, y| x.1 == y.1) - .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) + .filter(|(_idx, dep)| { + !storage + .borrow_mut() + .is_bytecode_known(&BytecodeHash::for_bytecode(dep).value()) + }) .sorted_by_key(|(idx, _dep)| *idx) .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs index cc199fef9416..35ff73071ca6 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs @@ -1,4 +1,4 @@ -use std::mem; +use std::{collections::HashMap, mem}; use zk_evm_1_4_1::aux_structures::Timestamp; @@ -99,7 +99,7 @@ impl Vm { logs, statistics, refunds, - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), // dynamic bytecode deployment is not supported }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/events.rs b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/events.rs index ffa4b4d50b8e..bc5befe3810c 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/events.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/events.rs @@ -1,8 +1,7 @@ use zk_evm_1_4_1::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use zksync_types::{h256_to_address, L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use crate::interface::VmEvent; +use crate::{interface::VmEvent, utils::bytecode::be_chunks_to_h256_words}; #[derive(Clone)] pub(crate) struct SolidityLikeEvent { @@ -135,7 +134,7 @@ pub(crate) fn merge_events(events: Vec) -> Vec .filter(|e| e.address == EVENT_WRITER_ADDRESS) .map(|event| { // The events writer events where the first topic is the actual address of the event and the rest of the topics are real topics - let address = h256_to_account_address(&H256(event.topics[0])); + let address = h256_to_address(&H256(event.topics[0])); let topics = event.topics.into_iter().skip(1).collect(); SolidityLikeEvent { diff --git a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/history_recorder.rs b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/history_recorder.rs index c9d899742202..bfd7b9130f50 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/history_recorder.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/history_recorder.rs @@ -5,8 +5,7 @@ use zk_evm_1_4_1::{ vm_state::PrimitiveValue, zkevm_opcode_defs::{self}, }; -use zksync_types::{StorageKey, H256, U256}; -use zksync_utils::{h256_to_u256, u256_to_h256}; +use zksync_types::{h256_to_u256, u256_to_h256, StorageKey, H256, U256}; use crate::interface::storage::{StoragePtr, WriteStorage}; diff --git a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/oracles/decommitter.rs index 
636a4058a037..0fe3efa30b68 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/old_vm/oracles/decommitter.rs @@ -6,16 +6,17 @@ use zk_evm_1_4_1::{ DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, }, }; -use zksync_types::U256; -use zksync_utils::{bytecode::bytecode_len_in_words, bytes_to_be_words, u256_to_h256}; +use zksync_types::{u256_to_h256, U256}; use super::OracleWithHistory; use crate::{ interface::storage::{ReadStorage, StoragePtr}, + utils::bytecode::{bytecode_len_in_words, bytes_to_be_words}, vm_1_4_1::old_vm::history_recorder::{ HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory, }, }; + /// The main job of the DecommiterOracle is to implement the DecommittmentProcessor trait - that is /// used by the VM to 'load' bytecodes into memory. #[derive(Debug)] @@ -60,7 +61,7 @@ impl DecommitterOracle { .load_factory_dep(u256_to_h256(hash)) .expect("Trying to decode unexisting hash"); - let value = bytes_to_be_words(value); + let value = bytes_to_be_words(&value); self.known_bytecodes.insert(hash, value.clone(), timestamp); value } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/oracles/storage.rs b/core/lib/multivm/src/versions/vm_1_4_1/oracles/storage.rs index 3debfd1ca627..921e9b81f71f 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/oracles/storage.rs @@ -6,6 +6,7 @@ use zk_evm_1_4_1::{ zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use zksync_types::{ + u256_to_h256, utils::storage_key_for_eth_balance, writes::{ compression::compress_with_best_strategy, BYTES_PER_DERIVED_KEY, @@ -13,7 +14,6 @@ use zksync_types::{ }, AccountTreeId, Address, StorageKey, StorageLogKind, BOOTLOADER_ADDRESS, U256, }; -use zksync_utils::u256_to_h256; use crate::{ glue::GlueInto, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs index 238804bc7fca..a51c5ce46197 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs @@ -5,19 +5,25 @@ use zk_evm_1_4_1::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, }; -use zksync_types::{writes::StateDiffRecord, AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS}; -use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; +use zksync_types::{ + h256_to_u256, u256_to_h256, writes::StateDiffRecord, AccountTreeId, StorageKey, + L1_MESSENGER_ADDRESS, +}; use crate::{ interface::{ + pubdata::L1MessengerL2ToL1Log, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, }, tracers::dynamic::vm_1_4_1::DynTracer, - utils::events::{ - extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + utils::{ + bytecode::be_words_to_bytes, + events::{ + extract_bytecode_publication_requests_from_l1_messenger, + extract_l2tol1logs_from_l1_messenger, + }, }, vm_1_4_1::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, @@ -99,15 +105,13 @@ impl PubdataTracer { bytecode_publication_requests .iter() .map(|bytecode_publication_request| { - state + let bytecode_words = state .decommittment_processor .known_bytecodes .inner() .get(&h256_to_u256(bytecode_publication_request.bytecode_hash)) - .unwrap() - 
.iter() - .flat_map(u256_to_bytes_be) - .collect() + .unwrap(); + be_words_to_bytes(bytecode_words) }) .collect() } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/refunds.rs index 2586d8d7f873..dc945e183a8f 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/refunds.rs @@ -8,8 +8,9 @@ use zk_evm_1_4_1::{ zkevm_opcode_defs::system_params::L1_MESSAGE_PUBDATA_BYTES, }; use zksync_system_constants::{PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; -use zksync_types::{l2_to_l1_log::L2ToL1Log, L1BatchNumber, H256, U256}; -use zksync_utils::{bytecode::bytecode_len_in_bytes, ceil_div_u256, u256_to_h256}; +use zksync_types::{ + ceil_div_u256, l2_to_l1_log::L2ToL1Log, u256_to_h256, L1BatchNumber, H256, U256, +}; use crate::{ interface::{ @@ -18,6 +19,7 @@ use crate::{ L1BatchEnv, Refunds, VmEvent, }, tracers::dynamic::vm_1_4_1::DynTracer, + utils::bytecode::bytecode_len_in_bytes, vm_1_4_1::{ bootloader_state::BootloaderState, constants::{BOOTLOADER_HEAP_PAGE, OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET}, @@ -348,7 +350,7 @@ pub(crate) fn pubdata_published( let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() - .map(|bytecodehash| bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD) + .map(|bytecode_hash| bytecode_len_in_bytes(bytecode_hash) + PUBLISH_BYTECODE_OVERHEAD) .sum(); storage_writes_pubdata_published diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/utils.rs index 7b24e482b72d..536ea79e22f9 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/utils.rs @@ -9,8 +9,7 @@ use zksync_system_constants::{ ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, SHA256_PRECOMPILE_ADDRESS, }; -use zksync_types::U256; -use zksync_utils::u256_to_h256; +use zksync_types::{u256_to_h256, U256}; use crate::vm_1_4_1::{ constants::{ diff --git a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs index d07732ae4350..f938696297b5 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] @@ -64,7 +64,7 @@ impl PubdataInput { #[cfg(test)] mod tests { use zksync_system_constants::{ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS}; - use zksync_utils::u256_to_h256; + use zksync_types::u256_to_h256; use super::*; diff --git a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs index f7384da76d0d..af9a93f647a2 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs @@ -1,19 +1,24 @@ use std::convert::TryInto; use zksync_types::{ + address_to_h256, + bytecode::BytecodeHash, ethabi::{encode, Address, Token}, fee::{encoding_len, Fee}, + h256_to_u256, l1::is_l1_tx_type, l2::{L2Tx, 
TransactionType}, transaction_request::{PaymasterParams, TransactionRequest}, web3::Bytes, Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, Nonce, Transaction, H256, U256, }; -use zksync_utils::{address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; -use crate::vm_1_4_1::{ - constants::{L1_TX_TYPE, MAX_GAS_PER_PUBDATA_BYTE, PRIORITY_TX_MAX_GAS_LIMIT}, - utils::overhead::derive_overhead, +use crate::{ + utils::bytecode::bytes_to_be_words, + vm_1_4_1::{ + constants::{L1_TX_TYPE, MAX_GAS_PER_PUBDATA_BYTE, PRIORITY_TX_MAX_GAS_LIMIT}, + utils::overhead::derive_overhead, + }, }; /// This structure represents the data that is used by @@ -190,16 +195,13 @@ impl TransactionData { let factory_deps_hashes = self .factory_deps .iter() - .map(|dep| h256_to_u256(hash_bytecode(dep))) + .map(|dep| BytecodeHash::for_bytecode(dep).value_u256()) .collect(); self.abi_encode_with_custom_factory_deps(factory_deps_hashes) } pub(crate) fn into_tokens(self) -> Vec { - let bytes = self.abi_encode(); - assert!(bytes.len() % 32 == 0); - - bytes_to_be_words(bytes) + bytes_to_be_words(&self.abi_encode()) } pub(crate) fn overhead_gas(&self) -> u32 { diff --git a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/vm_state.rs index b91733c7ca14..9c3ecd9741a3 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/vm_state.rs @@ -11,14 +11,14 @@ use zk_evm_1_4_1::{ }, }; use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::{block::L2BlockHasher, Address, L2BlockNumber}; -use zksync_utils::h256_to_u256; +use zksync_types::{block::L2BlockHasher, h256_to_u256, Address, L2BlockNumber}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, L1BatchEnv, L2Block, SystemEnv, }, + utils::bytecode::bytes_to_be_words, vm_1_4_1::{ bootloader_state::BootloaderState, constants::BOOTLOADER_HEAP_PAGE, @@ -89,11 +89,7 @@ pub(crate) fn new_vm_state( decommittment_processor.populate( vec![( h256_to_u256(system_env.base_system_smart_contracts.default_aa.hash), - system_env - .base_system_smart_contracts - .default_aa - .code - .clone(), + bytes_to_be_words(&system_env.base_system_smart_contracts.default_aa.code), )], Timestamp(0), ); @@ -101,11 +97,7 @@ pub(crate) fn new_vm_state( memory.populate( vec![( BOOTLOADER_CODE_PAGE, - system_env - .base_system_smart_contracts - .bootloader - .code - .clone(), + bytes_to_be_words(&system_env.base_system_smart_contracts.bootloader.code), )], Timestamp(0), ); diff --git a/core/lib/multivm/src/versions/vm_1_4_1/types/l1_batch.rs b/core/lib/multivm/src/versions/vm_1_4_1/types/l1_batch.rs index ca2f0688154b..31807cb66cc1 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/types/l1_batch.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/types/l1_batch.rs @@ -1,5 +1,4 @@ -use zksync_types::U256; -use zksync_utils::{address_to_u256, h256_to_u256}; +use zksync_types::{address_to_u256, h256_to_u256, U256}; use crate::{interface::L1BatchEnv, vm_1_4_1::utils::fee::get_batch_base_fee}; diff --git a/core/lib/multivm/src/versions/vm_1_4_1/utils/fee.rs b/core/lib/multivm/src/versions/vm_1_4_1/utils/fee.rs index b5d4cc971b9e..7f214b457317 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/utils/fee.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/utils/fee.rs @@ -1,6 +1,5 @@ //! 
Utility functions for vm use zksync_types::fee_model::PubdataIndependentBatchFeeModelInput; -use zksync_utils::ceil_div; use crate::{interface::L1BatchEnv, vm_1_4_1::constants::MAX_GAS_PER_PUBDATA_BYTE}; @@ -18,11 +17,14 @@ pub(crate) fn derive_base_fee_and_gas_per_pubdata( // publish enough public data while compensating us for it. let base_fee = std::cmp::max( fair_l2_gas_price, - ceil_div(fair_pubdata_price, MAX_GAS_PER_PUBDATA_BYTE), + fair_pubdata_price.div_ceil(MAX_GAS_PER_PUBDATA_BYTE), ); - let gas_per_pubdata = ceil_div(fair_pubdata_price, base_fee); - + let gas_per_pubdata = if fair_pubdata_price == 0 { + 0 + } else { + fair_pubdata_price.div_ceil(base_fee) + }; (base_fee, gas_per_pubdata) } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/utils/l2_blocks.rs b/core/lib/multivm/src/versions/vm_1_4_1/utils/l2_blocks.rs index ff5536ae0b97..1095abd82db1 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/utils/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/utils/l2_blocks.rs @@ -4,9 +4,9 @@ use zksync_system_constants::{ SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES, }; use zksync_types::{ - block::unpack_block_info, web3::keccak256, AccountTreeId, L2BlockNumber, StorageKey, H256, U256, + block::unpack_block_info, h256_to_u256, u256_to_h256, web3::keccak256, AccountTreeId, + L2BlockNumber, StorageKey, H256, U256, }; -use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::interface::{ storage::{ReadStorage, StoragePtr}, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index 4122ee94e66a..af483feedd7e 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -1,15 +1,18 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_4_1::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, utils::events::extract_l2tol1logs_from_l1_messenger, @@ -81,18 +84,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. 
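
The fee.rs hunk above drops `zksync_utils::ceil_div` in favor of the standard `u64::div_ceil` and guards against a zero pubdata price. A sketch of the resulting derivation, with the constant passed as a parameter for illustration:

// Mirrors the updated `derive_base_fee_and_gas_per_pubdata`: the base fee must
// cover at least one byte of pubdata within the per-byte gas cap, and
// `gas_per_pubdata` short-circuits to 0 when pubdata is free (note that
// `base_fee` is nonzero whenever `fair_pubdata_price` is, so the division is safe).
fn derive_base_fee_and_gas_per_pubdata(
    fair_l2_gas_price: u64,
    fair_pubdata_price: u64,
    max_gas_per_pubdata_byte: u64,
) -> (u64, u64) {
    let base_fee = fair_l2_gas_price.max(fair_pubdata_price.div_ceil(max_gas_per_pubdata_byte));
    let gas_per_pubdata = if fair_pubdata_price == 0 {
        0
    } else {
        fair_pubdata_price.div_ceil(base_fee)
    };
    (base_fee, gas_per_pubdata)
}
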
fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode, None) + self.inspect_inner(tracer, execution_mode.into(), None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -123,8 +131,12 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc<dyn PubdataBuilder>) -> FinishedL1Batch { + let result = self.inspect_inner( + &mut TracerDispatcher::default(), + VmExecutionMode::Batch, + None, + ); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/l2_block.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/l2_block.rs index d151e3078b4a..a6376852fb28 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/l2_block.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/l2_block.rs @@ -1,7 +1,6 @@ use std::cmp::Ordering; -use zksync_types::{L2BlockNumber, H256}; -use zksync_utils::concat_and_hash; +use zksync_types::{web3::keccak256_concat, L2BlockNumber, H256}; use crate::{ interface::{L2Block, L2BlockEnv}, @@ -53,7 +52,7 @@ impl BootloaderL2Block { } fn update_rolling_hash(&mut self, tx_hash: H256) { - self.txs_rolling_hash = concat_and_hash(self.txs_rolling_hash, tx_hash) + self.txs_rolling_hash = keccak256_concat(self.txs_rolling_hash, tx_hash) } pub(crate) fn interim_version(&self) -> BootloaderL2Block { diff --git a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs index 182f6eff4414..8b367c5c5cae 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs @@ -1,5 +1,4 @@ -use zksync_types::{ethabi, U256}; -use zksync_utils::{bytes_to_be_words, h256_to_u256}; +use zksync_types::{ethabi, h256_to_u256, U256}; use super::tx::BootloaderTx; use crate::{ @@ -25,8 +24,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( .iter() .flat_map(bytecode::encode_call) .collect(); - - bytes_to_be_words(memory_addition) + bytecode::bytes_to_be_words(&memory_addition) } #[allow(clippy::too_many_arguments)] diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs index 1033fff90e46..f81deff48c25 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/bytecode.rs @@ -1,13 +1,12 @@ use itertools::Itertools; -use zksync_types::U256; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; +use zksync_types::{bytecode::BytecodeHash, U256}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, CompressedBytecodeInfo, }, - utils::bytecode, + utils::{bytecode, bytecode::bytes_to_be_words}, vm_1_4_2::Vm, HistoryMode, }; @@ -25,18 +24,15 @@ impl<S: WriteStorage, H: HistoryMode> Vm<S, H> { .storage .get_ptr() .borrow_mut() - .is_bytecode_known(&hash_bytecode(&info.original)) + .is_bytecode_known(&BytecodeHash::for_bytecode(&info.original).value()) }) } } /// Converts bytecode to tokens and hashes it. 
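
The `update_rolling_hash` hunks, repeated for every VM version in this diff, swap `zksync_utils::concat_and_hash` for `keccak256_concat` from `zksync_types::web3`. Assuming the rename preserves the old semantics, each step is keccak-256 over the 64-byte concatenation of the running hash and the next transaction hash:

use zksync_types::{web3::keccak256, H256};

// One step of the txs rolling hash, as the renamed helper is expected to
// compute it (an assumption carried over from `concat_and_hash`).
fn rolling_hash_step(txs_rolling_hash: H256, tx_hash: H256) -> H256 {
    let mut buf = [0u8; 64];
    buf[..32].copy_from_slice(txs_rolling_hash.as_bytes());
    buf[32..].copy_from_slice(tx_hash.as_bytes());
    H256(keccak256(&buf))
}
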
pub(crate) fn bytecode_to_factory_dep(bytecode: Vec<u8>) -> (U256, Vec<U256>) { - let bytecode_hash = hash_bytecode(&bytecode); - let bytecode_hash = U256::from_big_endian(bytecode_hash.as_bytes()); - - let bytecode_words = bytes_to_be_words(bytecode); - + let bytecode_hash = BytecodeHash::for_bytecode(&bytecode).value_u256(); + let bytecode_words = bytes_to_be_words(&bytecode); (bytecode_hash, bytecode_words) } @@ -49,7 +45,11 @@ pub(crate) fn compress_bytecodes<S: WriteStorage>( .enumerate() .sorted_by_key(|(_idx, dep)| *dep) .dedup_by(|x, y| x.1 == y.1) - .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) + .filter(|(_idx, dep)| { + !storage + .borrow_mut() + .is_bytecode_known(&BytecodeHash::for_bytecode(dep).value()) + }) .sorted_by_key(|(idx, _dep)| *idx) .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs index f6e49cd8b149..341584168be4 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use zk_evm_1_4_1::aux_structures::Timestamp; use crate::{ @@ -96,7 +98,7 @@ impl<S: WriteStorage, H: HistoryMode> Vm<S, H> { logs, statistics, refunds, - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), // dynamic bytecode deployment is not supported }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_1_4_2/old_vm/events.rs b/core/lib/multivm/src/versions/vm_1_4_2/old_vm/events.rs index ffa4b4d50b8e..bc5befe3810c 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/old_vm/events.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/old_vm/events.rs @@ -1,8 +1,7 @@ use zk_evm_1_4_1::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use zksync_types::{h256_to_address, L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use crate::interface::VmEvent; +use crate::{interface::VmEvent, utils::bytecode::be_chunks_to_h256_words}; #[derive(Clone)] pub(crate) struct SolidityLikeEvent { @@ -135,7 +134,7 @@ pub(crate) fn merge_events(events: Vec<EventMessage>) -> Vec<SolidityLikeEvent> .filter(|e| e.address == EVENT_WRITER_ADDRESS) .map(|event| { // The events writer events where the first topic is the actual address of the event and the rest of the topics are real topics - let address = h256_to_account_address(&H256(event.topics[0])); + let address = h256_to_address(&H256(event.topics[0])); let topics = event.topics.into_iter().skip(1).collect(); SolidityLikeEvent { diff --git a/core/lib/multivm/src/versions/vm_1_4_2/old_vm/history_recorder.rs b/core/lib/multivm/src/versions/vm_1_4_2/old_vm/history_recorder.rs index d8d32a2b6c50..9e562de59866 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/old_vm/history_recorder.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/old_vm/history_recorder.rs @@ -5,8 +5,7 @@ use zk_evm_1_4_1::{ vm_state::PrimitiveValue, zkevm_opcode_defs::{self}, }; -use zksync_types::{StorageKey, H256, U256}; -use zksync_utils::{h256_to_u256, u256_to_h256}; +use zksync_types::{h256_to_u256, u256_to_h256, StorageKey, H256, U256}; use crate::interface::storage::{StoragePtr, WriteStorage}; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_1_4_2/old_vm/oracles/decommitter.rs index
706e70d4b116..9122a10c9266 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/old_vm/oracles/decommitter.rs @@ -6,16 +6,17 @@ use zk_evm_1_4_1::{ DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, }, }; -use zksync_types::U256; -use zksync_utils::{bytecode::bytecode_len_in_words, bytes_to_be_words, u256_to_h256}; +use zksync_types::{u256_to_h256, U256}; use super::OracleWithHistory; use crate::{ interface::storage::{ReadStorage, StoragePtr}, + utils::bytecode::{bytecode_len_in_words, bytes_to_be_words}, vm_1_4_2::old_vm::history_recorder::{ HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory, }, }; + /// The main job of the DecommiterOracle is to implement the DecommittmentProcessor trait - that is /// used by the VM to 'load' bytecodes into memory. #[derive(Debug)] @@ -60,7 +61,7 @@ impl DecommitterOracle { .load_factory_dep(u256_to_h256(hash)) .expect("Trying to decode unexisting hash"); - let value = bytes_to_be_words(value); + let value = bytes_to_be_words(&value); self.known_bytecodes.insert(hash, value.clone(), timestamp); value } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/oracles/storage.rs b/core/lib/multivm/src/versions/vm_1_4_2/oracles/storage.rs index e8d387621907..170bed0eed5d 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/oracles/storage.rs @@ -6,6 +6,7 @@ use zk_evm_1_4_1::{ zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use zksync_types::{ + u256_to_h256, utils::storage_key_for_eth_balance, writes::{ compression::compress_with_best_strategy, BYTES_PER_DERIVED_KEY, @@ -13,7 +14,6 @@ use zksync_types::{ }, AccountTreeId, Address, StorageKey, StorageLogKind, BOOTLOADER_ADDRESS, U256, }; -use zksync_utils::u256_to_h256; use crate::{ glue::GlueInto, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs index ffe65b5e050b..58318f5d845e 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs @@ -5,19 +5,25 @@ use zk_evm_1_4_1::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, }; -use zksync_types::{writes::StateDiffRecord, AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS}; -use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; +use zksync_types::{ + h256_to_u256, u256_to_h256, writes::StateDiffRecord, AccountTreeId, StorageKey, + L1_MESSENGER_ADDRESS, +}; use crate::{ interface::{ + pubdata::L1MessengerL2ToL1Log, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, }, tracers::dynamic::vm_1_4_1::DynTracer, - utils::events::{ - extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + utils::{ + bytecode::be_words_to_bytes, + events::{ + extract_bytecode_publication_requests_from_l1_messenger, + extract_l2tol1logs_from_l1_messenger, + }, }, vm_1_4_2::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, @@ -119,15 +125,13 @@ impl PubdataTracer { bytecode_publication_requests .iter() .map(|bytecode_publication_request| { - state + let bytecode_words = state .decommittment_processor .known_bytecodes .inner() .get(&h256_to_u256(bytecode_publication_request.bytecode_hash)) - .unwrap() - 
.iter() - .flat_map(u256_to_bytes_be) - .collect() + .unwrap(); + be_words_to_bytes(bytecode_words) }) .collect() } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/refunds.rs index 0da5736bf955..324cad02b4eb 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/refunds.rs @@ -8,8 +8,9 @@ use zk_evm_1_4_1::{ zkevm_opcode_defs::system_params::L1_MESSAGE_PUBDATA_BYTES, }; use zksync_system_constants::{PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; -use zksync_types::{l2_to_l1_log::L2ToL1Log, L1BatchNumber, H256, U256}; -use zksync_utils::{bytecode::bytecode_len_in_bytes, ceil_div_u256, u256_to_h256}; +use zksync_types::{ + ceil_div_u256, l2_to_l1_log::L2ToL1Log, u256_to_h256, L1BatchNumber, H256, U256, +}; use crate::{ interface::{ @@ -18,6 +19,7 @@ use crate::{ L1BatchEnv, Refunds, VmEvent, }, tracers::dynamic::vm_1_4_1::DynTracer, + utils::bytecode::bytecode_len_in_bytes, vm_1_4_2::{ bootloader_state::BootloaderState, constants::{BOOTLOADER_HEAP_PAGE, OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET}, @@ -348,7 +350,7 @@ pub(crate) fn pubdata_published( let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() - .map(|bytecodehash| bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD) + .map(|bytecode_hash| bytecode_len_in_bytes(bytecode_hash) + PUBLISH_BYTECODE_OVERHEAD) .sum(); storage_writes_pubdata_published diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/utils.rs index 5832241d262d..2caf7b060563 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/utils.rs @@ -9,8 +9,7 @@ use zksync_system_constants::{ ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, SHA256_PRECOMPILE_ADDRESS, }; -use zksync_types::U256; -use zksync_utils::u256_to_h256; +use zksync_types::{u256_to_h256, U256}; use crate::vm_1_4_2::{ constants::{ diff --git a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs index d07732ae4350..f938696297b5 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] @@ -64,7 +64,7 @@ impl PubdataInput { #[cfg(test)] mod tests { use zksync_system_constants::{ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS}; - use zksync_utils::u256_to_h256; + use zksync_types::u256_to_h256; use super::*; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs index 38280aa80513..e0f113f8a7ff 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs @@ -1,19 +1,24 @@ use std::convert::TryInto; use zksync_types::{ + address_to_h256, + bytecode::BytecodeHash, ethabi::{encode, Address, Token}, fee::{encoding_len, Fee}, + h256_to_u256, l1::is_l1_tx_type, l2::{L2Tx, 
TransactionType}, transaction_request::{PaymasterParams, TransactionRequest}, web3::Bytes, Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, Nonce, Transaction, H256, U256, }; -use zksync_utils::{address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; -use crate::vm_1_4_2::{ - constants::{L1_TX_TYPE, MAX_GAS_PER_PUBDATA_BYTE, PRIORITY_TX_MAX_GAS_LIMIT}, - utils::overhead::derive_overhead, +use crate::{ + utils::bytecode::bytes_to_be_words, + vm_1_4_2::{ + constants::{L1_TX_TYPE, MAX_GAS_PER_PUBDATA_BYTE, PRIORITY_TX_MAX_GAS_LIMIT}, + utils::overhead::derive_overhead, + }, }; /// This structure represents the data that is used by @@ -190,16 +195,13 @@ impl TransactionData { let factory_deps_hashes = self .factory_deps .iter() - .map(|dep| h256_to_u256(hash_bytecode(dep))) + .map(|dep| BytecodeHash::for_bytecode(dep).value_u256()) .collect(); self.abi_encode_with_custom_factory_deps(factory_deps_hashes) } pub(crate) fn into_tokens(self) -> Vec { - let bytes = self.abi_encode(); - assert!(bytes.len() % 32 == 0); - - bytes_to_be_words(bytes) + bytes_to_be_words(&self.abi_encode()) } pub(crate) fn overhead_gas(&self) -> u32 { diff --git a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/vm_state.rs index 87630a1ff372..52a0dc61d740 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/vm_state.rs @@ -11,14 +11,14 @@ use zk_evm_1_4_1::{ }, }; use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::{block::L2BlockHasher, Address, L2BlockNumber}; -use zksync_utils::h256_to_u256; +use zksync_types::{block::L2BlockHasher, h256_to_u256, Address, L2BlockNumber}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, L1BatchEnv, L2Block, SystemEnv, }, + utils::bytecode::bytes_to_be_words, vm_1_4_2::{ bootloader_state::BootloaderState, constants::BOOTLOADER_HEAP_PAGE, @@ -89,11 +89,7 @@ pub(crate) fn new_vm_state( decommittment_processor.populate( vec![( h256_to_u256(system_env.base_system_smart_contracts.default_aa.hash), - system_env - .base_system_smart_contracts - .default_aa - .code - .clone(), + bytes_to_be_words(&system_env.base_system_smart_contracts.default_aa.code), )], Timestamp(0), ); @@ -101,11 +97,7 @@ pub(crate) fn new_vm_state( memory.populate( vec![( BOOTLOADER_CODE_PAGE, - system_env - .base_system_smart_contracts - .bootloader - .code - .clone(), + bytes_to_be_words(&system_env.base_system_smart_contracts.bootloader.code), )], Timestamp(0), ); diff --git a/core/lib/multivm/src/versions/vm_1_4_2/types/l1_batch.rs b/core/lib/multivm/src/versions/vm_1_4_2/types/l1_batch.rs index b3a54c410f4d..d2233a515eab 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/types/l1_batch.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/types/l1_batch.rs @@ -1,5 +1,4 @@ -use zksync_types::U256; -use zksync_utils::{address_to_u256, h256_to_u256}; +use zksync_types::{address_to_u256, h256_to_u256, U256}; use crate::{interface::L1BatchEnv, vm_1_4_2::utils::fee::get_batch_base_fee}; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/utils/fee.rs b/core/lib/multivm/src/versions/vm_1_4_2/utils/fee.rs index 11f8b6b6c427..b01b18716836 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/utils/fee.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/utils/fee.rs @@ -1,6 +1,5 @@ //! 
Utility functions for vm use zksync_types::fee_model::PubdataIndependentBatchFeeModelInput; -use zksync_utils::ceil_div; use crate::{interface::L1BatchEnv, vm_1_4_2::constants::MAX_GAS_PER_PUBDATA_BYTE}; @@ -18,11 +17,14 @@ pub(crate) fn derive_base_fee_and_gas_per_pubdata( // publish enough public data while compensating us for it. let base_fee = std::cmp::max( fair_l2_gas_price, - ceil_div(fair_pubdata_price, MAX_GAS_PER_PUBDATA_BYTE), + fair_pubdata_price.div_ceil(MAX_GAS_PER_PUBDATA_BYTE), ); - let gas_per_pubdata = ceil_div(fair_pubdata_price, base_fee); - + let gas_per_pubdata = if fair_pubdata_price == 0 { + 0 + } else { + fair_pubdata_price.div_ceil(base_fee) + }; (base_fee, gas_per_pubdata) } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/utils/l2_blocks.rs b/core/lib/multivm/src/versions/vm_1_4_2/utils/l2_blocks.rs index ff5536ae0b97..1095abd82db1 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/utils/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/utils/l2_blocks.rs @@ -4,9 +4,9 @@ use zksync_system_constants::{ SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES, }; use zksync_types::{ - block::unpack_block_info, web3::keccak256, AccountTreeId, L2BlockNumber, StorageKey, H256, U256, + block::unpack_block_info, h256_to_u256, u256_to_h256, web3::keccak256, AccountTreeId, + L2BlockNumber, StorageKey, H256, U256, }; -use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::interface::{ storage::{ReadStorage, StoragePtr}, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index fe2015debd2b..e7c8e7acdd95 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -1,17 +1,18 @@ -use std::mem; +use std::{mem, rc::Rc}; use circuit_sequencer_api_1_4_2::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, utils::events::extract_l2tol1logs_from_l1_messenger, @@ -83,18 +84,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. 
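
The `VmInterface` changes above, mirrored across all versions in this diff, make `push_transaction` return a `PushTransactionResult` carrying the compressed bytecodes of the pushed transaction and thread an explicit pubdata builder into `finish_batch`. A hypothetical caller, assuming the named items are re-exported from `zksync_vm_interface` as the new imports here suggest:

use std::rc::Rc;

use zksync_types::Transaction;
use zksync_vm_interface::{pubdata::PubdataBuilder, FinishedL1Batch, VmInterface};

// Push one transaction, observe its compressed bytecodes, then seal the batch.
// `push_transaction`'s result borrows from the VM's bootloader state, so it is
// read before the next mutable call on `vm`.
fn push_and_seal<V: VmInterface>(
    vm: &mut V,
    tx: Transaction,
    pubdata_builder: Rc<dyn PubdataBuilder>,
) -> FinishedL1Batch {
    let pushed = vm.push_transaction(tx);
    let _compressed_count = pushed.compressed_bytecodes.len();
    vm.finish_batch(pubdata_builder)
}
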
fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(mem::take(tracer), execution_mode, None) + self.inspect_inner(mem::take(tracer), execution_mode.into(), None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -125,8 +131,8 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc<dyn PubdataBuilder>) -> FinishedL1Batch { + let result = self.inspect_inner(TracerDispatcher::default(), VmExecutionMode::Batch, None); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/l2_block.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/l2_block.rs index 47bbbb5bae64..501207e52bd9 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/l2_block.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/l2_block.rs @@ -1,7 +1,6 @@ use std::cmp::Ordering; -use zksync_types::{L2BlockNumber, H256}; -use zksync_utils::concat_and_hash; +use zksync_types::{web3::keccak256_concat, L2BlockNumber, H256}; use crate::{ interface::{L2Block, L2BlockEnv}, @@ -53,7 +52,7 @@ impl BootloaderL2Block { } fn update_rolling_hash(&mut self, tx_hash: H256) { - self.txs_rolling_hash = concat_and_hash(self.txs_rolling_hash, tx_hash) + self.txs_rolling_hash = keccak256_concat(self.txs_rolling_hash, tx_hash) } pub(crate) fn interim_version(&self) -> BootloaderL2Block { diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs index c97d3ff30e49..6605bea1f6b5 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs @@ -1,5 +1,4 @@ -use zksync_types::{ethabi, U256}; -use zksync_utils::{bytes_to_be_words, h256_to_u256}; +use zksync_types::{ethabi, h256_to_u256, U256}; use super::tx::BootloaderTx; use crate::{ @@ -25,8 +24,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( .iter() .flat_map(bytecode::encode_call) .collect(); - - bytes_to_be_words(memory_addition) + bytecode::bytes_to_be_words(&memory_addition) } #[allow(clippy::too_many_arguments)] diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs index 2d6f081a1886..42507a589e50 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/bytecode.rs @@ -1,13 +1,12 @@ use itertools::Itertools; -use zksync_types::U256; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; +use zksync_types::{bytecode::BytecodeHash, U256}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, CompressedBytecodeInfo, }, - utils::bytecode, + utils::{bytecode, bytecode::bytes_to_be_words}, vm_boojum_integration::Vm, HistoryMode, }; @@ -25,18 +24,15 @@ impl<S: WriteStorage, H: HistoryMode> Vm<S, H> { .storage .get_ptr() .borrow_mut() - .is_bytecode_known(&hash_bytecode(&info.original)) + 
.is_bytecode_known(&BytecodeHash::for_bytecode(&info.original).value()) }) } } /// Converts bytecode to tokens and hashes it. pub(crate) fn bytecode_to_factory_dep(bytecode: Vec) -> (U256, Vec) { - let bytecode_hash = hash_bytecode(&bytecode); - let bytecode_hash = U256::from_big_endian(bytecode_hash.as_bytes()); - - let bytecode_words = bytes_to_be_words(bytecode); - + let bytecode_hash = BytecodeHash::for_bytecode(&bytecode).value_u256(); + let bytecode_words = bytes_to_be_words(&bytecode); (bytecode_hash, bytecode_words) } @@ -49,7 +45,11 @@ pub(crate) fn compress_bytecodes( .enumerate() .sorted_by_key(|(_idx, dep)| *dep) .dedup_by(|x, y| x.1 == y.1) - .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) + .filter(|(_idx, dep)| { + !storage + .borrow_mut() + .is_bytecode_known(&BytecodeHash::for_bytecode(dep).value()) + }) .sorted_by_key(|(idx, _dep)| *idx) .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs index b8b939f86731..e942f0fc4245 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs @@ -1,4 +1,4 @@ -use std::mem; +use std::{collections::HashMap, mem}; use zk_evm_1_4_0::aux_structures::Timestamp; @@ -93,7 +93,7 @@ impl Vm { logs, statistics, refunds, - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), // dynamic bytecode deployment is not supported }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/events.rs b/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/events.rs index 1e95d0bc8f35..48db28747bef 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/events.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/events.rs @@ -1,8 +1,7 @@ use zk_evm_1_4_0::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use zksync_types::{h256_to_address, L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use crate::interface::VmEvent; +use crate::{interface::VmEvent, utils::bytecode::be_chunks_to_h256_words}; #[derive(Clone)] pub(crate) struct SolidityLikeEvent { @@ -135,7 +134,7 @@ pub(crate) fn merge_events(events: Vec) -> Vec .filter(|e| e.address == EVENT_WRITER_ADDRESS) .map(|event| { // The events writer events where the first topic is the actual address of the event and the rest of the topics are real topics - let address = h256_to_account_address(&H256(event.topics[0])); + let address = h256_to_address(&H256(event.topics[0])); let topics = event.topics.into_iter().skip(1).collect(); SolidityLikeEvent { diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/history_recorder.rs b/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/history_recorder.rs index 704a774893d3..19da0ffda77c 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/history_recorder.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/history_recorder.rs @@ -5,8 +5,7 @@ use zk_evm_1_4_0::{ vm_state::PrimitiveValue, zkevm_opcode_defs::{self}, }; -use zksync_types::{StorageKey, U256}; -use zksync_utils::{h256_to_u256, u256_to_h256}; +use 
zksync_types::{h256_to_u256, u256_to_h256, StorageKey, U256}; use crate::interface::storage::{StoragePtr, WriteStorage}; diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/oracles/decommitter.rs index eb7db7097920..804bd7179781 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/old_vm/oracles/decommitter.rs @@ -6,12 +6,12 @@ use zk_evm_1_4_0::{ DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, }, }; -use zksync_types::U256; -use zksync_utils::{bytecode::bytecode_len_in_words, bytes_to_be_words, u256_to_h256}; +use zksync_types::{u256_to_h256, U256}; use super::OracleWithHistory; use crate::{ interface::storage::{ReadStorage, StoragePtr}, + utils::bytecode::{bytecode_len_in_words, bytes_to_be_words}, vm_boojum_integration::old_vm::history_recorder::{ HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory, }, @@ -61,7 +61,7 @@ impl DecommitterOracle { .load_factory_dep(u256_to_h256(hash)) .expect("Trying to decode unexisting hash"); - let value = bytes_to_be_words(value); + let value = bytes_to_be_words(&value); self.known_bytecodes.insert(hash, value.clone(), timestamp); value } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/oracles/storage.rs b/core/lib/multivm/src/versions/vm_boojum_integration/oracles/storage.rs index acdfbaaa42e0..b5fc1c5b92f8 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/oracles/storage.rs @@ -6,6 +6,7 @@ use zk_evm_1_4_0::{ zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use zksync_types::{ + u256_to_h256, utils::storage_key_for_eth_balance, writes::{ compression::compress_with_best_strategy, BYTES_PER_DERIVED_KEY, @@ -13,7 +14,6 @@ use zksync_types::{ }, AccountTreeId, Address, StorageKey, StorageLogKind, BOOTLOADER_ADDRESS, U256, }; -use zksync_utils::u256_to_h256; use crate::{ interface::storage::{StoragePtr, WriteStorage}, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs index 326a57896124..6396d143b401 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs @@ -5,19 +5,25 @@ use zk_evm_1_4_0::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, }; -use zksync_types::{writes::StateDiffRecord, AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS}; -use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; +use zksync_types::{ + h256_to_u256, u256_to_h256, writes::StateDiffRecord, AccountTreeId, StorageKey, + L1_MESSENGER_ADDRESS, +}; use crate::{ interface::{ + pubdata::L1MessengerL2ToL1Log, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, }, tracers::dynamic::vm_1_4_0::DynTracer, - utils::events::{ - extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + utils::{ + bytecode::be_words_to_bytes, + events::{ + extract_bytecode_publication_requests_from_l1_messenger, + extract_l2tol1logs_from_l1_messenger, + }, }, vm_boojum_integration::{ 
bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, @@ -97,15 +103,13 @@ impl PubdataTracer { bytecode_publication_requests .iter() .map(|bytecode_publication_request| { - state + let bytecode_words = state .decommittment_processor .known_bytecodes .inner() .get(&h256_to_u256(bytecode_publication_request.bytecode_hash)) - .unwrap() - .iter() - .flat_map(u256_to_bytes_be) - .collect() + .unwrap(); + be_words_to_bytes(bytecode_words) }) .collect() } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/refunds.rs index ffbb1d80a80e..682cbda5252a 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/refunds.rs @@ -7,8 +7,7 @@ use zk_evm_1_4_0::{ vm_state::VmLocalState, }; use zksync_system_constants::{PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; -use zksync_types::{l2_to_l1_log::L2ToL1Log, L1BatchNumber, U256}; -use zksync_utils::{bytecode::bytecode_len_in_bytes, ceil_div_u256, u256_to_h256}; +use zksync_types::{ceil_div_u256, l2_to_l1_log::L2ToL1Log, u256_to_h256, L1BatchNumber, U256}; use crate::{ interface::{ @@ -17,6 +16,7 @@ use crate::{ L1BatchEnv, Refunds, VmEvent, }, tracers::dynamic::vm_1_4_0::DynTracer, + utils::bytecode::bytecode_len_in_bytes, vm_boojum_integration::{ bootloader_state::BootloaderState, constants::{BOOTLOADER_HEAP_PAGE, OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET}, @@ -339,7 +339,7 @@ pub(crate) fn pubdata_published( let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() - .map(|bytecodehash| bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD) + .map(|bytecode_hash| bytecode_len_in_bytes(bytecode_hash) + PUBLISH_BYTECODE_OVERHEAD) .sum(); storage_writes_pubdata_published diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/utils.rs index aafdab9ee428..e916d6e0e66c 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/utils.rs @@ -9,8 +9,7 @@ use zksync_system_constants::{ ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, SHA256_PRECOMPILE_ADDRESS, }; -use zksync_types::U256; -use zksync_utils::u256_to_h256; +use zksync_types::{u256_to_h256, U256}; use crate::vm_boojum_integration::{ constants::{ diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs index 9df9009831f4..cb400ab5fa7d 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] @@ -64,7 +64,7 @@ impl PubdataInput { #[cfg(test)] mod tests { use zksync_system_constants::{ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS}; - use zksync_utils::u256_to_h256; + use zksync_types::u256_to_h256; use super::*; diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs 
b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs index 8bf575effe06..9011fa486da2 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs @@ -1,19 +1,24 @@ use std::convert::TryInto; use zksync_types::{ + address_to_h256, + bytecode::BytecodeHash, ethabi::{encode, Address, Token}, fee::{encoding_len, Fee}, + h256_to_u256, l1::is_l1_tx_type, l2::{L2Tx, TransactionType}, transaction_request::{PaymasterParams, TransactionRequest}, web3::Bytes, Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, Nonce, Transaction, H256, U256, }; -use zksync_utils::{address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; -use crate::vm_boojum_integration::{ - constants::MAX_GAS_PER_PUBDATA_BYTE, - utils::overhead::{get_amortized_overhead, OverheadCoefficients}, +use crate::{ + utils::bytecode::bytes_to_be_words, + vm_boojum_integration::{ + constants::MAX_GAS_PER_PUBDATA_BYTE, + utils::overhead::{get_amortized_overhead, OverheadCoefficients}, + }, }; /// This structure represents the data that is used by @@ -190,16 +195,13 @@ impl TransactionData { let factory_deps_hashes = self .factory_deps .iter() - .map(|dep| h256_to_u256(hash_bytecode(dep))) + .map(|dep| BytecodeHash::for_bytecode(dep).value_u256()) .collect(); self.abi_encode_with_custom_factory_deps(factory_deps_hashes) } pub(crate) fn into_tokens(self) -> Vec { - let bytes = self.abi_encode(); - assert!(bytes.len() % 32 == 0); - - bytes_to_be_words(bytes) + bytes_to_be_words(&self.abi_encode()) } pub(crate) fn effective_gas_price_per_pubdata(&self, block_gas_price_per_pubdata: u32) -> u32 { diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/vm_state.rs index 5b6b9b2eca17..dc41926c4485 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/vm_state.rs @@ -11,14 +11,14 @@ use zk_evm_1_4_0::{ }, }; use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::{block::L2BlockHasher, Address, L2BlockNumber}; -use zksync_utils::h256_to_u256; +use zksync_types::{block::L2BlockHasher, h256_to_u256, Address, L2BlockNumber}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, L1BatchEnv, L2Block, SystemEnv, }, + utils::bytecode::bytes_to_be_words, vm_boojum_integration::{ bootloader_state::BootloaderState, constants::BOOTLOADER_HEAP_PAGE, @@ -89,11 +89,7 @@ pub(crate) fn new_vm_state( decommittment_processor.populate( vec![( h256_to_u256(system_env.base_system_smart_contracts.default_aa.hash), - system_env - .base_system_smart_contracts - .default_aa - .code - .clone(), + bytes_to_be_words(&system_env.base_system_smart_contracts.default_aa.code), )], Timestamp(0), ); @@ -101,11 +97,7 @@ pub(crate) fn new_vm_state( memory.populate( vec![( BOOTLOADER_CODE_PAGE, - system_env - .base_system_smart_contracts - .bootloader - .code - .clone(), + bytes_to_be_words(&system_env.base_system_smart_contracts.bootloader.code), )], Timestamp(0), ); diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/types/l1_batch.rs b/core/lib/multivm/src/versions/vm_boojum_integration/types/l1_batch.rs index 386dc040099b..91082e98f9d1 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/types/l1_batch.rs +++ 
b/core/lib/multivm/src/versions/vm_boojum_integration/types/l1_batch.rs @@ -1,5 +1,4 @@ -use zksync_types::U256; -use zksync_utils::{address_to_u256, h256_to_u256}; +use zksync_types::{address_to_u256, h256_to_u256, U256}; use crate::{interface::L1BatchEnv, vm_boojum_integration::utils::fee::get_batch_base_fee}; diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/utils/fee.rs b/core/lib/multivm/src/versions/vm_boojum_integration/utils/fee.rs index 8e785775697a..6fa1a38828e0 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/utils/fee.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/utils/fee.rs @@ -1,6 +1,5 @@ //! Utility functions for vm use zksync_types::fee_model::L1PeggedBatchFeeModelInput; -use zksync_utils::ceil_div; use crate::{ interface::L1BatchEnv, @@ -12,8 +11,11 @@ use crate::{ /// Calculates the amount of gas required to publish one byte of pubdata pub fn base_fee_to_gas_per_pubdata(l1_gas_price: u64, base_fee: u64) -> u64 { let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price); - - ceil_div(eth_price_per_pubdata_byte, base_fee) + if eth_price_per_pubdata_byte == 0 { + 0 + } else { + eth_price_per_pubdata_byte.div_ceil(base_fee) + } } /// Calculates the base fee and gas per pubdata for the given L1 gas price. @@ -30,7 +32,7 @@ pub(crate) fn derive_base_fee_and_gas_per_pubdata( // publish enough public data while compensating us for it. let base_fee = std::cmp::max( fair_l2_gas_price, - ceil_div(eth_price_per_pubdata_byte, MAX_GAS_PER_PUBDATA_BYTE), + eth_price_per_pubdata_byte.div_ceil(MAX_GAS_PER_PUBDATA_BYTE), ); ( diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/utils/l2_blocks.rs b/core/lib/multivm/src/versions/vm_boojum_integration/utils/l2_blocks.rs index ff5536ae0b97..1095abd82db1 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/utils/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/utils/l2_blocks.rs @@ -4,9 +4,9 @@ use zksync_system_constants::{ SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES, }; use zksync_types::{ - block::unpack_block_info, web3::keccak256, AccountTreeId, L2BlockNumber, StorageKey, H256, U256, + block::unpack_block_info, h256_to_u256, u256_to_h256, web3::keccak256, AccountTreeId, + L2BlockNumber, StorageKey, H256, U256, }; -use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::interface::{ storage::{ReadStorage, StoragePtr}, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/utils/overhead.rs b/core/lib/multivm/src/versions/vm_boojum_integration/utils/overhead.rs index 02fe0b8b3000..c6d299075f2a 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/utils/overhead.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/utils/overhead.rs @@ -1,7 +1,6 @@ use zk_evm_1_4_0::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; use zksync_system_constants::MAX_L2_TX_GAS_LIMIT; -use zksync_types::{l1::is_l1_tx_type, U256}; -use zksync_utils::ceil_div_u256; +use zksync_types::{ceil_div_u256, l1::is_l1_tx_type, U256}; use crate::vm_boojum_integration::constants::{ BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, MAX_TXS_IN_BLOCK, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index ebc0a511d203..43c9900486db 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -1,15 +1,18 @@ +use std::rc::Rc; + use 
circuit_sequencer_api_1_4_0::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, utils::events::extract_l2tol1logs_from_l1_messenger, @@ -81,18 +84,23 @@ impl<S: WriteStorage, H: HistoryMode> Vm<S, H> { impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> { type TracerDispatcher = TracerDispatcher<S, H::VmBoojumIntegration>; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode) + self.inspect_inner(tracer, execution_mode.into()) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -124,8 +132,8 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc<dyn PubdataBuilder>) -> FinishedL1Batch { + let result = self.inspect_inner(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/l2_block.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/l2_block.rs index adb406eec789..4f05ef30a46d 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/l2_block.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/l2_block.rs @@ -1,7 +1,6 @@ use std::cmp::Ordering; -use zksync_types::{L2BlockNumber, H256}; -use zksync_utils::concat_and_hash; +use zksync_types::{web3::keccak256_concat, L2BlockNumber, H256}; use super::{snapshot::L2BlockSnapshot, tx::BootloaderTx}; use crate::{ @@ -51,7 +50,7 @@ impl BootloaderL2Block { } fn update_rolling_hash(&mut self, tx_hash: H256) { - self.txs_rolling_hash = concat_and_hash(self.txs_rolling_hash, tx_hash) + self.txs_rolling_hash = keccak256_concat(self.txs_rolling_hash, tx_hash) } pub(crate) fn make_snapshot(&self) -> L2BlockSnapshot { diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs index 15b4daf02a77..e104eba6ef4f 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs @@ -1,7 +1,7 @@ use std::cmp::Ordering; use once_cell::sync::OnceCell; -use zksync_types::{L2ChainId, U256}; +use zksync_types::{L2ChainId, ProtocolVersionId, U256}; use super::{ l2_block::BootloaderL2Block, @@ -10,8 +10,11 @@ use super::{ BootloaderStateSnapshot, }; use crate::{ - interface::{BootloaderMemory, 
CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode}, - versions::vm_fast::{pubdata::PubdataInput, transaction_data::TransactionData}, + interface::{ + pubdata::{PubdataBuilder, PubdataInput}, + BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode, + }, + versions::vm_fast::transaction_data::TransactionData, vm_latest::{constants::TX_DESCRIPTION_OFFSET, utils::l2_blocks::assert_next_block}, }; @@ -42,6 +45,8 @@ pub struct BootloaderState { free_tx_offset: usize, /// Information about the pubdata that will be needed to supply to the L1Messenger pubdata_information: OnceCell, + /// Protocol version. + protocol_version: ProtocolVersionId, } impl BootloaderState { @@ -49,6 +54,7 @@ impl BootloaderState { execution_mode: TxExecutionMode, initial_memory: BootloaderMemory, first_l2_block: L2BlockEnv, + protocol_version: ProtocolVersionId, ) -> Self { let l2_block = BootloaderL2Block::new(first_l2_block, 0); Self { @@ -59,6 +65,7 @@ impl BootloaderState { execution_mode, free_tx_offset: 0, pubdata_information: Default::default(), + protocol_version, } } @@ -139,12 +146,23 @@ impl BootloaderState { .expect("Pubdata information is not set") } + pub(crate) fn settlement_layer_pubdata(&self, pubdata_builder: &dyn PubdataBuilder) -> Vec { + let pubdata_information = self + .pubdata_information + .get() + .expect("Pubdata information is not set"); + pubdata_builder.settlement_layer_pubdata(pubdata_information, self.protocol_version) + } + fn last_mut_l2_block(&mut self) -> &mut BootloaderL2Block { self.l2_blocks.last_mut().unwrap() } /// Apply all bootloader transaction to the initial memory - pub(crate) fn bootloader_memory(&self) -> BootloaderMemory { + pub(crate) fn bootloader_memory( + &self, + pubdata_builder: &dyn PubdataBuilder, + ) -> BootloaderMemory { let mut initial_memory = self.initial_memory.clone(); let mut offset = 0; let mut compressed_bytecodes_offset = 0; @@ -172,11 +190,15 @@ impl BootloaderState { let pubdata_information = self .pubdata_information - .clone() - .into_inner() + .get() .expect("Empty pubdata information"); - apply_pubdata_to_memory(&mut initial_memory, pubdata_information); + apply_pubdata_to_memory( + &mut initial_memory, + pubdata_builder, + pubdata_information, + self.protocol_version, + ); initial_memory } diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs index 770f232019bf..9eb55d794235 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs @@ -1,11 +1,12 @@ -use zksync_types::{ethabi, U256}; -use zksync_utils::{bytes_to_be_words, h256_to_u256}; +use zksync_types::{ethabi, h256_to_u256, ProtocolVersionId, U256}; use super::{l2_block::BootloaderL2Block, tx::BootloaderTx}; use crate::{ - interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, + interface::{ + pubdata::{PubdataBuilder, PubdataInput}, + BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode, + }, utils::bytecode, - versions::vm_fast::pubdata::PubdataInput, vm_latest::constants::{ BOOTLOADER_TX_DESCRIPTION_OFFSET, BOOTLOADER_TX_DESCRIPTION_SIZE, COMPRESSED_BYTECODES_OFFSET, OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET, @@ -22,8 +23,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( .iter() .flat_map(bytecode::encode_call) .collect(); - - bytes_to_be_words(memory_addition) + bytecode::bytes_to_be_words(&memory_addition) } #[allow(clippy::too_many_arguments)] @@ -120,26 +120,54 @@ 
fn apply_l2_block_inner( ]) } +fn bootloader_memory_input( + pubdata_builder: &dyn PubdataBuilder, + input: &PubdataInput, + protocol_version: ProtocolVersionId, +) -> Vec { + let l2_da_validator_address = pubdata_builder.l2_da_validator(); + let operator_input = pubdata_builder.l1_messenger_operator_input(input, protocol_version); + ethabi::encode(&[ + ethabi::Token::Address(l2_da_validator_address), + ethabi::Token::Bytes(operator_input), + ]) +} + pub(crate) fn apply_pubdata_to_memory( memory: &mut BootloaderMemory, - pubdata_information: PubdataInput, + pubdata_builder: &dyn PubdataBuilder, + pubdata_information: &PubdataInput, + protocol_version: ProtocolVersionId, ) { - // Skipping two slots as they will be filled by the bootloader itself: - // - One slot is for the selector of the call to the L1Messenger. - // - The other slot is for the 0x20 offset for the calldata. - let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 2; - - // Need to skip first word as it represents array offset - // while bootloader expects only [len || data] - let pubdata = ethabi::encode(&[ethabi::Token::Bytes( - pubdata_information.build_pubdata(true), - )])[32..] - .to_vec(); - - assert!( - pubdata.len() / 32 <= OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS - 2, - "The encoded pubdata is too big" - ); + let (l1_messenger_pubdata_start_slot, pubdata) = if protocol_version.is_pre_gateway() { + // Skipping two slots as they will be filled by the bootloader itself: + // - One slot is for the selector of the call to the L1Messenger. + // - The other slot is for the 0x20 offset for the calldata. + let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 2; + // Need to skip first word as it represents array offset + // while bootloader expects only [len || data] + let pubdata = ethabi::encode(&[ethabi::Token::Bytes( + pubdata_builder.l1_messenger_operator_input(pubdata_information, protocol_version), + )])[32..] + .to_vec(); + assert!( + pubdata.len() / 32 <= OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS - 2, + "The encoded pubdata is too big" + ); + (l1_messenger_pubdata_start_slot, pubdata) + } else { + // Skipping the first slot as it will be filled by the bootloader itself: + // It is for the selector of the call to the L1Messenger. 
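// A minimal illustrative sketch (assumed helper, not part of this patch) of the slot
// arithmetic in the two branches above: pre-gateway reserves two slots (the L1Messenger
// call selector plus the 0x20 calldata offset), so the payload may occupy at most
// `slots - 2` words; post-gateway reserves only the selector slot, and the ABI-encoded
// payload carries its own offset word, hence the strict `<` bound in the assert below.
fn pubdata_start_slot(offset: usize, is_pre_gateway: bool) -> usize {
    if is_pre_gateway {
        offset + 2
    } else {
        offset + 1
    }
}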
+ let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 1; + let pubdata = + bootloader_memory_input(pubdata_builder, pubdata_information, protocol_version); + assert!( + // Note that unlike the previous version, the difference is `1`, since now it also includes the offset + pubdata.len() / 32 < OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS, + "The encoded pubdata is too big" + ); + (l1_messenger_pubdata_start_slot, pubdata) + }; pubdata .chunks(32) diff --git a/core/lib/multivm/src/versions/vm_fast/bytecode.rs b/core/lib/multivm/src/versions/vm_fast/bytecode.rs index b75e33a21b05..4dc52951c16c 100644 --- a/core/lib/multivm/src/versions/vm_fast/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_fast/bytecode.rs @@ -1,6 +1,5 @@ use itertools::Itertools; -use zksync_types::H256; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; +use zksync_types::{bytecode::BytecodeHash, h256_to_u256, H256}; use super::Vm; use crate::{ @@ -15,7 +14,7 @@ impl Vm { .get_last_tx_compressed_bytecodes() .iter() .any(|info| { - let hash_bytecode = hash_bytecode(&info.original); + let hash_bytecode = BytecodeHash::for_bytecode(&info.original).value(); let is_bytecode_known = self.world.storage.is_bytecode_known(&hash_bytecode); let is_bytecode_known_cache = self @@ -36,7 +35,7 @@ pub(crate) fn compress_bytecodes( .enumerate() .sorted_by_key(|(_idx, dep)| *dep) .dedup_by(|x, y| x.1 == y.1) - .filter(|(_idx, dep)| !is_bytecode_known(hash_bytecode(dep))) + .filter(|(_idx, dep)| !is_bytecode_known(BytecodeHash::for_bytecode(dep).value())) .sorted_by_key(|(idx, _dep)| *idx) .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() } diff --git a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs index b48ec7eacb0b..9c1c0b7dfb7f 100644 --- a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs @@ -1,5 +1,7 @@ use circuit_sequencer_api_1_5_0::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use zksync_vm2::interface::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer}; +use zksync_vm2::interface::{ + CycleStats, GlobalStateInterface, Opcode, OpcodeType, ShouldStop, Tracer, +}; use zksync_vm_interface::CircuitStatistic; use crate::vm_latest::tracers::circuits_capacity::*; @@ -7,7 +9,7 @@ use crate::vm_latest::tracers::circuits_capacity::*; /// VM tracer tracking [`CircuitStatistic`]s. Statistics generally depend on the number of times some opcodes were invoked, /// and, for precompiles, invocation complexity (e.g., how many hashing cycles `keccak256` required).
#[derive(Debug, Default, Clone, PartialEq)] -pub struct CircuitsTracer { +pub(super) struct CircuitsTracer { main_vm_cycles: u32, ram_permutation_cycles: u32, storage_application_cycles: u32, @@ -24,7 +26,10 @@ } impl Tracer for CircuitsTracer { - fn after_instruction<OP: OpcodeType, S: StateInterface>(&mut self, _state: &mut S) { + fn after_instruction<OP: OpcodeType, S: GlobalStateInterface>( + &mut self, + _: &mut S, + ) -> ShouldStop { self.main_vm_cycles += 1; match OP::VALUE { @@ -110,6 +115,8 @@ self.ram_permutation_cycles += UMA_READ_RAM_CYCLES; } } + + ShouldStop::Continue } fn on_extra_prover_cycles(&mut self, stats: CycleStats) { diff --git a/core/lib/multivm/src/versions/vm_fast/events.rs b/core/lib/multivm/src/versions/vm_fast/events.rs index 294e8adce32b..4fb26d306897 100644 --- a/core/lib/multivm/src/versions/vm_fast/events.rs +++ b/core/lib/multivm/src/versions/vm_fast/events.rs @@ -1,5 +1,4 @@ -use zksync_types::{L1BatchNumber, H256}; -use zksync_utils::h256_to_account_address; +use zksync_types::{h256_to_address, L1BatchNumber, H256}; use zksync_vm2::interface::Event; use crate::interface::VmEvent; @@ -16,7 +15,7 @@ impl EventAccumulator { fn into_vm_event(self, block_number: L1BatchNumber) -> VmEvent { VmEvent { location: (block_number, self.tx_number_in_block as u32), - address: h256_to_account_address(&H256(self.topics[0])), + address: h256_to_address(&H256(self.topics[0])), indexed_topics: self.topics[1..].iter().map(H256::from).collect(), value: self.data, } } diff --git a/core/lib/multivm/src/versions/vm_fast/evm_deploy_tracer.rs b/core/lib/multivm/src/versions/vm_fast/evm_deploy_tracer.rs new file mode 100644 index 000000000000..c443c99ccf9a --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/evm_deploy_tracer.rs @@ -0,0 +1,88 @@ +//! Tracer tracking deployment of EVM bytecodes during VM execution. + +use std::{cell::RefCell, collections::HashMap, rc::Rc}; + +use zksync_system_constants::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; +use zksync_types::{bytecode::BytecodeHash, ethabi, U256}; +use zksync_vm2::interface::{ + CallframeInterface, CallingMode, GlobalStateInterface, Opcode, OpcodeType, ShouldStop, Tracer, +}; + +use super::utils::read_fat_pointer; + +/// Container for dynamic bytecodes added by [`EvmDeployTracer`]. +#[derive(Debug, Clone, Default)] +pub(super) struct DynamicBytecodes(Rc<RefCell<HashMap<U256, Vec<u8>>>>); + +impl DynamicBytecodes { + pub(super) fn map<R>(&self, hash: U256, f: impl FnOnce(&[u8]) -> R) -> Option<R> { + self.0.borrow().get(&hash).map(|code| f(code)) + } + + fn insert(&self, hash: U256, bytecode: Vec<u8>) { + self.0.borrow_mut().insert(hash, bytecode); + } +} + +/// Tracer that tracks EVM bytecode deployments. +/// +/// Unlike EraVM bytecodes, EVM bytecodes are *dynamic*; they are not necessarily known before transaction execution. +/// (EraVM bytecodes must be present in the storage or be mentioned in the `factory_deps` field of a transaction.) +/// Hence, it's necessary to track which EVM bytecodes were deployed so that they are persisted after VM execution.
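// An illustrative usage sketch (not part of this patch) for the types defined in this
// file: once the tracer has recorded a `publishEVMBytecode` call, the shared
// `DynamicBytecodes` handle can be queried by bytecode hash without cloning the code.
fn dynamic_bytecode_len(bytecodes: &DynamicBytecodes, hash: U256) -> Option<usize> {
    // `map` borrows the stored bytecode and applies the closure to it.
    bytecodes.map(hash, |code| code.len())
}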
+#[derive(Debug)] +pub(super) struct EvmDeployTracer { + tracked_signature: [u8; 4], + bytecodes: DynamicBytecodes, +} + +impl EvmDeployTracer { + pub(super) fn new(bytecodes: DynamicBytecodes) -> Self { + let tracked_signature = + ethabi::short_signature("publishEVMBytecode", &[ethabi::ParamType::Bytes]); + Self { + tracked_signature, + bytecodes, + } + } + + fn handle_far_call(&self, state: &mut impl GlobalStateInterface) { + let from = state.current_frame().caller(); + let to = state.current_frame().code_address(); + if from != CONTRACT_DEPLOYER_ADDRESS || to != KNOWN_CODES_STORAGE_ADDRESS { + return; + } + + let data = read_fat_pointer(state, state.read_register(1).0); + if data.len() < 4 { + return; + } + let (signature, data) = data.split_at(4); + if signature != self.tracked_signature { + return; + } + + match ethabi::decode(&[ethabi::ParamType::Bytes], data) { + Ok(decoded) => { + // `unwrap`s should be safe since the function signature is checked above. + let published_bytecode = decoded.into_iter().next().unwrap().into_bytes().unwrap(); + let bytecode_hash = + BytecodeHash::for_evm_bytecode(&published_bytecode).value_u256(); + self.bytecodes.insert(bytecode_hash, published_bytecode); + } + Err(err) => tracing::error!("Unable to decode `publishEVMBytecode` call: {err}"), + } + } +} + +impl Tracer for EvmDeployTracer { + #[inline(always)] + fn after_instruction<OP: OpcodeType, S: GlobalStateInterface>( + &mut self, + state: &mut S, + ) -> ShouldStop { + if matches!(OP::VALUE, Opcode::FarCall(CallingMode::Normal)) { + self.handle_far_call(state); + } + ShouldStop::Continue + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/glue.rs b/core/lib/multivm/src/versions/vm_fast/glue.rs index c2d38f351c04..f1a43d557358 100644 --- a/core/lib/multivm/src/versions/vm_fast/glue.rs +++ b/core/lib/multivm/src/versions/vm_fast/glue.rs @@ -1,5 +1,7 @@ -use zksync_types::l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log}; -use zksync_utils::u256_to_h256; +use zksync_types::{ + l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log}, + u256_to_h256, +}; use zksync_vm2::interface; use crate::glue::GlueFrom; diff --git a/core/lib/multivm/src/versions/vm_fast/initial_bootloader_memory.rs b/core/lib/multivm/src/versions/vm_fast/initial_bootloader_memory.rs index b3bf15cb1be5..89b22d328ac5 100644 --- a/core/lib/multivm/src/versions/vm_fast/initial_bootloader_memory.rs +++ b/core/lib/multivm/src/versions/vm_fast/initial_bootloader_memory.rs @@ -1,5 +1,4 @@ -use zksync_types::U256; -use zksync_utils::{address_to_u256, h256_to_u256}; +use zksync_types::{address_to_u256, h256_to_u256, U256}; use crate::{interface::L1BatchEnv, vm_latest::utils::fee::get_batch_base_fee}; diff --git a/core/lib/multivm/src/versions/vm_fast/mod.rs b/core/lib/multivm/src/versions/vm_fast/mod.rs index bb5a342bff28..840653b63b08 100644 --- a/core/lib/multivm/src/versions/vm_fast/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/mod.rs @@ -1,17 +1,20 @@ -pub use zksync_vm2::interface::Tracer; +pub use zksync_vm2::interface; -pub use self::{circuits_tracer::CircuitsTracer, vm::Vm}; +pub(crate) use self::version::FastVmVersion; +pub use self::vm::Vm; mod bootloader_state; mod bytecode; mod circuits_tracer; mod events; +mod evm_deploy_tracer; mod glue; mod hook; mod initial_bootloader_memory; -mod pubdata; mod refund; #[cfg(test)] mod tests; mod transaction_data; +mod utils; +mod version; mod vm; diff --git a/core/lib/multivm/src/versions/vm_fast/pubdata.rs b/core/lib/multivm/src/versions/vm_fast/pubdata.rs deleted file mode 100644 index d07732ae4350..000000000000 ---
a/core/lib/multivm/src/versions/vm_fast/pubdata.rs +++ /dev/null @@ -1,123 +0,0 @@ -use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; - -use crate::utils::events::L1MessengerL2ToL1Log; - -/// Struct based on which the pubdata blob is formed -#[derive(Debug, Clone, Default)] -pub(crate) struct PubdataInput { - pub(crate) user_logs: Vec<L1MessengerL2ToL1Log>, - pub(crate) l2_to_l1_messages: Vec<Vec<u8>>, - pub(crate) published_bytecodes: Vec<Vec<u8>>, - pub(crate) state_diffs: Vec<StateDiffRecord>, -} - -impl PubdataInput { - pub(crate) fn build_pubdata(self, with_uncompressed_state_diffs: bool) -> Vec<u8> { - let mut l1_messenger_pubdata = vec![]; - - let PubdataInput { - user_logs, - l2_to_l1_messages, - published_bytecodes, - state_diffs, - } = self; - - // Encoding user L2->L1 logs. - // Format: `[(numberOfL2ToL1Logs as u32) || l2tol1logs[1] || ... || l2tol1logs[n]]` - l1_messenger_pubdata.extend((user_logs.len() as u32).to_be_bytes()); - for l2tol1log in user_logs { - l1_messenger_pubdata.extend(l2tol1log.packed_encoding()); - } - - // Encoding L2->L1 messages - // Format: `[(numberOfMessages as u32) || (messages[1].len() as u32) || messages[1] || ... || (messages[n].len() as u32) || messages[n]]` - l1_messenger_pubdata.extend((l2_to_l1_messages.len() as u32).to_be_bytes()); - for message in l2_to_l1_messages { - l1_messenger_pubdata.extend((message.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(message); - } - - // Encoding bytecodes - // Format: `[(numberOfBytecodes as u32) || (bytecodes[1].len() as u32) || bytecodes[1] || ... || (bytecodes[n].len() as u32) || bytecodes[n]]` - l1_messenger_pubdata.extend((published_bytecodes.len() as u32).to_be_bytes()); - for bytecode in published_bytecodes { - l1_messenger_pubdata.extend((bytecode.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(bytecode); - } - - // Encoding state diffs - // Format: `[size of compressed state diffs u32 || compressed state diffs || (# state diffs: initial + repeated) as u32 || sorted state diffs by <...>]` - let state_diffs_compressed = compress_state_diffs(state_diffs.clone()); - l1_messenger_pubdata.extend(state_diffs_compressed); - - if with_uncompressed_state_diffs { - l1_messenger_pubdata.extend((state_diffs.len() as u32).to_be_bytes()); - for state_diff in state_diffs { - l1_messenger_pubdata.extend(state_diff.encode_padded()); - } - } - - l1_messenger_pubdata - } -} - -#[cfg(test)] -mod tests { - use zksync_system_constants::{ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS}; - use zksync_utils::u256_to_h256; - - use super::*; - - #[test] - fn test_basic_pubdata_building() { - // Just using some constant addresses for tests - let addr1 = BOOTLOADER_ADDRESS; - let addr2 = ACCOUNT_CODE_STORAGE_ADDRESS; - - let user_logs = vec![L1MessengerL2ToL1Log { - l2_shard_id: 0, - is_service: false, - tx_number_in_block: 0, - sender: addr1, - key: 1.into(), - value: 128.into(), - }]; - - let l2_to_l1_messages = vec![hex::decode("deadbeef").unwrap()]; - - let published_bytecodes = vec![hex::decode("aaaabbbb").unwrap()]; - - // For covering more cases, we have two state diffs: - // One with enumeration index present (and so it is a repeated write) and the one without it.
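// A minimal self-contained sketch (not part of this patch) of the length-prefixed
// layout that `build_pubdata` used above for L2->L1 messages:
// `[count as u32 || (len(m1) as u32) || m1 || ... || (len(mn) as u32) || mn]`, all big-endian.
fn encode_messages(messages: &[Vec<u8>]) -> Vec<u8> {
    let mut out = Vec::new();
    out.extend((messages.len() as u32).to_be_bytes());
    for message in messages {
        out.extend((message.len() as u32).to_be_bytes());
        out.extend(message);
    }
    out
}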
- let state_diffs = vec![ - StateDiffRecord { - address: addr2, - key: 155.into(), - derived_key: u256_to_h256(125.into()).0, - enumeration_index: 12, - initial_value: 11.into(), - final_value: 12.into(), - }, - StateDiffRecord { - address: addr2, - key: 156.into(), - derived_key: u256_to_h256(126.into()).0, - enumeration_index: 0, - initial_value: 0.into(), - final_value: 14.into(), - }, - ]; - - let input = PubdataInput { - user_logs, - l2_to_l1_messages, - published_bytecodes, - state_diffs, - }; - - let pubdata = - ethabi::encode(&[ethabi::Token::Bytes(input.build_pubdata(true))])[32..].to_vec(); - - assert_eq!(hex::encode(pubdata), "00000000000000000000000000000000000000000000000000000000000002c700000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000004aaaabbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"); - } -} diff --git a/core/lib/multivm/src/versions/vm_fast/refund.rs b/core/lib/multivm/src/versions/vm_fast/refund.rs index 05648acddcfe..13637ff97122 100644 --- a/core/lib/multivm/src/versions/vm_fast/refund.rs +++ b/core/lib/multivm/src/versions/vm_fast/refund.rs @@ -1,5 +1,4 @@ -use zksync_types::{H256, U256}; -use zksync_utils::ceil_div_u256; +use zksync_types::{ceil_div_u256, H256, U256}; use crate::{interface::L1BatchEnv, vm_latest::utils::fee::get_batch_base_fee}; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs index dd407c616682..bb66eb2f7705 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs @@ -1,392 +1,6 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use itertools::Itertools; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, - l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use super::{ - tester::{get_empty_storage, VmTesterBuilder}, - 
utils::{get_complex_upgrade_abi, read_complex_upgrade}, -}; -use crate::{ - interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::default_l1_batch, - vm_latest::constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, -}; - -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec<Vec<u8>>, - bytecodes: Vec<Vec<u8>>, - state_diffs: Vec<StateDiffRecord>, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec<u8>, -} - -const CALLS_PER_TX: usize = 1_000; -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec<Vec<u8>> { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .chunks(CALLS_PER_TX) - .into_iter() - .map(|chunk| { - complex_upgrade - .function("mimicCalls") - .unwrap() - .encode_input(&[Token::Array(chunk.collect_vec())]) - .unwrap() - }) - .collect_vec(); - - encoded_calls -} - -struct TestStatistics { - pub max_used_gas: u32, - pub circuit_statistics: u64, - pub execution_metrics_size: u64, -} - -struct StatisticsTagged { - pub statistics: TestStatistics, - pub tag: String, -} - -fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { - let mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute - // the gas limit - let batch_env = L1BatchEnv { - fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), - ..default_l1_batch(zksync_types::L1BatchNumber(1)) - }; - - let mut vm = VmTesterBuilder::new() - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_l1_batch_env(batch_env) - .build(); - - let bytecodes = test_data.bytecodes.iter().map(Vec::as_slice); -
vm.vm.insert_bytecodes(bytecodes); - - let txs_data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - - for (i, data) in txs_data.into_iter().enumerate() { - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), - calldata: data, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction {i} wasn't successful for input: {test_data:#?}" - ); - } - - // Now we count how much gas was spent at the end of the batch - // It is assumed that the top level frame is the bootloader - vm.vm.enforce_state_diffs(test_data.state_diffs.clone()); - let gas_before = vm.vm.gas_remaining(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!( - !result.result.is_failed(), - "Batch wasn't successful for input: {test_data:?}" - ); - let gas_after = vm.vm.gas_remaining(); - assert_eq!((gas_before - gas_after) as u64, result.statistics.gas_used); - - TestStatistics { - max_used_gas: gas_before - gas_after, - circuit_statistics: result.statistics.circuit_statistic.total() as u64, - execution_metrics_size: result.get_execution_metrics(None).size() as u64, - } -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec<StateDiffRecord> { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -// A valid zkEVM bytecode has an odd number of 32-byte words -fn get_valid_bytecode_length(length: usize) -> usize { - // Firstly ensure that the length is divisible by 32 - let length_padded_to_32 = if length % 32 == 0 { - length - } else { - length + 32 - (length % 32) - }; - - // Then we ensure that the number returned by division by 32 is odd - if length_padded_to_32 % 64 == 0 { - length_padded_to_32 + 32 - } else { - length_padded_to_32 - } -} +use crate::{versions::testonly::block_tip::test_dry_run_upper_bound, vm_fast::Vm}; #[test]
- - let statistics = vec![ - // max logs - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, - ..Default::default() - }), - tag: "max_logs".to_string(), - }, - // max messages - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, - // so the max number of pubdata is bound by it - messages: vec![ - vec![0; 0]; - MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) - ], - ..Default::default() - }), - tag: "max_messages".to_string(), - }, - // long message - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], - ..Default::default() - }), - tag: "long_message".to_string(), - }, - // max bytecodes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long. - // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number - bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], - ..Default::default() - }), - tag: "max_bytecodes".to_string(), - }, - // long bytecode - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - bytecodes: vec![ - vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; - 1 - ], - ..Default::default() - }), - tag: "long_bytecode".to_string(), - }, - // lots of small repeated writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), - ..Default::default() - }), - tag: "small_repeated_writes".to_string(), - }, - // lots of big repeated writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs( - true, - false, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37, - ), - ..Default::default() - }), - tag: "big_repeated_writes".to_string(), - }, - // lots of small initial writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each small initial write will take at least 32 bytes for derived key + 1 bytes encoding zeroing out - state_diffs: generate_state_diffs( - false, - true, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33, - ), - ..Default::default() - }), - tag: "small_initial_writes".to_string(), - }, - // lots of large initial writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs( - false, - false, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65, - ), - ..Default::default() - }), - tag: "big_initial_writes".to_string(), - }, - ]; - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
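// A minimal sketch (not part of this patch) of the margin check applied below: each
// hardcoded overhead constant must cover the measured worst case scaled by 3/2, e.g.
// a measured 60_000 gas requires an overhead constant of at least 90_000.
fn fits_with_margin(measured_worst_case: u64, overhead_constant: u64) -> bool {
    measured_worst_case * 3 / 2 <= overhead_constant
}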
- let max_used_gas = statistics - .iter() - .map(|s| (s.statistics.max_used_gas, s.tag.clone())) - .max() - .unwrap(); - assert!( - max_used_gas.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, - "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}", - max_used_gas.1, - max_used_gas.0, - BOOTLOADER_BATCH_TIP_OVERHEAD - ); - - let circuit_statistics = statistics - .iter() - .map(|s| (s.statistics.circuit_statistics, s.tag.clone())) - .max() - .unwrap(); - assert!( - circuit_statistics.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}", - circuit_statistics.1, - circuit_statistics.0, - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD - ); - - let execution_metrics_size = statistics - .iter() - .map(|s| (s.statistics.execution_metrics_size, s.tag.clone())) - .max() - .unwrap(); - assert!( - execution_metrics_size.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}", - execution_metrics_size.1, - execution_metrics_size.0, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD - ); +fn dry_run_upper_bound() { + test_dry_run_upper_bound::<Vm<_>>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs index 48e1b10de442..6075aea09898 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs @@ -1,52 +1,14 @@ -use assert_matches::assert_matches; -use zksync_types::U256; -use zksync_vm2::interface::HeapId; - use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}, - versions::vm_fast::tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, + versions::testonly::bootloader::{test_bootloader_out_of_gas, test_dummy_bootloader}, + vm_fast::Vm, }; #[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - - verify_required_memory(&vm.vm.inner, vec![(correct_first_cell, HeapId::FIRST, 0)]); +fn dummy_bootloader() { + test_dummy_bootloader::<Vm<_>>(); } #[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_bootloader_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); +fn bootloader_out_of_gas() { + test_bootloader_out_of_gas::<Vm<_>>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs
b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs index 3070140c00b3..8a662c38827d 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs @@ -1,38 +1,6 @@ -use crate::{ - interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterface, VmInterfaceExt}, - utils::bytecode, - vm_fast::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, -}; +use crate::{versions::testonly::bytecode_publishing::test_bytecode_publishing, vm_fast::Vm}; #[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed; - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); +fn bytecode_publishing() { + test_bytecode_publishing::<Vm<_>>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs deleted file mode 100644 index c97b38b6afc4..000000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - HistoryEnabled, ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default.
-#[test] -#[ignore] -fn test_max_depth() { - let contract = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contract = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are plenty of subcalls underneath. - let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs index f40e5336eb3d..e7521d87c1cd 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs @@ -1,74 +1,6 @@ -use zksync_types::{Address, Execute, U256}; +use crate::{versions::testonly::circuits::test_circuits, vm_fast::Vm}; -use super::tester::VmTesterBuilder; -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, -}; - -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value.
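// A minimal sketch (not part of this patch) of the tolerance rule used by the deleted
// `test_circuits` below: zero expectations must match exactly, non-zero ones within
// 10% relative error.
fn circuit_estimate_matches(actual: f32, expected: f32) -> bool {
    if expected == 0.0 {
        actual == expected
    } else {
        ((actual - expected) / expected).abs() < 0.1
    }
}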
#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(Address::random()), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed(), "{res:#?}"); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - const EXPECTED: [f32; 13] = [ - 1.34935, 0.15026, 1.66666, 0.00315, 1.0594, 0.00058, 0.00348, 0.00076, 0.11945, 0.14285, - 0.0, 0.0, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - (s.secp256k1_verify, "secp256k1_verify"), - (s.transient_storage_checker, "transient_storage_checker"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } +fn circuits() { + test_circuits::<Vm<_>>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs index 34342d7f3b87..4ef861287341 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs @@ -1,252 +1,21 @@ -use ethabi::Token; -use zksync_types::{ - get_known_code_key, web3::keccak256, Address, Execute, StorageLogWithPreviousValue, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::ContractToDeploy, - vm_fast::{ - tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, - }, - CircuitsTracer, + versions::testonly::code_oracle::{ + test_code_oracle, test_code_oracle_big_bytecode, test_refunds_in_code_oracle, }, + vm_fast::Vm, }; -fn generate_large_bytecode() -> Vec<u8> { - // This is the maximal possible size of a zkEVM bytecode - vec![2u8; ((1 << 16) - 1) * 32] -} - #[test] -fn test_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - // Filling the zkevm bytecode - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account
interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - precompile_contract_bytecode, - precompiles_contract_address, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.insert_bytecodes([normal_zkevm_bytecode.as_slice()]); - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - - // Now, we ask for the same bytecode. We use it to partially check whether the memory page with - // the decommitted bytecode gets erased (it shouldn't). - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); -} - -fn find_code_oracle_cost_log( - precompiles_contract_address: Address, - logs: &[StorageLogWithPreviousValue], -) -> &StorageLogWithPreviousValue { - logs.iter() - .find(|log| { - *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero() - }) - .expect("no code oracle cost log") +fn code_oracle() { + test_code_oracle::<Vm<_>>(); } #[test] -fn test_code_oracle_big_bytecode() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let big_zkevm_bytecode = generate_large_bytecode(); - let big_zkevm_bytecode_hash = hash_bytecode(&big_zkevm_bytecode); - let big_zkevm_bytecode_keccak_hash = keccak256(&big_zkevm_bytecode); - - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&big_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests.
- let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - precompile_contract_bytecode, - precompiles_contract_address, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.insert_bytecodes([big_zkevm_bytecode.as_slice()]); - - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(big_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); +fn code_oracle_big_bytecode() { + test_code_oracle_big_bytecode::<Vm<_>>(); } #[test] fn refunds_in_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - // Execute code oracle twice with identical VM state that only differs in that the queried bytecode - // is already decommitted the second time. The second call must consume less gas (`decommit` doesn't charge additional gas - // for already decommitted codes).
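// A minimal sketch (not part of this patch) of the refund arithmetic checked below:
// `decommit` charges `4 * contract_length_in_words` gas (see `CodeOracle.yul`), and the
// full charge is refunded when the bytecode is already decommitted.
fn expected_decommit_refund(bytecode_len_in_bytes: usize) -> usize {
    4 * (bytecode_len_in_bytes / 32)
}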
- let mut oracle_costs = vec![]; - for decommit in [false, true] { - let mut vm = VmTesterBuilder::new() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - precompile_contract_bytecode.clone(), - precompiles_contract_address, - )]) - .with_storage(storage.clone()) - .build(); - - vm.vm.insert_bytecodes([normal_zkevm_bytecode.as_slice()]); - - let account = &mut vm.rich_accounts[0]; - if decommit { - let (_, is_fresh) = vm.vm.inner.world_diff_mut().decommit_opcode( - &mut vm.vm.world, - &mut ((), CircuitsTracer::default()), - h256_to_u256(normal_zkevm_bytecode_hash), - ); - assert!(is_fresh); - } - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - let log = - find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs); - oracle_costs.push(log.log.value); - } - - // The refund is equal to `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words` - // in `CodeOracle.yul`. - let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]); - assert_eq!( - code_oracle_refund, - (4 * (normal_zkevm_bytecode.len() / 32)).into() - ); + test_refunds_in_code_oracle::<Vm<_>>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs index c2ce02d39fe1..c3cfd8b29f37 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs @@ -1,81 +1,6 @@ -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_fast::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - vm_latest::utils::fee::get_batch_base_fee, -}; +use crate::{versions::testonly::default_aa::test_default_aa_interaction, vm_fast::Vm}; #[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests.
- let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = [ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage( - &expected_slots, - &mut vm.vm.world.storage, - vm.vm.inner.world_diff().get_storage_state(), - ); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &vm.fee_account, - &mut vm.vm.world.storage, - vm.vm.inner.world_diff().get_storage_state(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); +fn default_aa_interaction() { + test_default_aa_interaction::<Vm<_>>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/evm_emulator.rs b/core/lib/multivm/src/versions/vm_fast/tests/evm_emulator.rs new file mode 100644 index 000000000000..7b5ea3e4447b --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/evm_emulator.rs @@ -0,0 +1,69 @@ +use test_casing::{test_casing, Product}; + +use crate::{ + versions::testonly::evm_emulator::{ + test_calling_to_mock_emulator_from_native_contract, test_mock_emulator_basics, + test_mock_emulator_with_delegate_call, test_mock_emulator_with_deployment, + test_mock_emulator_with_partial_reverts, test_mock_emulator_with_payment, + test_mock_emulator_with_recursion, test_mock_emulator_with_recursive_deployment, + test_mock_emulator_with_static_call, test_tracing_evm_contract_deployment, + }, + vm_fast::Vm, +}; + +#[test] +fn tracing_evm_contract_deployment() { + test_tracing_evm_contract_deployment::<Vm<_>>(); +} + +#[test] +fn mock_emulator_basics() { + test_mock_emulator_basics::<Vm<_>>(); +} + +#[test_casing(2, [false, true])] +#[test] +fn mock_emulator_with_payment(deploy_emulator: bool) { + test_mock_emulator_with_payment::<Vm<_>>(deploy_emulator); +} + +#[test_casing(4, Product(([false, true], [false, true])))] +#[test] +fn mock_emulator_with_recursion(deploy_emulator: bool, is_external: bool) { + test_mock_emulator_with_recursion::<Vm<_>>(deploy_emulator, is_external); +} + +#[test] +fn calling_to_mock_emulator_from_native_contract() { + test_calling_to_mock_emulator_from_native_contract::<Vm<_>>(); +} + +#[test] +fn mock_emulator_with_deployment() { + test_mock_emulator_with_deployment::<Vm<_>>(false); +} + +#[test] +fn mock_emulator_with_reverted_deployment() {
test_mock_emulator_with_deployment::<Vm<_>>(true); +} + +#[test] +fn mock_emulator_with_recursive_deployment() { + test_mock_emulator_with_recursive_deployment::<Vm<_>>(); +} + +#[test] +fn mock_emulator_with_partial_reverts() { + test_mock_emulator_with_partial_reverts::<Vm<_>>(); +} + +#[test] +fn mock_emulator_with_delegate_call() { + test_mock_emulator_with_delegate_call::<Vm<_>>(); +} + +#[test] +fn mock_emulator_with_static_call() { + test_mock_emulator_with_static_call::<Vm<_>>(); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs index 3f0a47b980e2..6ba55f8e1f8c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs @@ -1,39 +1,6 @@ -use zksync_test_account::Account; -use zksync_types::{fee::Fee, Execute}; +use crate::{versions::testonly::gas_limit::test_tx_gas_limit_offset, vm_fast::Vm}; -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_fast::tests::tester::VmTesterBuilder, - vm_latest::constants::{TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. #[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Account::default_fee() - }), - ); - - vm.vm.push_transaction(tx); - - assert!(!vm.vm.has_previous_far_calls()); - let gas_limit_from_memory = vm - .vm - .read_word_from_bootloader_heap(TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET); - - assert_eq!(gas_limit_from_memory, gas_limit); +fn tx_gas_limit_offset() { + test_tx_gas_limit_offset::<Vm<_>>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 0447304f69f4..5ec30907ed57 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -1,241 +1,22 @@ -use std::{collections::HashSet, iter}; - -use assert_matches::assert_matches; -use ethabi::Token; -use itertools::Itertools; -use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{AccountTreeId, Address, Execute, StorageKey, H256, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - use crate::{ - interface::{ - storage::ReadStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, - }, - versions::testonly::ContractToDeploy, - vm_fast::{ - tests::{ - tester::{TxType, VmTester, VmTesterBuilder}, - utils::{read_proxy_counter_contract, read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - vm::Vm, + versions::testonly::get_used_contracts::{ + test_get_used_contracts, test_get_used_contracts_with_far_call, + test_get_used_contracts_with_out_of_gas_far_call, }, + vm_fast::Vm, }; #[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_base_system_contracts(&vm.vm).is_empty()); - - //
create and push and execute some not-empty factory deps transaction with success status - // to check that `get_decommitted_hashes()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .decommitted_hashes() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm.decommitted_hashes().collect::>(), - known_bytecodes_without_base_system_contracts(&vm.vm) - ); - - // create push and execute some non-empty factory deps transaction that fails - // (`known_bytecodes` will be updated but we expect `get_decommitted_hashes()` to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), - calldata: big_calldata, - value: Default::default(), - factory_deps: vec![vec![1; 32]], - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_base_system_contracts(&vm.vm).contains(&hash_to_u256)); - assert!(!vm.vm.decommitted_hashes().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_base_system_contracts(vm: &Vm) -> HashSet { - let mut known_bytecodes_without_base_system_contracts = vm - .world - .bytecode_cache - .keys() - .cloned() - .collect::>(); - known_bytecodes_without_base_system_contracts - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); - if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator { - let was_removed = - known_bytecodes_without_base_system_contracts.remove(&h256_to_u256(evm_emulator.hash)); - assert!(was_removed); - } - known_bytecodes_without_base_system_contracts -} - -/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial -/// decommitment cost (>10,000 gas). 
-fn inflated_counter_bytecode() -> Vec { - let mut counter_bytecode = read_test_contract(); - counter_bytecode.extend( - iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) - .take(10_000) - .flatten(), - ); - counter_bytecode -} - -#[derive(Debug)] -struct ProxyCounterData { - proxy_counter_address: Address, - counter_bytecode_hash: U256, -} - -fn execute_proxy_counter(gas: u32) -> (VmTester<()>, ProxyCounterData, VmExecutionResultAndLogs) { - let counter_bytecode = inflated_counter_bytecode(); - let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); - let counter_address = Address::repeat_byte(0x23); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_custom_contracts(vec![ContractToDeploy::new( - counter_bytecode, - counter_address, - )]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx( - &proxy_counter_bytecode, - Some(&[Token::Address(counter_address)]), - TxType::L2, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); - compression_result.unwrap(); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); - assert!( - !decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); - - let increment = proxy_counter_abi.function("increment").unwrap(); - let increment_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(deploy_tx.address), - calldata: increment - .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) - .unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(increment_tx, true); - compression_result.unwrap(); - let data = ProxyCounterData { - proxy_counter_address: deploy_tx.address, - counter_bytecode_hash, - }; - (vm, data, exec_result) +fn get_used_contracts() { + test_get_used_contracts::>(); } #[test] fn get_used_contracts_with_far_call() { - let (vm, data, exec_result) = execute_proxy_counter(100_000); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); - assert!( - decommitted_hashes.contains(&data.counter_bytecode_hash), - "{decommitted_hashes:?}" - ); + test_get_used_contracts_with_far_call::>(); } #[test] fn get_used_contracts_with_out_of_gas_far_call() { - let (mut vm, data, exec_result) = execute_proxy_counter(10_000); - assert_matches!(exec_result.result, ExecutionResult::Revert { .. }); - let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); - assert!( - decommitted_hashes.contains(&data.counter_bytecode_hash), - "{decommitted_hashes:?}" - ); - - // Execute another transaction with a successful far call and check that it's still charged for decommitment. 
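The removed tests above distinguish bytecodes that are merely known to the VM from bytecodes it actually paid to decommit, and the out-of-gas case checks that a failed far call does not mark the callee as decommitted, so a later successful call pays the full cost again. A self-contained toy model of that accounting (all names invented for illustration, not the repo's API):

```rust
use std::collections::HashSet;

struct Decommits {
    committed: HashSet<u64>,
}

impl Decommits {
    /// Returns whether the far call succeeded. A fresh bytecode costs `cost`
    /// gas to decommit; on an out-of-gas failure the decommitment is rolled
    /// back, so the hash is *not* recorded and will be charged for again.
    fn far_call(&mut self, bytecode_hash: u64, gas: u32, cost: u32) -> bool {
        let fresh = !self.committed.contains(&bytecode_hash);
        if fresh && gas < cost {
            return false;
        }
        self.committed.insert(bytecode_hash);
        true
    }
}

fn main() {
    let mut vm = Decommits { committed: HashSet::new() };
    assert!(!vm.far_call(0xabc, 10_000, 11_000)); // reverts; not decommitted
    assert!(vm.far_call(0xabc, 100_000, 11_000)); // charged the full cost again
    assert!(vm.far_call(0xabc, 1, 11_000)); // already decommitted: no charge
}
```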
- let account = &mut vm.rich_accounts[0]; - let (_, proxy_counter_abi) = read_proxy_counter_contract(); - let increment = proxy_counter_abi.function("increment").unwrap(); - let increment_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(data.proxy_counter_address), - calldata: increment - .encode_input(&[Token::Uint(1.into()), Token::Uint(u64::MAX.into())]) - .unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(increment_tx, true); - compression_result.unwrap(); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let proxy_counter_cost_key = StorageKey::new( - AccountTreeId::new(data.proxy_counter_address), - H256::from_low_u64_be(1), - ); - let far_call_cost_log = exec_result - .logs - .storage_logs - .iter() - .find(|log| log.log.key == proxy_counter_cost_key) - .expect("no cost log"); - assert!( - far_call_cost_log.previous_value.is_zero(), - "{far_call_cost_log:?}" - ); - let far_call_cost = h256_to_u256(far_call_cost_log.log.value); - assert!(far_call_cost > 10_000.into(), "{far_call_cost}"); + test_get_used_contracts_with_out_of_gas_far_call::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs deleted file mode 100644 index dde83d8a9f36..000000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_latest::tests::tester::VmTesterBuilder; -use crate::vm_latest::types::inputs::system_env::TxExecutionMode; -use crate::vm_latest::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. 
- test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs index df8d992f02fe..522aa2413f6d 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs @@ -1,46 +1,6 @@ -use zksync_types::get_nonce_key; - -use crate::{ - interface::{ - storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - }, - vm_fast::tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, -}; +use crate::{versions::testonly::is_write_initial::test_is_write_initial_behaviour, vm_fast::Vm}; #[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. 
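The removed comment above states the invariant being tested: whether a write is "initial" depends only on the base storage snapshot, not on writes made earlier in the same batch. A self-contained sketch of that semantics (toy types, not the repo's `ReadStorage`):

```rust
use std::collections::HashMap;

/// Toy storage view: a sealed base snapshot plus the current batch's writes.
struct ToyStorage {
    base: HashMap<&'static str, u64>,
    batch_writes: HashMap<&'static str, u64>,
}

impl ToyStorage {
    /// A write is "initial" iff the key is absent from the base snapshot;
    /// writes within the current, unsealed batch must not change the answer.
    fn is_write_initial(&self, key: &str) -> bool {
        !self.base.contains_key(key)
    }

    fn write(&mut self, key: &'static str, value: u64) {
        self.batch_writes.insert(key, value);
    }
}

fn main() {
    let mut storage = ToyStorage {
        base: HashMap::new(),
        batch_writes: HashMap::new(),
    };
    assert!(storage.is_write_initial("nonce_key"));
    storage.write("nonce_key", 1);
    // Still initial: a key only moves into the base snapshot once a batch is sealed.
    assert!(storage.is_write_initial("nonce_key"));
}
```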
- - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); +fn is_write_initial_behaviour() { + test_is_write_initial_behaviour::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index 5897ec5f2662..f02957020178 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -1,198 +1,22 @@ -use ethabi::Token; -use zksync_contracts::l1_messenger_contract; -use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Execute, ExecuteTransactionCommon, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - utils::StorageWritesDeduplicator, - vm_fast::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - transaction_data::TransactionData, + versions::testonly::l1_tx_execution::{ + test_l1_tx_execution, test_l1_tx_execution_gas_estimation_with_low_gas, + test_l1_tx_execution_high_gas_limit, }, + vm_fast::Vm, }; #[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 9 initial writes here, because we pay fees from l1: - // - `totalSupply` of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - `tx_rolling` hash - // - `gasPerPubdataByte` - // - `basePubdataSpent` - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in `L1Messenger` - - // TODO(PLA-537): right now we are using 5 slots instead of 9 due to 0 fee for transaction. 
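The write counts asserted below come from deduplicating raw storage logs: all writes to one slot collapse into a single write, which is "initial" if the slot did not exist before the batch and "repeated" otherwise. A condensed sketch of that split (illustrative only; the real `StorageWritesDeduplicator` also drops writes that restore a slot's original value):

```rust
use std::collections::{HashMap, HashSet};

/// Collapse raw (slot, value) logs into one write per slot, then count
/// initial vs. repeated writes against the pre-batch state.
fn count_writes(existing_slots: &HashSet<u32>, logs: &[(u32, u64)]) -> (usize, usize) {
    let mut deduplicated: HashMap<u32, u64> = HashMap::new();
    for &(slot, value) in logs {
        deduplicated.insert(slot, value); // the last write to a slot wins
    }
    let initial = deduplicated
        .keys()
        .filter(|slot| !existing_slots.contains(*slot))
        .count();
    (initial, deduplicated.len() - initial)
}

fn main() {
    let existing: HashSet<u32> = [1].into_iter().collect();
    // Slot 7 is written twice but counts once; slot 1 already existed.
    let logs = [(7, 10), (7, 11), (1, 5)];
    assert_eq!(count_writes(&existing, &logs), (1, 1));
}
```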
- let basic_initial_writes = 5; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&deploy_tx.address); - - assert!(!res.result.is_failed()); - - for (expected_value, storage_location) in [ - (U256::from(1u32), known_codes_key), - (h256_to_u256(deploy_tx.bytecode_hash), account_code_key), - ] { - assert_eq!( - expected_value, - vm.vm.inner.world_diff().get_storage_state()[&( - *storage_location.address(), - h256_to_u256(*storage_location.key()) - )] - ); - } - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes, basic_initial_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract. - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated. - // But now the base pubdata spent has changed too. 
- assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); - assert_eq!(res.repeated_storage_writes, 1); +fn l1_tx_execution() { + test_l1_tx_execution::>(); } #[test] -fn test_l1_tx_execution_high_gas_limit() { - // In this test, we try to execute an L1->L2 transaction with a high gas limit. - // Usually priority transactions with dangerously gas limit should even pass the checks on the L1, - // however, they might pass during the transition period to the new fee model, so we check that we can safely process those. - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let l1_messenger = l1_messenger_contract(); - - let contract_function = l1_messenger.function("sendToL1").unwrap(); - let params = [ - // Even a message of size 100k should not be able to be sent by a priority transaction - Token::Bytes(vec![0u8; 100_000]), - ]; - let calldata = contract_function.encode_input(¶ms).unwrap(); - - let mut tx = account.get_l1_tx( - Execute { - contract_address: Some(L1_MESSENGER_ADDRESS), - value: 0.into(), - factory_deps: vec![], - calldata, - }, - 0, - ); - - if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { - // Using some large gas limit - data.gas_limit = 300_000_000.into(); - } else { - unreachable!() - }; - - vm.vm.push_transaction(tx); - - let res = vm.vm.execute(VmExecutionMode::OneTx); +fn l1_tx_execution_high_gas_limit() { + test_l1_tx_execution_high_gas_limit::>(); +} - assert!(res.result.is_failed(), "The transaction should've failed"); +#[test] +fn l1_tx_execution_gas_estimation_with_low_gas() { + test_l1_tx_execution_gas_estimation_with_low_gas::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs index fde94d9da6cd..0823bee6cc9e 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs @@ -1,424 +1,33 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
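Every hunk in this part of the diff makes the same move: the per-VM test body is deleted and replaced by a one-line wrapper over a shared, VM-generic implementation under `versions::testonly`. A standalone sketch of that pattern, with all names invented for illustration (the repo's actual `TestedVm` surface is much larger, as the `mod.rs` hunk below shows):

```rust
/// One trait describes the operations the shared tests need from a VM.
trait TestedVm: Default {
    fn push_transaction(&mut self, tx: u64);
    fn executed_count(&self) -> usize;
}

/// Each test body is written once, generic over the VM implementation.
fn test_executes_single_tx<VM: TestedVm>() {
    let mut vm = VM::default();
    vm.push_transaction(42);
    assert_eq!(vm.executed_count(), 1);
}

/// A concrete VM version implements the trait...
#[derive(Default)]
struct FastVm {
    txs: Vec<u64>,
}

impl TestedVm for FastVm {
    fn push_transaction(&mut self, tx: u64) {
        self.txs.push(tx);
    }
    fn executed_count(&self) -> usize {
        self.txs.len()
    }
}

/// ...and keeps only one-line wrappers, mirroring the `test_*::<Vm<_>>()` calls in this diff.
fn main() {
    test_executes_single_tx::<FastVm>();
}
```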
- -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, L2BlockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, L2BlockNumber, - ProtocolVersionId, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - use crate::{ - interface::{ - storage::ReadStorage, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, - VmInterface, VmInterfaceExt, - }, - versions::testonly::default_l1_batch, - vm_fast::{tests::tester::VmTesterBuilder, vm::Vm}, - vm_latest::{ - constants::{TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO}, - utils::l2_blocks::get_l2_block_hash_key, + versions::testonly::l2_blocks::{ + test_l2_block_first_in_batch, test_l2_block_initialization_number_non_zero, + test_l2_block_initialization_timestamp, test_l2_block_new_l2_block, + test_l2_block_same_l2_block, }, + vm_fast::Vm, }; -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: Some(H160::zero()), - calldata: vec![], - value: U256::zero(), - factory_deps: vec![], - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - #[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current L2 block to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); +fn l2_block_initialization_timestamp() { + test_l2_block_initialization_timestamp::>(); } #[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first L2 block number can not be zero. 
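The halts these l2_blocks tests assert follow a small set of invariants for a newly opened L2 block: the number must grow by exactly one, the timestamp must strictly increase, and the declared previous hash must match the block actually sealed before it. A condensed, self-contained version of those checks (simplified types; error strings echo the expected halt messages):

```rust
struct BlockInfo {
    number: u32,
    timestamp: u64,
    hash: [u8; 32],
}

fn validate_new_block(
    prev: &BlockInfo,
    number: u32,
    timestamp: u64,
    prev_hash: [u8; 32],
) -> Result<(), &'static str> {
    if number != prev.number + 1 {
        return Err("Invalid new L2 block number");
    }
    if timestamp <= prev.timestamp {
        return Err("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block");
    }
    if prev_hash != prev.hash {
        return Err("The current L2 block hash is incorrect");
    }
    Ok(())
}

fn main() {
    let prev = BlockInfo { number: 1, timestamp: 1, hash: [0x11; 32] };
    assert!(validate_new_block(&prev, 2, 2, [0x11; 32]).is_ok());
    // The number may not jump by more than one:
    assert!(validate_new_block(&prev, 3, 2, [0x11; 32]).is_err());
}
```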
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_initialization_number_non_zero() { + test_l2_block_initialization_number_non_zero::>(); } #[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
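The `test_same_l2_block` helper above drives one scenario through several failure modes via `Option` overrides: `None` keeps a known-good value, `Some(x)` injects a corrupted one, and the expected halt (if any) is passed alongside. A condensed sketch of that parameterization style, mirroring the three cases that follow below:

```rust
#[derive(Clone)]
struct Block {
    timestamp: u64,
    prev_hash: [u8; 32],
}

fn run_case(
    override_timestamp: Option<u64>,
    override_prev_hash: Option<[u8; 32]>,
) -> Result<(), &'static str> {
    let good = Block { timestamp: 5, prev_hash: [7; 32] };
    let mut candidate = good.clone();
    if let Some(timestamp) = override_timestamp {
        candidate.timestamp = timestamp;
    }
    if let Some(prev_hash) = override_prev_hash {
        candidate.prev_hash = prev_hash;
    }
    // Within the same L2 block, both fields must match exactly.
    if candidate.timestamp != good.timestamp {
        return Err("The timestamp of the same L2 block must be same");
    }
    if candidate.prev_hash != good.prev_hash {
        return Err("The previous hash of the same L2 block must be same");
    }
    Ok(())
}

fn main() {
    assert!(run_case(None, None).is_ok()); // correct continuation
    assert!(run_case(Some(0), None).is_err()); // corrupted timestamp
    assert!(run_case(None, Some([0; 32])).is_err()); // corrupted previous hash
}
```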
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_same_l2_block() { + test_l2_block_same_l2_block::>(); } #[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = 
default_l1_batch(L1BatchNumber(1)); - l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let mut storage_ptr = vm.vm.world.storage.borrow_mut(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr.set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.set_value( - prev_block_hash_position, - L2BlockHasher::legacy_hash(L2BlockNumber(miniblock_number - 1)), - ); - drop(storage_ptr); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_new_l2_block() { + test_l2_block_new_l2_block::>(); } #[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); - let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); - let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.write_to_bootloader_heap([ - 
-        (fictive_miniblock_position, block_info.number.into()),
-        (fictive_miniblock_position + 1, block_info.timestamp.into()),
-        (
-            fictive_miniblock_position + 2,
-            h256_to_u256(block_info.prev_block_hash),
-        ),
-        (
-            fictive_miniblock_position + 3,
-            block_info.max_virtual_blocks_to_create.into(),
-        ),
-    ])
+fn l2_block_first_in_batch() {
+    test_l2_block_first_in_batch::<Vm<_>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs
index 730c573cdcf4..0a26e895b5a7 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs
@@ -1,10 +1,28 @@
+use std::{any::Any, collections::HashSet, fmt, rc::Rc};
+
+use zksync_types::{
+    h256_to_u256, writes::StateDiffRecord, StorageKey, Transaction, H160, H256, U256,
+};
+use zksync_vm2::interface::{Event, HeapId, StateInterface};
+use zksync_vm_interface::{
+    pubdata::PubdataBuilder, storage::ReadStorage, CurrentExecutionState, L2BlockEnv,
+    VmExecutionMode, VmExecutionResultAndLogs, VmInterface,
+};
+
+use super::{circuits_tracer::CircuitsTracer, Vm};
+use crate::{
+    interface::storage::{ImmutableStorageView, InMemoryStorage},
+    versions::testonly::TestedVm,
+    vm_fast::evm_deploy_tracer::{DynamicBytecodes, EvmDeployTracer},
+};
+
 mod block_tip;
 mod bootloader;
 mod bytecode_publishing;
-mod default_aa;
-// mod call_tracer; FIXME: requires tracers
 mod circuits;
 mod code_oracle;
+mod default_aa;
+mod evm_emulator;
 mod gas_limit;
 mod get_used_contracts;
 mod is_write_initial;
@@ -12,15 +30,141 @@ mod l1_tx_execution;
 mod l2_blocks;
 mod nonce_holder;
 mod precompiles;
-// mod prestate_tracer; FIXME: is pre-state tracer still relevant?
 mod refunds;
 mod require_eip712;
 mod rollbacks;
-mod sekp256r1;
+mod secp256r1;
 mod simple_execution;
 mod storage;
-mod tester;
 mod tracing_execution_error;
 mod transfer;
 mod upgrade;
-mod utils;
+
+trait ObjectSafeEq: fmt::Debug + AsRef<dyn Any> {
+    fn eq(&self, other: &dyn ObjectSafeEq) -> bool;
+}
+
+#[derive(Debug)]
+struct BoxedEq<T>(T);
+
+impl<T: fmt::Debug + 'static> AsRef<dyn Any> for BoxedEq<T> {
+    fn as_ref(&self) -> &dyn Any {
+        &self.0
+    }
+}
+
+impl<T: fmt::Debug + PartialEq + 'static> ObjectSafeEq for BoxedEq<T> {
+    fn eq(&self, other: &dyn ObjectSafeEq) -> bool {
+        let Some(other) = other.as_ref().downcast_ref::<T>() else {
+            return false;
+        };
+        self.0 == *other
+    }
+}
+
+// TODO this doesn't include all the state of ModifiedWorld
+#[derive(Debug)]
+pub(crate) struct VmStateDump {
+    state: Box<dyn ObjectSafeEq>,
+    storage_writes: Vec<((H160, U256), U256)>,
+    events: Box<[Event]>,
+}
+
+impl PartialEq for VmStateDump {
+    fn eq(&self, other: &Self) -> bool {
+        self.state.as_ref().eq(other.state.as_ref())
+            && self.storage_writes == other.storage_writes
+            && self.events == other.events
+    }
+}
+
+impl TestedVm for Vm<ImmutableStorageView<InMemoryStorage>> {
+    type StateDump = VmStateDump;
+
+    fn dump_state(&self) -> Self::StateDump {
+        VmStateDump {
+            state: Box::new(BoxedEq(self.inner.dump_state())),
+            storage_writes: self.inner.get_storage_state().collect(),
+            events: self.inner.events().collect(),
+        }
+    }
+
+    fn gas_remaining(&mut self) -> u32 {
+        self.gas_remaining()
+    }
+
+    fn get_current_execution_state(&self) -> CurrentExecutionState {
+        self.get_current_execution_state()
+    }
+
+    fn decommitted_hashes(&self) -> HashSet<U256> {
+        self.decommitted_hashes().collect()
+    }
+
+    fn finish_batch_with_state_diffs(
+        &mut self,
+        diffs: Vec<StateDiffRecord>,
+        pubdata_builder: Rc<dyn PubdataBuilder>,
+    ) -> VmExecutionResultAndLogs {
+        self.enforce_state_diffs(diffs);
+        self.finish_batch(pubdata_builder)
+            .block_tip_execution_result
+    }
+
+    fn finish_batch_without_pubdata(&mut self) ->
VmExecutionResultAndLogs { + self.inspect_inner(&mut Default::default(), VmExecutionMode::Batch, None) + } + + fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) { + self.insert_bytecodes(bytecodes.iter().copied()) + } + + fn known_bytecode_hashes(&self) -> HashSet { + self.world.bytecode_cache.keys().copied().collect() + } + + fn manually_decommit(&mut self, code_hash: H256) -> bool { + let mut tracer = ( + ((), CircuitsTracer::default()), + EvmDeployTracer::new(DynamicBytecodes::default()), + ); + let (_, is_fresh) = self.inner.world_diff_mut().decommit_opcode( + &mut self.world, + &mut tracer, + h256_to_u256(code_hash), + ); + is_fresh + } + + fn verify_required_bootloader_heap(&self, required_values: &[(u32, U256)]) { + for &(slot, expected_value) in required_values { + let current_value = self.inner.read_heap_u256(HeapId::FIRST, slot * 32); + assert_eq!(current_value, expected_value); + } + } + + fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]) { + self.write_to_bootloader_heap(cells.iter().copied()); + } + + fn read_storage(&mut self, key: StorageKey) -> U256 { + let storage_changes = self.inner.world_diff().get_storage_state(); + let main_storage = &mut self.world.storage; + storage_changes + .get(&(*key.account().address(), h256_to_u256(*key.key()))) + .copied() + .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key))) + } + + fn last_l2_block_hash(&self) -> H256 { + self.bootloader_state.last_l2_block().get_hash() + } + + fn push_l2_block_unchecked(&mut self, block: L2BlockEnv) { + self.bootloader_state.push_l2_block(block); + } + + fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) { + self.push_transaction_inner(tx, refund, true); + } +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs index 6d1e0f016e9e..438d6aabe55b 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs @@ -1,180 +1,6 @@ -use zksync_types::{Execute, ExecuteTransactionCommon, Nonce}; - -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt, - VmRevertReason, - }, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, -}; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} +use crate::{versions::testonly::nonce_holder::test_nonce_holder, vm_fast::Vm}; #[test] -fn test_nonce_holder() { - let mut account = Account::random(); - let hex_addr = hex::encode(account.address.to_fixed_bytes()); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![ContractToDeploy::account( - read_nonce_holder_tester().to_vec(), - account.address, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: 
u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. - vm.reset_state(true); - let mut transaction = account.get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: Some(account.address), - calldata: vec![12], - value: Default::default(), - factory_deps: vec![], - }, - None, - Nonce(nonce), - ); - let ExecuteTransactionCommon::L2(tx_data) = &mut transaction.common_data else { - unreachable!(); - }; - tx_data.signature = vec![test_mode.into()]; - vm.vm.push_transaction_inner(transaction, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!(reason.to_string(), expected_error.to_string(), "{comment}"); - } else { - assert!(!result.result.is_failed(), "{comment}: {result:?}"); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - 
NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")), - "Allowed to leave nonce as unused", - ); +fn nonce_holder() { + test_nonce_holder::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs index b3ca15962172..ccf1463979cd 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs @@ -1,116 +1,19 @@ -use circuit_sequencer_api_1_5_0::geometry_config::get_geometry_config; -use zksync_types::{Address, Execute}; - -use super::{tester::VmTesterBuilder, utils::read_precompiles_contract}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::ContractToDeploy, - vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + versions::testonly::precompiles::{test_ecrecover, test_keccak, test_sha256}, + vm_fast::Vm, }; #[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let keccak_count = exec_result.statistics.circuit_statistic.keccak256 - * get_geometry_config().cycles_per_keccak256_circuit as f32; - assert!(keccak_count >= 1000.0, "{keccak_count}"); +fn keccak() { + test_keccak::>(); } #[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) - .build(); - - // calldata for `doSha256(1000)`. 
- let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: hex::decode(sha1000_calldata).unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let sha_count = exec_result.statistics.circuit_statistic.sha256 - * get_geometry_config().cycles_per_sha256_circuit as f32; - assert!(sha_count >= 1000.0, "{sha_count}"); +fn sha256() { + test_sha256::>(); } #[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(account.address), - calldata: vec![], - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let ecrecover_count = exec_result.statistics.circuit_statistic.ecrecover - * get_geometry_config().cycles_per_ecrecover_circuit as f32; - assert!((ecrecover_count - 1.0).abs() < 1e-4, "{ecrecover_count}"); +fn ecrecover() { + test_ecrecover::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs deleted file mode 100644 index 63620c7d9ff8..000000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_test_account::TxType; -use zksync_types::{utils::deployed_address_create, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::PrestateTracer, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_simple_transfer_contract}, - HistoryEnabled, ToTracerPointer, - }, -}; - -#[test] -fn test_prestate_tracer() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - vm.deploy_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm.test_contract.unwrap(), - false, - Default::default(), - true, - TxType::L2, - ); - vm.vm.push_transaction(tx1); - - let contract_address = vm.test_contract.unwrap(); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm.inspect(tracer_ptr.into(), VmExecutionMode::Batch); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - assert!(prestate_result.1.contains_key(&contract_address)); -} - -#[test] -fn 
test_prestate_tracer_diff_mode() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - let contract = read_simple_transfer_contract(); - let tx = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce); - vm.test_contract = Some(deployed_address); - - // Deploy a second copy of the contract to see its appearance in the pre-state - let tx2 = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce2 = tx2.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx2); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address2 = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce2); - - let account = &mut vm.rich_accounts[0]; - - //enter ether to contract to see difference in the balance post execution - let tx0 = Execute { - contract_address: vm.test_contract.unwrap(), - calldata: Default::default(), - value: U256::from(100000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx0.clone(), None)); - - let tx1 = Execute { - contract_address: deployed_address2, - calldata: Default::default(), - value: U256::from(200000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx1, None)); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm - .inspect(tracer_ptr.into(), VmExecutionMode::Bootloader); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - //assert that the pre-state contains both deployed contracts with balance zero - assert!(prestate_result.0.contains_key(&deployed_address)); - assert!(prestate_result.0.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.0[&deployed_address].balance, - Some(U256::zero()) - ); - assert_eq!( - prestate_result.0[&deployed_address2].balance, - Some(U256::zero()) - ); - - //assert that the post-state contains both deployed contracts with the correct balance - assert!(prestate_result.1.contains_key(&deployed_address)); - assert!(prestate_result.1.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.1[&deployed_address].balance, - Some(U256::from(100000)) - ); - assert_eq!( - prestate_result.1[&deployed_address2].balance, - Some(U256::from(200000)) - ); -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs index 1856995149aa..335cb4afb1cd 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs @@ -1,221 +1,16 @@ -use ethabi::Token; -use zksync_types::{Address, Execute, U256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - 
utils::{read_expensive_contract, read_test_contract}, + versions::testonly::refunds::{ + test_negative_pubdata_for_transaction, test_predetermined_refunded_gas, }, + vm_fast::Vm, }; #[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - vm.vm - .push_transaction_inner(tx.clone(), result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.deduplicated_storage_logs, - current_state_without_predefined_refunds.deduplicated_storage_logs - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
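The removed refund test compares two complete execution states, first sorting `used_contract_hashes` because that list is flattened from a hash map and has no stable order. A condensed sketch of the normalize-then-compare step (toy state type):

```rust
#[derive(Debug, PartialEq)]
struct ExecutionState {
    events: Vec<&'static str>,
    used_contract_hashes: Vec<u64>,
}

/// Sort the order-insensitive field so states can be compared with `==`.
fn normalized(mut state: ExecutionState) -> ExecutionState {
    state.used_contract_hashes.sort_unstable();
    state
}

fn main() {
    let with_refund = ExecutionState {
        events: vec!["Transfer"],
        used_contract_hashes: vec![2, 1],
    };
    let without_refund = ExecutionState {
        events: vec!["Transfer"],
        used_contract_hashes: vec![1, 2],
    };
    // Equal once the order-insensitive field is normalized.
    assert_eq!(normalized(with_refund), normalized(without_refund));
}
```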
- let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_transaction_inner(tx, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .deduplicated_storage_logs - .len(), - current_state_without_predefined_refunds - .deduplicated_storage_logs - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.deduplicated_storage_logs, - current_state_without_predefined_refunds.deduplicated_storage_logs - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); +fn predetermined_refunded_gas() { + test_predetermined_refunded_gas::>(); } #[test] fn negative_pubdata_for_transaction() { - let expensive_contract_address = Address::random(); - let (expensive_contract_bytecode, expensive_contract) = read_expensive_contract(); - let expensive_function = expensive_contract.function("expensive").unwrap(); - let cleanup_function = expensive_contract.function("cleanUp").unwrap(); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - expensive_contract_bytecode, - expensive_contract_address, - )]) - .build(); - - let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(expensive_contract_address), - calldata: expensive_function - .encode_input(&[Token::Uint(10.into())]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(expensive_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - - // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
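The comment above is the crux of this test: a transaction that resets previously written slots back to their initial values shrinks the pubdata the batch must publish, which is why the operator-suggested refund ends up positive. A simplified, self-contained model of that accounting (the per-slot cost and the storage layout here are illustrative assumptions, not the bootloader's real compression rules):

use std::collections::HashMap;

// Illustrative flat cost per published slot; real pubdata is compressed.
const SLOT_PUBDATA_BYTES: i64 = 64;

/// Number of slots whose current value differs from the batch-initial value.
fn published_slots(initial: &HashMap<u64, u64>, current: &HashMap<u64, u64>) -> i64 {
    current
        .iter()
        .filter(|(slot, value)| initial.get(*slot) != Some(*value))
        .count() as i64
}

fn main() {
    let initial = HashMap::from([(1u64, 0u64), (2, 0)]);
    // The "expensive" tx performs two initial writes...
    let after_expensive = HashMap::from([(1u64, 42u64), (2, 7)]);
    // ...and the cleanup tx resets both slots to their initial values.
    let after_cleanup = HashMap::from([(1u64, 0u64), (2, 0)]);

    let before = published_slots(&initial, &after_expensive) * SLOT_PUBDATA_BYTES;
    let after = published_slots(&initial, &after_cleanup) * SLOT_PUBDATA_BYTES;
    assert_eq!(before, 128);
    assert!(after - before < 0, "cleanup tx has negative pubdata impact");
}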
- let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(expensive_contract_address), - calldata: cleanup_function.encode_input(&[]).unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(clean_up_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - assert!(result.refunds.operator_suggested_refund > 0); - assert_eq!( - result.refunds.gas_refunded, - result.refunds.operator_suggested_refund - ); + test_negative_pubdata_for_transaction::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs index b4448683cf71..22e4ebf258c7 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -1,175 +1,6 @@ -use ethabi::Token; -use zksync_eth_signer::TransactionParameters; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; -use zksync_utils::h256_to_u256; +use crate::{versions::testonly::require_eip712::test_require_eip712, vm_fast::Vm}; -use crate::{ - interface::{ - storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - }, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, -}; - -impl VmTester<()> { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &address, - ); - self.vm - .inner - .world_diff() - .get_storage_state() - .get(&(L2_BASE_TOKEN_ADDRESS, h256_to_u256(*key.key()))) - .copied() - .unwrap_or_else(|| h256_to_u256(self.vm.world.storage.read_value(&key))) - } -} - -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. #[test] -fn test_require_eip712() { - // Use 3 accounts: - // - `private_address` - EOA account, where we have the key - // - `account_address` - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_custom_contracts(vec![ContractToDeploy::account( - bytecode, - account_abstraction.address, - )]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the `private_address`. - // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). 
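All of the rewritten test files in this diff converge on one pattern: the test body moves into `versions::testonly` as a function generic over the VM under test, and each VM module instantiates it with a turbofish such as `test_require_eip712::<Vm<_>>()`. A minimal self-contained sketch of that pattern; the trait and method names here are illustrative stand-ins, not the real `versions::testonly` harness:

// Illustrative harness trait; the actual shared-test trait differs.
trait TestedVm {
    fn new_for_test() -> Self;
    fn execute_increment(&mut self) -> u64;
}

// Shared test body, written once for all VM implementations.
fn test_increment_works<VM: TestedVm>() {
    let mut vm = VM::new_for_test();
    assert_eq!(vm.execute_increment(), 1);
}

// A stand-in for `vm_fast::Vm`.
struct FastVm {
    counter: u64,
}

impl TestedVm for FastVm {
    fn new_for_test() -> Self {
        FastVm { counter: 0 }
    }

    fn execute_increment(&mut self) -> u64 {
        self.counter += 1;
        self.counter
    }
}

fn main() {
    // Mirrors the `test_x::<Vm<_>>()` instantiations throughout this diff.
    test_increment_works::<FastVm>();
}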
- let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: Some(account_abstraction.address), - calldata: encoded_input, - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - max_fee_per_blob_gas: None, - blob_versioned_hashes: None, - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000, false).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.into(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - Some(beneficiary.address), - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - vec![], - Default::default(), - ); - - let mut transaction_request: TransactionRequest = tx_712.into(); - transaction_request.chain_id = Some(chain_id.into()); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000, false).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.into(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); +fn require_eip712() { + test_require_eip712::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs index 1ac14e01f8ba..e8af23fa1e99 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs @@ -1,205 +1,21 @@ -use assert_matches::assert_matches; -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{Address, Execute, Nonce, U256}; -use zksync_vm_interface::VmInterfaceExt; - use crate::{ - interface::{ExecutionResult, TxExecutionMode}, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, + versions::testonly::rollbacks::{ + test_rollback_in_call_mode, test_vm_loadnext_rollbacks, test_vm_rollbacks, }, + vm_fast::Vm, }; #[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - 
// The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), - ), - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - ]); - - pretty_assertions::assert_eq!(result_without_rollbacks, result_with_rollbacks); +fn vm_rollbacks() { + test_vm_rollbacks::>(); } #[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. 
-    } = account.get_deploy_tx_with_factory_deps(
-        &loadnext_contract.bytecode,
-        Some(loadnext_constructor_data),
-        loadnext_contract.factory_deps.clone(),
-        TxType::L2,
-    );
-
-    let loadnext_tx_1 = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(address),
-            calldata: LoadnextContractExecutionParams {
-                reads: 100,
-                writes: 100,
-                events: 100,
-                hashes: 500,
-                recursive_calls: 10,
-                deploys: 60,
-            }
-            .to_bytes(),
-            value: Default::default(),
-            factory_deps: vec![],
-        },
-        None,
-    );
-
-    let loadnext_tx_2 = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(address),
-            calldata: LoadnextContractExecutionParams {
-                reads: 100,
-                writes: 100,
-                events: 100,
-                hashes: 500,
-                recursive_calls: 10,
-                deploys: 60,
-            }
-            .to_bytes(),
-            value: Default::default(),
-            factory_deps: vec![],
-        },
-        None,
-    );
-
-    let result_without_rollbacks = vm.execute_and_verify_txs(&vec![
-        TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false),
-        TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false),
-        TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false),
-    ]);
-
-    // reset vm
-    vm.reset_with_empty_storage();
-
-    let result_with_rollbacks = vm.execute_and_verify_txs(&vec![
-        TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false),
-        TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true),
-        TransactionTestInfo::new_rejected(
-            loadnext_deploy_tx.clone(),
-            TxModifier::NonceReused(
-                loadnext_deploy_tx.initiator_account(),
-                loadnext_deploy_tx.nonce().unwrap(),
-            )
-            .into(),
-        ),
-        TransactionTestInfo::new_processed(loadnext_tx_1, false),
-        TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true),
-        TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true),
-        TransactionTestInfo::new_rejected(
-            loadnext_deploy_tx.clone(),
-            TxModifier::NonceReused(
-                loadnext_deploy_tx.initiator_account(),
-                loadnext_deploy_tx.nonce().unwrap(),
-            )
-            .into(),
-        ),
-        TransactionTestInfo::new_processed(loadnext_tx_2, false),
-    ]);
-
-    assert_eq!(result_without_rollbacks, result_with_rollbacks);
+fn vm_loadnext_rollbacks() {
+    test_vm_loadnext_rollbacks::<Vm<_>>();
 }
 
 #[test]
 fn rollback_in_call_mode() {
-    let counter_bytecode = read_test_contract();
-    let counter_address = Address::repeat_byte(1);
-
-    let mut vm = VmTesterBuilder::new()
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::EthCall)
-        .with_custom_contracts(vec![ContractToDeploy::new(
-            counter_bytecode,
-            counter_address,
-        )])
-        .with_random_rich_accounts(1)
-        .build();
-    let account = &mut vm.rich_accounts[0];
-    let tx = account.get_test_contract_transaction(counter_address, true, None, false, TxType::L2);
-
-    let (compression_result, vm_result) = vm
-        .vm
-        .execute_transaction_with_bytecode_compression(tx, true);
-    compression_result.unwrap();
-    assert_matches!(
-        vm_result.result,
-        ExecutionResult::Revert { output }
-            if output.to_string().contains("This method always reverts")
-    );
-    assert_eq!(vm_result.logs.storage_logs, []);
+    test_rollback_in_call_mode::<Vm<_>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/secp256r1.rs b/core/lib/multivm/src/versions/vm_fast/tests/secp256r1.rs
new file mode 100644
index 000000000000..d9661c7f7139
--- /dev/null
+++ b/core/lib/multivm/src/versions/vm_fast/tests/secp256r1.rs
@@ -0,0 +1,6 @@
+use crate::{versions::testonly::secp256r1::test_secp256r1, vm_fast::Vm};
+
+#[test]
+fn secp256r1() {
+    test_secp256r1::<Vm<_>>();
+}
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs
b/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs index 8c916a541e21..4fe33d237e9e 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs @@ -1,80 +1,14 @@ -use assert_matches::assert_matches; - use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_fast::tests::tester::{TxType, VmTesterBuilder}, + versions::testonly::simple_execution::{test_estimate_fee, test_simple_execute}, + vm_fast::Vm, }; #[test] fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); + test_estimate_fee::>(); } #[test] fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. 
}); + test_simple_execute::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs index 2cfadb640e72..54a38814d3b5 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs @@ -1,133 +1,14 @@ -use ethabi::Token; -use zksync_contracts::{load_contract, read_bytecode}; -use zksync_types::{Address, Execute, U256}; - use crate::{ - interface::{ - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - }, - versions::testonly::ContractToDeploy, - vm_fast::tests::tester::VmTesterBuilder, + versions::testonly::storage::{test_storage_behavior, test_transient_storage_behavior}, + vm_fast::Vm, }; -fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Vec) -> u32 { - let bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let test_contract_address = Address::random(); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new(bytecode, test_contract_address)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata: first_tx_calldata, - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata: second_tx_calldata, - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.make_snapshot(); - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "First tx failed"); - vm.vm.pop_snapshot_no_rollback(); - - // We rollback once because transient storage and rollbacks are a tricky combination. - vm.vm.make_snapshot(); - vm.vm.push_transaction(tx2.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Second tx failed"); - vm.vm.rollback_to_the_latest_snapshot(); - - vm.vm.make_snapshot(); - vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Second tx failed on second run"); - - result.statistics.pubdata_published -} - -fn test_storage_one_tx(second_tx_calldata: Vec) -> u32 { - test_storage(vec![], second_tx_calldata) -} - #[test] -fn test_storage_behavior() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - // In all of the tests below we provide the first tx to ensure that the tracers will not include - // the statistics from the start of the bootloader and will only include those for the transaction itself. 
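The comment above describes the measurement pattern used by the removed assertions that follow: run a baseline transaction first, then subtract its pubdata from each probe so only the per-transaction cost remains. A toy, self-contained illustration (the overhead constant is invented for the example; the 65-byte delta matches the `simpleWrite` assertion below):

// Stand-in for a measured execution: fixed bootloader overhead plus payload.
fn run_and_measure_pubdata(payload_bytes: u32) -> u32 {
    const BOOTLOADER_OVERHEAD: u32 = 1_000; // illustrative constant
    BOOTLOADER_OVERHEAD + payload_bytes
}

fn main() {
    let base_pubdata = run_and_measure_pubdata(0);
    let simple_write_pubdata = run_and_measure_pubdata(65);

    // Subtracting the base run isolates the per-transaction cost,
    // just like `simple_test_pubdata - base_pubdata` in the removed test.
    assert_eq!(simple_write_pubdata - base_pubdata, 65);
}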
-
-    let base_pubdata = test_storage_one_tx(vec![]);
-    let simple_test_pubdata = test_storage_one_tx(
-        contract
-            .function("simpleWrite")
-            .unwrap()
-            .encode_input(&[])
-            .unwrap(),
-    );
-    let resetting_write_pubdata = test_storage_one_tx(
-        contract
-            .function("resettingWrite")
-            .unwrap()
-            .encode_input(&[])
-            .unwrap(),
-    );
-    let resetting_write_via_revert_pubdata = test_storage_one_tx(
-        contract
-            .function("resettingWriteViaRevert")
-            .unwrap()
-            .encode_input(&[])
-            .unwrap(),
-    );
-
-    assert_eq!(simple_test_pubdata - base_pubdata, 65);
-    assert_eq!(resetting_write_pubdata - base_pubdata, 34);
-    assert_eq!(resetting_write_via_revert_pubdata - base_pubdata, 34);
+fn storage_behavior() {
+    test_storage_behavior::<Vm<_>>();
 }
 
 #[test]
-fn test_transient_storage_behavior() {
-    let contract = load_contract(
-        "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json",
-    );
-
-    let first_tstore_test = contract
-        .function("testTransientStore")
-        .unwrap()
-        .encode_input(&[])
-        .unwrap();
-    // Second transaction checks that, as expected, the transient storage is cleared after the first transaction.
-    let second_tstore_test = contract
-        .function("assertTValue")
-        .unwrap()
-        .encode_input(&[Token::Uint(U256::zero())])
-        .unwrap();
-
-    test_storage(first_tstore_test, second_tstore_test);
+fn transient_storage_behavior() {
+    test_transient_storage_behavior::<Vm<_>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs
deleted file mode 100644
index 212e569d5107..000000000000
--- a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier};
-pub(crate) use vm_tester::{get_empty_storage, VmTester, VmTesterBuilder};
-pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType};
-
-mod transaction_test_info;
-mod vm_tester;
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs
deleted file mode 100644
index 6b1395f66340..000000000000
--- a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs
+++ /dev/null
@@ -1,240 +0,0 @@
-use zksync_types::{ExecuteTransactionCommon, Nonce, Transaction, H160, U256};
-use zksync_vm2::interface::{Event, StateInterface};
-
-use super::VmTester;
-use crate::{
-    interface::{
-        storage::ReadStorage, CurrentExecutionState, ExecutionResult, Halt, TxRevertReason,
-        VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceExt,
-        VmInterfaceHistoryEnabled, VmRevertReason,
-    },
-    vm_fast::Vm,
-};
-
-#[derive(Debug, Clone)]
-pub(crate) enum TxModifier {
-    WrongSignatureLength,
-    WrongSignature,
-    WrongMagicValue,
-    WrongNonce(Nonce, Nonce),
-    NonceReused(H160, Nonce),
-}
-
-#[derive(Debug, Clone)]
-pub(crate) enum TxExpectedResult {
-    Rejected { error: ExpectedError },
-    Processed { rollback: bool },
-}
-
-#[derive(Debug, Clone)]
-pub(crate) struct TransactionTestInfo {
-    tx: Transaction,
-    result: TxExpectedResult,
-}
-
-#[derive(Debug, Clone)]
-pub(crate) struct ExpectedError {
-    pub(crate) revert_reason: TxRevertReason,
-    pub(crate) modifier: Option<TxModifier>,
-}
-
-impl From<TxModifier> for ExpectedError {
-    fn from(value: TxModifier) -> Self {
-        let revert_reason = match value {
-            TxModifier::WrongSignatureLength => {
-                Halt::ValidationFailed(VmRevertReason::Unknown {
-                    function_selector: vec![144, 240, 73, 201],
data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::Unknown { - function_selector: vec![144, 240, 73, 201], - data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], - }) - - } - TxModifier::WrongNonce(expected, actual) => { - let function_selector = vec![98, 106, 222, 48]; - let expected_nonce_bytes = expected.0.to_be_bytes().to_vec(); - let actual_nonce_bytes = actual.0.to_be_bytes().to_vec(); - // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field - let nonce_padding = vec![0u8; 28]; - let data = [function_selector.clone(), nonce_padding.clone(), expected_nonce_bytes, nonce_padding.clone(), actual_nonce_bytes].concat(); - Halt::ValidationFailed(VmRevertReason::Unknown { - function_selector, - data - }) - } - TxModifier::NonceReused(addr, nonce) => { - let function_selector = vec![233, 10, 222, 212]; - let addr = addr.as_bytes().to_vec(); - // padding is 12 because an address takes up 20 bytes and we need it to fill a 32 byte field - let addr_padding = vec![0u8; 12]; - // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field - let nonce_padding = vec![0u8; 28]; - let data = [function_selector.clone(), addr_padding, addr, nonce_padding, nonce.0.to_be_bytes().to_vec()].concat(); - Halt::ValidationFailed(VmRevertReason::Unknown { - function_selector, - data, - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce(_, _) => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused(_, _) => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -// TODO this doesn't include all the state of ModifiedWorld -#[derive(Debug)] -struct VmStateDump { - state: S, - storage_writes: Vec<((H160, U256), U256)>, - events: Box<[Event]>, -} - -impl PartialEq for VmStateDump { - fn eq(&self, other: &Self) -> bool { - self.state == other.state - && self.storage_writes == other.storage_writes - && self.events == other.events - } -} - -impl Vm { - fn dump_state(&self) -> VmStateDump { - VmStateDump { - state: self.inner.dump_state(), - storage_writes: self.inner.get_storage_state().collect(), - events: self.inner.events().collect(), - } - } -} - -impl VmTester<()> { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - self.vm.make_snapshot(); - let inner_state_before = self.vm.dump_state(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_state(); - pretty_assertions::assert_eq!( - inner_state_before, - inner_state_after, - "Inner state before and after rollback should be equal" - ); - } else { - self.vm.pop_snapshot_no_rollback(); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs deleted file mode 100644 index 9549b32c4f1a..000000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs +++ /dev/null @@ -1,231 +0,0 @@ -use std::{cell::RefCell, rc::Rc}; - -use zksync_contracts::BaseSystemContracts; -use zksync_test_account::{Account, TxType}; -use zksync_types::{ - block::L2BlockHasher, utils::deployed_address_create, AccountTreeId, Address, L1BatchNumber, - L2BlockNumber, Nonce, StorageKey, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; -use zksync_vm2::{interface::Tracer, WorldDiff}; - -use crate::{ - interface::{ - storage::{InMemoryStorage, StoragePtr}, - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - versions::{ - testonly::{default_l1_batch, default_system_env, make_account_rich, ContractToDeploy}, - vm_fast::{tests::utils::read_test_contract, vm::Vm}, - }, - vm_latest::utils::l2_blocks::load_last_l2_block, -}; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, Tr>, - pub(crate) 
storage: StoragePtr, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) fee_account: Address, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.inspect(&mut Tr::default(), VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = Rc::new(RefCell::new(get_empty_storage())); - *self.vm.inner.world_diff_mut() = WorldDiff::default(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(&mut self.storage.borrow_mut(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(&mut self.storage.borrow_mut(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let storage = self.storage.clone(); - { - let mut storage = storage.borrow_mut(); - // Commit pending storage changes (old VM versions commit them on successful execution) - for (&(address, slot), &value) in self.vm.inner.world_diff().get_storage_state() { - let key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(slot)); - storage.set_value(key, u256_to_h256(value)); - } - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(&storage).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::custom(l1_batch, self.vm.system_env.clone(), storage); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - self.vm = vm; - } -} - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -impl VmTesterBuilder { - pub(crate) fn new() -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: default_system_env(), - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = 
Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.bootloader_gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester<()> { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - ContractToDeploy::insert_all(&self.custom_contracts, &mut raw_storage); - let storage_ptr = Rc::new(RefCell::new(raw_storage)); - for account in self.rich_accounts.iter() { - make_account_rich(&mut storage_ptr.borrow_mut(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(&mut storage_ptr.borrow_mut(), deployer); - } - - let fee_account = l1_batch_env.fee_account; - let vm = Vm::custom(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - deployer: self.deployer, - test_contract: None, - fee_account, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs index 89f0fa236206..b3f5b4b33bcd 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs @@ -1,55 +1,8 @@ -use zksync_types::{Execute, H160}; - use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, + versions::testonly::tracing_execution_error::test_tracing_of_execution_errors, vm_fast::Vm, }; #[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![ContractToDeploy::new( - read_error_contract(), - contract_address, - )]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx 
= account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(contract_address),
-            calldata: get_execute_error_calldata(),
-            value: Default::default(),
-            factory_deps: vec![],
-        },
-        None,
-    );
-
-    vm.execute_tx_and_verify(TransactionTestInfo::new_rejected(
-        tx,
-        ExpectedError {
-            revert_reason: TxRevertReason::TxReverted(VmRevertReason::General {
-                msg: "short".to_string(),
-                data: vec![
-                    8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116,
-                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0,
-                ],
-            }),
-            modifier: None,
-        },
-    ));
+fn tracing_of_execution_errors() {
+    test_tracing_of_execution_errors::<Vm<_>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs
index ef510546f11c..57c2c3e2c348 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs
@@ -1,215 +1,16 @@
-use ethabi::Token;
-use zksync_contracts::{load_contract, read_bytecode};
-use zksync_system_constants::L2_BASE_TOKEN_ADDRESS;
-use zksync_types::{utils::storage_key_for_eth_balance, AccountTreeId, Address, Execute, U256};
-use zksync_utils::u256_to_h256;
-
 use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt},
-    versions::testonly::ContractToDeploy,
-    vm_fast::tests::{
-        tester::{get_empty_storage, VmTesterBuilder},
-        utils::get_balance,
+    versions::testonly::transfer::{
+        test_reentrancy_protection_send_and_transfer, test_send_and_transfer,
     },
+    vm_fast::Vm,
 };
 
-enum TestOptions {
-    Send(U256),
-    Transfer(U256),
-}
-
-fn test_send_or_transfer(test_option: TestOptions) {
-    let test_bytecode = read_bytecode(
-        "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json",
-    );
-    let recipient_bytecode = read_bytecode(
-        "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json",
-    );
-    let test_abi = load_contract(
-        "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json",
-    );
-
-    let test_contract_address = Address::random();
-    let recipient_address = Address::random();
-
-    let (value, calldata) = match test_option {
-        TestOptions::Send(value) => (
-            value,
-            test_abi
-                .function("send")
-                .unwrap()
-                .encode_input(&[Token::Address(recipient_address), Token::Uint(value)])
-                .unwrap(),
-        ),
-        TestOptions::Transfer(value) => (
-            value,
-            test_abi
-                .function("transfer")
-                .unwrap()
-                .encode_input(&[Token::Address(recipient_address), Token::Uint(value)])
-                .unwrap(),
-        ),
-    };
-
-    let mut storage = get_empty_storage();
-    storage.set_value(
-        storage_key_for_eth_balance(&test_contract_address),
-        u256_to_h256(value),
-    );
-
-    let mut vm = VmTesterBuilder::new()
-        .with_storage(storage)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_deployer()
-        .with_random_rich_accounts(1)
-        .with_custom_contracts(vec![
-            ContractToDeploy::new(test_bytecode, test_contract_address),
-            ContractToDeploy::new(recipient_bytecode, recipient_address),
-        ])
-        .build();
-
-    let account = &mut vm.rich_accounts[0];
-    let tx = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(test_contract_address),
-            calldata,
-            value: U256::zero(),
-            factory_deps: vec![],
-        },
-        None,
-    );
-
-    vm.vm.push_transaction(tx);
-    let tx_result =
vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !tx_result.result.is_failed(), - "Transaction wasn't successful" - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); - - let new_recipient_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &recipient_address, - &mut vm.vm.world.storage, - vm.vm.inner.world_diff().get_storage_state(), - ); - - assert_eq!(new_recipient_balance, value); -} - #[test] -fn test_send_and_transfer() { - test_send_or_transfer(TestOptions::Send(U256::zero())); - test_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); - test_send_or_transfer(TestOptions::Transfer(U256::zero())); - test_send_or_transfer(TestOptions::Transfer(U256::from(10).pow(18.into()))); -} - -fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { - let test_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipient_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - let test_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipient_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - - let test_contract_address = Address::random(); - let reentrant_recipient_address = Address::random(); - - let (value, calldata) = match test_option { - TestOptions::Send(value) => ( - value, - test_abi - .function("send") - .unwrap() - .encode_input(&[ - Token::Address(reentrant_recipient_address), - Token::Uint(value), - ]) - .unwrap(), - ), - TestOptions::Transfer(value) => ( - value, - test_abi - .function("transfer") - .unwrap() - .encode_input(&[ - Token::Address(reentrant_recipient_address), - Token::Uint(value), - ]) - .unwrap(), - ), - }; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ - ContractToDeploy::new(test_bytecode, test_contract_address), - ContractToDeploy::new(reentrant_recipient_bytecode, reentrant_recipient_address), - ]) - .build(); - - // First transaction, the job of which is to warm up the slots for balance of the recipient as well as its storage variable. 
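Transaction 2 below is expected to fail precisely because the recipient re-enters the transfer while it is in flight. A plain-Rust model of the guard behavior the test relies on (illustrative only; the real protection lives in the Solidity test contracts under etc/contracts-test-data):

struct Recipient {
    entered: bool,
}

impl Recipient {
    // Receiving funds; `try_reenter` simulates a malicious fallback that
    // calls back into the transfer before the first call has finished.
    fn receive(&mut self, try_reenter: bool) -> Result<(), &'static str> {
        if self.entered {
            return Err("reentrant call");
        }
        self.entered = true;
        let outcome = if try_reenter {
            self.receive(false)
        } else {
            Ok(())
        };
        self.entered = false;
        outcome
    }
}

fn main() {
    let mut recipient = Recipient { entered: false };
    assert!(recipient.receive(false).is_ok()); // plain transfer (tx 1) succeeds
    assert!(recipient.receive(true).is_err()); // reentrant transfer (tx 2) reverts
}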
- let account = &mut vm.rich_accounts[0]; - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(reentrant_recipient_address), - calldata: reentrant_recipient_abi - .function("setX") - .unwrap() - .encode_input(&[]) - .unwrap(), - value: U256::from(1), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let tx1_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !tx1_result.result.is_failed(), - "Transaction 1 wasn't successful" - ); - - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata, - value, - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx2); - let tx2_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - tx2_result.result.is_failed(), - "Transaction 2 should have failed, but it succeeded" - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); +fn send_and_transfer() { + test_send_and_transfer::>(); } #[test] -fn test_reentrancy_protection_send_and_transfer() { - test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::zero())); - test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); - test_reentrancy_protection_send_or_transfer(TestOptions::Transfer(U256::zero())); - test_reentrancy_protection_send_or_transfer(TestOptions::Transfer( - U256::from(10).pow(18.into()), - )); +fn reentrancy_protection_send_and_transfer() { + test_reentrancy_protection_send_and_transfer::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs index ba4863f7c457..4e4533c68689 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs @@ -1,343 +1,21 @@ -use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_upgrade::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - VmInterfaceHistoryEnabled, - }, - vm_fast::tests::{ - tester::VmTesterBuilder, - utils::{ - get_complex_upgrade_abi, read_complex_upgrade, read_test_contract, - verify_required_storage, - }, + versions::testonly::upgrade::{ + test_complex_upgrader, test_force_deploy_upgrade, test_protocol_upgrade_is_first, }, + vm_fast::Vm, }; -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block #[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: 
- let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); +fn protocol_upgrade_is_first() { + test_protocol_upgrade_is_first::>(); } -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. #[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
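A minimal model of the precondition stated in the comment above: a force deployment should only attach a bytecode hash to an address once that hash has been marked as known. The names and integer-sized types here are illustrative stand-ins for the real storage keys:

use std::collections::{HashMap, HashSet};

struct State {
    known_code_hashes: HashSet<u64>,
    code_at: HashMap<u64, u64>, // address -> bytecode hash
}

impl State {
    fn force_deploy(&mut self, address: u64, bytecode_hash: u64) -> Result<(), &'static str> {
        if !self.known_code_hashes.contains(&bytecode_hash) {
            return Err("bytecode hash was not marked as known");
        }
        self.code_at.insert(address, bytecode_hash);
        Ok(())
    }
}

fn main() {
    let mut state = State {
        known_code_hashes: HashSet::new(),
        code_at: HashMap::new(),
    };
    assert!(state.force_deploy(0xAA, 0x1234).is_err());

    // Analogous to setting `get_known_code_key(&bytecode_hash)` to 1 above.
    state.known_code_hashes.insert(0x1234);
    assert!(state.force_deploy(0xAA, 0x1234).is_ok());
}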
- storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = [(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage( - &expected_slots, - &mut *vm.storage.borrow_mut(), - vm.vm.inner.world_diff().get_storage_state(), - ); +fn force_deploy_upgrade() { + test_force_deploy_upgrade::>(); } -/// Here we show how the work with the complex upgrader could be done #[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in user space - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - { - let mut storage = vm.storage.borrow_mut(); - storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage.set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage.set_value(account_code_key, bytecode_hash); - storage.store_factory_dep(bytecode_hash, read_complex_upgrade()); - storage.store_factory_dep(msg_sender_test_hash, read_msg_sender_test()); - } - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = [ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage( - &expected_slots, - &mut *vm.storage.borrow_mut(), - vm.vm.inner.world_diff().get_storage_state(), - ); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecode hash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - 
-    let encoded_deployments: Vec<_> = deployment
-        .iter()
-        .map(|deployment| {
-            Token::Tuple(vec![
-                Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()),
-                Token::Address(deployment.address),
-                Token::Bool(deployment.call_constructor),
-                Token::Uint(deployment.value),
-                Token::Bytes(deployment.input.clone()),
-            ])
-        })
-        .collect();
-
-    let params = [Token::Array(encoded_deployments)];
-
-    let calldata = contract_function
-        .encode_input(&params)
-        .expect("failed to encode parameters");
-
-    let execute = Execute {
-        contract_address: Some(CONTRACT_DEPLOYER_ADDRESS),
-        calldata,
-        factory_deps: vec![],
-        value: U256::zero(),
-    };
-
-    Transaction {
-        common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData {
-            sender: CONTRACT_FORCE_DEPLOYER_ADDRESS,
-            gas_limit: U256::from(200_000_000u32),
-            gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(),
-            ..Default::default()
-        }),
-        execute,
-        received_timestamp_ms: 0,
-        raw_bytes: None,
-    }
-}
-
-// Returns the transaction that performs a complex protocol upgrade.
-// The first param is the address of the implementation of the complex upgrade
-// in user-space, while the next 3 params are params of the implementation itself
-// For the explanation for the parameters, please refer to:
-// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol
-fn get_complex_upgrade_tx(
-    implementation_address: Address,
-    address1: Address,
-    address2: Address,
-    bytecode_hash: H256,
-) -> Transaction {
-    let impl_contract = get_complex_upgrade_abi();
-    let impl_function = impl_contract.function("someComplexUpgrade").unwrap();
-    let impl_calldata = impl_function
-        .encode_input(&[
-            Token::Address(address1),
-            Token::Address(address2),
-            Token::FixedBytes(bytecode_hash.as_bytes().to_vec()),
-        ])
-        .unwrap();
-
-    let complex_upgrader = get_complex_upgrader_abi();
-    let upgrade_function = complex_upgrader.function("upgrade").unwrap();
-    let complex_upgrader_calldata = upgrade_function
-        .encode_input(&[
-            Token::Address(implementation_address),
-            Token::Bytes(impl_calldata),
-        ])
-        .unwrap();
-
-    let execute = Execute {
-        contract_address: Some(COMPLEX_UPGRADER_ADDRESS),
-        calldata: complex_upgrader_calldata,
-        factory_deps: vec![],
-        value: U256::zero(),
-    };
-
-    Transaction {
-        common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData {
-            sender: CONTRACT_FORCE_DEPLOYER_ADDRESS,
-            gas_limit: U256::from(200_000_000u32),
-            gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(),
-            ..Default::default()
-        }),
-        execute,
-        received_timestamp_ms: 0,
-        raw_bytes: None,
-    }
-}
-
-fn read_msg_sender_test() -> Vec<u8> {
-    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json")
-}
-
-fn get_complex_upgrader_abi() -> Contract {
-    load_sys_contract("ComplexUpgrader")
+fn complex_upgrader() {
+    test_complex_upgrader::<Vm<_>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs
deleted file mode 100644
index eebd825c045f..000000000000
--- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs
+++ /dev/null
@@ -1,134 +0,0 @@
-use std::collections::BTreeMap;
-
-use ethabi::Contract;
-use once_cell::sync::Lazy;
-use zksync_contracts::{
-    load_contract, read_bootloader_code, read_bytecode, BaseSystemContracts, SystemContractCode,
-};
-use zksync_types::{
-    utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H160, H256,
-    U256,
-};
-use
zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; -use zksync_vm2::interface::{HeapId, StateInterface}; - -use crate::interface::storage::ReadStorage; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -pub(crate) fn verify_required_memory( - state: &impl StateInterface, - required_values: Vec<(U256, HeapId, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state.read_heap_u256(memory_page, cell * 32); - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn verify_required_storage( - required_values: &[(H256, StorageKey)], - main_storage: &mut impl ReadStorage, - storage_changes: &BTreeMap<(H160, U256), U256>, -) { - for &(required_value, key) in required_values { - let current_value = storage_changes - .get(&(*key.account().address(), h256_to_u256(*key.key()))) - .copied() - .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key))); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: &mut impl ReadStorage, - storage_changes: &BTreeMap<(H160, U256), U256>, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - - storage_changes - .get(&(*key.account().address(), h256_to_u256(*key.key()))) - .copied() - .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key))) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_bootloader_code(test); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn load_precompiles_contract() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> Contract { - load_contract( - 
"etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -pub(crate) fn read_expensive_contract() -> (Vec, Contract) { - const PATH: &str = - "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; - (read_bytecode(PATH), load_contract(PATH)) -} - -pub(crate) fn read_proxy_counter_contract() -> (Vec, Contract) { - const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json"; - (read_bytecode(PATH), load_contract(PATH)) -} diff --git a/core/lib/multivm/src/versions/vm_fast/transaction_data.rs b/core/lib/multivm/src/versions/vm_fast/transaction_data.rs index 2ec86eb3ceaf..02697beee341 100644 --- a/core/lib/multivm/src/versions/vm_fast/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_fast/transaction_data.rs @@ -1,19 +1,24 @@ use std::convert::TryInto; use zksync_types::{ + address_to_h256, + bytecode::BytecodeHash, ethabi::{encode, Address, Token}, fee::{encoding_len, Fee}, + h256_to_u256, l1::is_l1_tx_type, l2::{L2Tx, TransactionType}, transaction_request::{PaymasterParams, TransactionRequest}, web3::Bytes, Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, Nonce, Transaction, H256, U256, }; -use zksync_utils::{address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; -use crate::vm_latest::{ - constants::{MAX_GAS_PER_PUBDATA_BYTE, TX_MAX_COMPUTE_GAS_LIMIT}, - utils::overhead::derive_overhead, +use crate::{ + utils::bytecode::bytes_to_be_words, + vm_latest::{ + constants::{MAX_GAS_PER_PUBDATA_BYTE, TX_MAX_COMPUTE_GAS_LIMIT}, + utils::overhead::derive_overhead, + }, }; /// This structure represents the data that is used by @@ -190,16 +195,13 @@ impl TransactionData { let factory_deps_hashes = self .factory_deps .iter() - .map(|dep| h256_to_u256(hash_bytecode(dep))) + .map(|dep| BytecodeHash::for_bytecode(dep).value_u256()) .collect(); self.abi_encode_with_custom_factory_deps(factory_deps_hashes) } pub(crate) fn into_tokens(self) -> Vec { - let bytes = self.abi_encode(); - assert!(bytes.len() % 32 == 0); - - bytes_to_be_words(bytes) + bytes_to_be_words(&self.abi_encode()) } pub(crate) fn overhead_gas(&self) -> u32 { diff --git a/core/lib/multivm/src/versions/vm_fast/utils.rs b/core/lib/multivm/src/versions/vm_fast/utils.rs new file mode 100644 index 000000000000..20a6545d3385 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/utils.rs @@ -0,0 +1,13 @@ +use zksync_types::U256; +use zksync_vm2::{interface::StateInterface, FatPointer}; + +pub(super) fn read_fat_pointer(state: &S, raw: U256) -> Vec { + let pointer = FatPointer::from(raw); + let length = pointer.length - pointer.offset; + let start = pointer.start + pointer.offset; + let mut result = vec![0; length as usize]; + for i in 0..length { + result[i as usize] = state.read_heap_byte(pointer.memory_page, start + i); + } + result +} diff --git a/core/lib/multivm/src/versions/vm_fast/version.rs b/core/lib/multivm/src/versions/vm_fast/version.rs new file mode 100644 index 000000000000..8da180d8ba59 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/version.rs @@ -0,0 +1,28 @@ +use crate::{vm_latest::MultiVmSubversion, VmVersion}; + +#[derive(Debug, Copy, Clone)] +pub(crate) enum FastVmVersion { + IncreasedBootloaderMemory, + Gateway, +} + +impl From for MultiVmSubversion { + fn from(value: FastVmVersion) -> Self { + match value { + FastVmVersion::IncreasedBootloaderMemory => Self::IncreasedBootloaderMemory, + FastVmVersion::Gateway => Self::Gateway, + 
+impl TryFrom<VmVersion> for FastVmVersion {
+    type Error = ();
+
+    fn try_from(value: VmVersion) -> Result<Self, Self::Error> {
+        match value {
+            VmVersion::Vm1_5_0IncreasedBootloaderMemory => Ok(Self::IncreasedBootloaderMemory),
+            VmVersion::VmGateway => Ok(Self::Gateway),
+            _ => Err(()),
+        }
+    }
+}
diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs
index 39c9b3c56566..c935b1c0e7f5 100644
--- a/core/lib/multivm/src/versions/vm_fast/vm.rs
+++ b/core/lib/multivm/src/versions/vm_fast/vm.rs
@@ -1,22 +1,24 @@
-use std::{collections::HashMap, fmt, mem};
+use std::{collections::HashMap, fmt, mem, rc::Rc};
 
 use zk_evm_1_5_0::{
     aux_structures::LogQuery, zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION,
 };
 use zksync_contracts::SystemContractCode;
 use zksync_types::{
+    bytecode::BytecodeHash,
+    h256_to_u256,
     l1::is_l1_tx_type,
     l2_to_l1_log::UserL2ToL1Log,
+    u256_to_h256,
     utils::key_for_eth_balance,
     writes::{
         compression::compress_with_best_strategy, StateDiffRecord, BYTES_PER_DERIVED_KEY,
         BYTES_PER_ENUMERATION_INDEX,
     },
     AccountTreeId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue,
-    BOOTLOADER_ADDRESS, H160, H256, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS,
+    Transaction, BOOTLOADER_ADDRESS, H160, H256, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS,
     L2_BASE_TOKEN_ADDRESS, U256,
 };
-use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256};
 use zksync_vm2::{
     interface::{CallframeInterface, HeapId, StateInterface, Tracer},
     ExecutionEnd, FatPointer, Program, Settings, StorageSlot, VirtualMachine,
@@ -26,6 +28,7 @@ use super::{
     bootloader_state::{BootloaderState, BootloaderStateSnapshot},
     bytecode::compress_bytecodes,
     circuits_tracer::CircuitsTracer,
+    evm_deploy_tracer::{DynamicBytecodes, EvmDeployTracer},
     hook::Hook,
     initial_bootloader_memory::bootloader_initial_memory,
     transaction_data::TransactionData,
@@ -33,33 +36,29 @@ use super::{
 use crate::{
     glue::GlueInto,
     interface::{
+        pubdata::{PubdataBuilder, PubdataInput},
         storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView},
         BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState,
-        ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv,
-        TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, VmExecutionResultAndLogs,
-        VmExecutionStatistics, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason,
-        VmTrackingContracts,
+        ExecutionResult, FinishedL1Batch, Halt, InspectExecutionMode, L1BatchEnv, L2BlockEnv,
+        PushTransactionResult, Refunds, SystemEnv, TxRevertReason, VmEvent, VmExecutionLogs,
+        VmExecutionMode, VmExecutionResultAndLogs, VmExecutionStatistics, VmFactory, VmInterface,
+        VmInterfaceHistoryEnabled, VmRevertReason, VmTrackingContracts,
     },
-    is_supported_by_fast_vm,
     utils::events::extract_l2tol1logs_from_l1_messenger,
     vm_fast::{
         bootloader_state::utils::{apply_l2_block, apply_pubdata_to_memory},
         events::merge_events,
-        pubdata::PubdataInput,
         refund::compute_refund,
+        version::FastVmVersion,
    },
-    vm_latest::{
-        constants::{
-            get_vm_hook_params_start_position, get_vm_hook_position, OPERATOR_REFUNDS_OFFSET,
-            TX_GAS_LIMIT_OFFSET, VM_HOOK_PARAMS_COUNT,
-        },
-        MultiVMSubversion,
+    vm_latest::constants::{
+        get_result_success_first_slot, get_vm_hook_params_start_position, get_vm_hook_position,
+        OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET, VM_HOOK_PARAMS_COUNT,
     },
+    VmVersion,
 };
 
-const VM_VERSION: MultiVMSubversion = MultiVMSubversion::IncreasedBootloaderMemory;
-
-type
FullTracer = (Tr, CircuitsTracer); +type FullTracer = ((Tr, CircuitsTracer), EvmDeployTracer); #[derive(Debug)] struct VmRunResult { @@ -92,35 +91,45 @@ impl VmRunResult { /// and implement [`Default`] (the latter is necessary to complete batches). [`CircuitsTracer`] is currently always enabled; /// you don't need to specify it explicitly. pub struct Vm { - pub(crate) world: World>, - pub(crate) inner: VirtualMachine, World>>, + pub(super) world: World>, + pub(super) inner: VirtualMachine, World>>, gas_for_account_validation: u32, - pub(crate) bootloader_state: BootloaderState, - pub(crate) batch_env: L1BatchEnv, - pub(crate) system_env: SystemEnv, + pub(super) bootloader_state: BootloaderState, + pub(super) batch_env: L1BatchEnv, + pub(super) system_env: SystemEnv, snapshot: Option, + vm_version: FastVmVersion, #[cfg(test)] enforced_state_diffs: Option>, } -impl Vm { +impl Vm { pub fn custom(batch_env: L1BatchEnv, system_env: SystemEnv, storage: S) -> Self { - assert!( - is_supported_by_fast_vm(system_env.version), - "Protocol version {:?} is not supported by fast VM", - system_env.version - ); - - let default_aa_code_hash = system_env + let vm_version: FastVmVersion = VmVersion::from(system_env.version) + .try_into() + .unwrap_or_else(|_| { + panic!( + "Protocol version {:?} is not supported by fast VM", + system_env.version + ) + }); + + let default_aa_code_hash = system_env.base_system_smart_contracts.default_aa.hash; + let evm_emulator_hash = system_env .base_system_smart_contracts - .default_aa - .hash - .into(); + .evm_emulator + .as_ref() + .map(|evm| evm.hash) + .unwrap_or(system_env.base_system_smart_contracts.default_aa.hash); - let program_cache = HashMap::from([World::convert_system_contract_code( + let mut program_cache = HashMap::from([World::convert_system_contract_code( &system_env.base_system_smart_contracts.default_aa, false, )]); + if let Some(evm_emulator) = &system_env.base_system_smart_contracts.evm_emulator { + let (bytecode_hash, program) = World::convert_system_contract_code(evm_emulator, false); + program_cache.insert(bytecode_hash, program); + } let (_, bootloader) = World::convert_system_contract_code( &system_env.base_system_smart_contracts.bootloader, @@ -135,10 +144,9 @@ impl Vm { &[], system_env.bootloader_gas_limit, Settings { - default_aa_code_hash, - // this will change after 1.5 - evm_interpreter_code_hash: default_aa_code_hash, - hook_address: get_vm_hook_position(VM_VERSION) * 32, + default_aa_code_hash: default_aa_code_hash.into(), + evm_interpreter_code_hash: evm_emulator_hash.into(), + hook_address: get_vm_hook_position(vm_version.into()) * 32, }, ); @@ -158,10 +166,12 @@ impl Vm { system_env.execution_mode, bootloader_memory.clone(), batch_env.first_l2_block, + system_env.version, ), system_env, batch_env, snapshot: None, + vm_version, #[cfg(test)] enforced_state_diffs: None, }; @@ -172,8 +182,9 @@ impl Vm { fn run( &mut self, execution_mode: VmExecutionMode, - tracer: &mut (Tr, CircuitsTracer), + tracer: &mut FullTracer, track_refunds: bool, + pubdata_builder: Option<&dyn PubdataBuilder>, ) -> VmRunResult { let mut refunds = Refunds { gas_refunded: 0, @@ -204,6 +215,16 @@ impl Vm { }; break (ExecutionResult::Halt { reason }, true); } + ExecutionEnd::StoppedByTracer => { + break ( + ExecutionResult::Halt { + reason: Halt::TracerCustom( + "Unexpectedly stopped by tracer".to_string(), + ), + }, + false, + ); + } }; match Hook::from_u32(hook) { @@ -212,7 +233,22 @@ impl Vm { } Hook::TxHasEnded => { if let VmExecutionMode::OneTx = execution_mode { - 
break (last_tx_result.take().unwrap(), false); + // The bootloader may invoke `TxHasEnded` hook without posting a tx result previously. One case when this can happen + // is estimating gas for L1 transactions, if a transaction runs out of gas during execution. + let tx_result = last_tx_result.take().unwrap_or_else(|| { + let tx_has_failed = self.get_tx_result().is_zero(); + if tx_has_failed { + let output = VmRevertReason::General { + msg: "Transaction reverted with empty reason. Possibly out of gas" + .to_string(), + data: vec![], + }; + ExecutionResult::Revert { output } + } else { + ExecutionResult::Success { output: vec![] } + } + }); + break (tx_result, false); } } Hook::AskOperatorForRefund => { @@ -319,15 +355,19 @@ impl Vm { state_diffs: self.compute_state_diffs(), }; - // Save the pubdata for the future initial bootloader memory building - self.bootloader_state - .set_pubdata_input(pubdata_input.clone()); - // Apply the pubdata to the current memory let mut memory_to_apply = vec![]; - apply_pubdata_to_memory(&mut memory_to_apply, pubdata_input); + apply_pubdata_to_memory( + &mut memory_to_apply, + pubdata_builder.expect("`pubdata_builder` is required to finish batch"), + &pubdata_input, + self.system_env.version, + ); self.write_to_bootloader_heap(memory_to_apply); + + // Save the pubdata for the future initial bootloader memory building + self.bootloader_state.set_pubdata_input(pubdata_input); } Hook::PaymasterValidationEntered | Hook::ValidationStepEnded => { /* unused */ } @@ -352,14 +392,20 @@ impl Vm { } fn get_hook_params(&self) -> [U256; 3] { - (get_vm_hook_params_start_position(VM_VERSION) - ..get_vm_hook_params_start_position(VM_VERSION) + VM_HOOK_PARAMS_COUNT) + (get_vm_hook_params_start_position(self.vm_version.into()) + ..get_vm_hook_params_start_position(self.vm_version.into()) + VM_HOOK_PARAMS_COUNT) .map(|word| self.read_word_from_bootloader_heap(word as usize)) .collect::>() .try_into() .unwrap() } + fn get_tx_result(&self) -> U256 { + let tx_idx = self.bootloader_state.current_tx(); + let slot = get_result_success_first_slot(self.vm_version.into()) as usize + tx_idx; + self.read_word_from_bootloader_heap(slot) + } + fn get_debug_log(&self) -> (String, String) { let hook_params = self.get_hook_params(); let mut msg = u256_to_h256(hook_params[0]).as_bytes().to_vec(); @@ -417,7 +463,7 @@ impl Vm { pub(crate) fn insert_bytecodes<'a>(&mut self, bytecodes: impl IntoIterator) { for code in bytecodes { - let hash = h256_to_u256(hash_bytecode(code)); + let hash = BytecodeHash::for_bytecode(code).value_u256(); self.world.bytecode_cache.insert(hash, code.into()); } } @@ -533,34 +579,12 @@ impl Vm { pubdata_costs: world_diff.pubdata_costs().to_vec(), } } -} - -impl VmFactory> for Vm, Tr> -where - S: ReadStorage, - Tr: Tracer + Default + 'static, -{ - fn new( - batch_env: L1BatchEnv, - system_env: SystemEnv, - storage: StoragePtr>, - ) -> Self { - let storage = ImmutableStorageView::new(storage); - Self::custom(batch_env, system_env, storage) - } -} - -impl VmInterface for Vm { - type TracerDispatcher = Tr; - - fn push_transaction(&mut self, tx: zksync_types::Transaction) { - self.push_transaction_inner(tx, 0, true); - } - fn inspect( + pub(crate) fn inspect_inner( &mut self, - tracer: &mut Self::TracerDispatcher, + tracer: &mut Tr, execution_mode: VmExecutionMode, + pubdata_builder: Option<&dyn PubdataBuilder>, ) -> VmExecutionResultAndLogs { let mut track_refunds = false; if matches!(execution_mode, VmExecutionMode::OneTx) { @@ -572,9 +596,18 @@ impl VmInterface for Vm { let 
start = self.inner.world_diff().snapshot(); let gas_before = self.gas_remaining(); - let mut full_tracer = (mem::take(tracer), CircuitsTracer::default()); - let result = self.run(execution_mode, &mut full_tracer, track_refunds); - *tracer = full_tracer.0; // place the tracer back + let mut full_tracer = ( + (mem::take(tracer), CircuitsTracer::default()), + EvmDeployTracer::new(self.world.dynamic_bytecodes.clone()), + ); + let result = self.run( + execution_mode, + &mut full_tracer, + track_refunds, + pubdata_builder, + ); + let ((external_tracer, circuits_tracer), _) = full_tracer; + *tracer = external_tracer; // place the tracer back let ignore_world_diff = matches!(execution_mode, VmExecutionMode::OneTx) && result.should_ignore_vm_logs(); @@ -631,6 +664,13 @@ impl VmInterface for Vm { let gas_remaining = self.gas_remaining(); let gas_used = gas_before - gas_remaining; + // We need to filter out bytecodes the deployment of which may have been reverted; the tracer is not aware of reverts. + // To do this, we check bytecodes against deployer events. + let factory_deps_marked_as_known = VmEvent::extract_bytecodes_marked_as_known(&logs.events); + let dynamic_factory_deps = self + .world + .decommit_dynamic_bytecodes(factory_deps_marked_as_known); + VmExecutionResultAndLogs { result: result.execution_result, logs, @@ -640,15 +680,52 @@ impl VmInterface for Vm { gas_remaining, computational_gas_used: gas_used, // since 1.5.0, this always has the same value as `gas_used` pubdata_published: result.pubdata_published, - circuit_statistic: full_tracer.1.circuit_statistic(), + circuit_statistic: circuits_tracer.circuit_statistic(), contracts_used: 0, cycles_used: 0, total_log_queries: 0, }, refunds: result.refunds, - new_known_factory_deps: None, + dynamic_factory_deps, } } +} + +impl VmFactory> for Vm, Tr> +where + S: ReadStorage, + Tr: Tracer + Default + 'static, +{ + fn new( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + let storage = ImmutableStorageView::new(storage); + Self::custom(batch_env, system_env, storage) + } +} + +impl VmInterface for Vm { + type TracerDispatcher = Tr; + + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + self.push_transaction_inner(tx, 0, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } + } + + fn inspect( + &mut self, + tracer: &mut Self::TracerDispatcher, + execution_mode: InspectExecutionMode, + ) -> VmExecutionResultAndLogs { + self.inspect_inner(tracer, execution_mode.into(), None) + } fn inspect_transaction_with_bytecode_compression( &mut self, @@ -657,7 +734,7 @@ impl VmInterface for Vm { with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_inner(tx, 0, with_compression); - let result = self.inspect(tracer, VmExecutionMode::OneTx); + let result = self.inspect(tracer, InspectExecutionMode::OneTx); let compression_result = if self.has_unpublished_bytecodes() { Err(BytecodeCompressionError::BytecodeCompressionFailed) @@ -674,19 +751,23 @@ impl VmInterface for Vm { self.bootloader_state.start_new_l2_block(l2_block_env) } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut Tr::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner( + &mut Tr::default(), + VmExecutionMode::Batch, + Some(pubdata_builder.as_ref()), + ); let execution_state 
= self.get_current_execution_state(); - let bootloader_memory = self.bootloader_state.bootloader_memory(); + let bootloader_memory = self + .bootloader_state + .bootloader_memory(pubdata_builder.as_ref()); FinishedL1Batch { block_tip_execution_result: result, final_execution_state: execution_state, final_bootloader_memory: Some(bootloader_memory), pubdata_input: Some( self.bootloader_state - .get_pubdata_information() - .clone() - .build_pubdata(false), + .settlement_layer_pubdata(pubdata_builder.as_ref()), ), state_diffs: Some( self.bootloader_state @@ -761,6 +842,7 @@ impl fmt::Debug for Vm { #[derive(Debug)] pub(crate) struct World { pub(crate) storage: S, + dynamic_bytecodes: DynamicBytecodes, program_cache: HashMap>, pub(crate) bytecode_cache: HashMap>, } @@ -769,8 +851,9 @@ impl World { fn new(storage: S, program_cache: HashMap>) -> Self { Self { storage, + dynamic_bytecodes: DynamicBytecodes::default(), program_cache, - bytecode_cache: Default::default(), + bytecode_cache: HashMap::default(), } } @@ -780,9 +863,22 @@ impl World { ) -> (U256, Program) { ( h256_to_u256(code.hash), - Program::from_words(code.code.clone(), is_bootloader), + Program::new(&code.code, is_bootloader), ) } + + fn decommit_dynamic_bytecodes( + &self, + candidate_hashes: impl Iterator, + ) -> HashMap> { + let bytecodes = candidate_hashes.filter_map(|hash| { + let bytecode = self + .dynamic_bytecodes + .map(h256_to_u256(hash), <[u8]>::to_vec)?; + Some((hash, bytecode)) + }); + bytecodes.collect() + } } impl zksync_vm2::StorageInterface for World { @@ -836,17 +932,47 @@ impl zksync_vm2::StorageInterface for World { } } +/// It may look like that an append-only cache for EVM bytecodes / `Program`s can lead to the following scenario: +/// +/// 1. A transaction deploys an EVM bytecode with hash `H`, then reverts. +/// 2. A following transaction in the same VM run queries a bytecode with hash `H` and gets it. +/// +/// This would be incorrect behavior because bytecode deployments must be reverted along with transactions. +/// +/// In reality, this cannot happen because both `decommit()` and `decommit_code()` calls perform storage-based checks +/// before a decommit: +/// +/// - `decommit_code()` is called from the `CodeOracle` system contract, which checks that the decommitted bytecode is known. +/// - `decommit()` is called during far calls, which obtains address -> bytecode hash mapping beforehand. +/// +/// Thus, if storage is reverted correctly, additional EVM bytecodes occupy the cache, but are unreachable. 
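The invariant described in the comment above can be made concrete with a small self-contained sketch. It assumes only what the comment states: storage (including the "known codes" markers) is snapshotted and reverted per transaction, while the program cache is append-only. All types below are local stand-ins, not the real zksync ones.

use std::collections::{HashMap, HashSet};

struct World {
    // Append-only: entries survive transaction rollbacks.
    program_cache: HashMap<u64, Vec<u8>>,
    // Revertible storage: the "known codes" marker is part of state.
    known_codes: HashSet<u64>,
}

impl World {
    // Mirrors the gating described above: the caller (e.g. `CodeOracle`)
    // first checks that the hash is marked as known in storage before the
    // cache is ever consulted.
    fn decommit_checked(&self, hash: u64) -> Option<&Vec<u8>> {
        if !self.known_codes.contains(&hash) {
            return None;
        }
        self.program_cache.get(&hash)
    }
}

fn main() {
    let mut world = World { program_cache: HashMap::new(), known_codes: HashSet::new() };

    // Tx 1: deploys a bytecode with hash 0xAB, then reverts.
    let snapshot = world.known_codes.clone(); // storage snapshot
    world.known_codes.insert(0xAB);
    world.program_cache.insert(0xAB, vec![0xde, 0xad]);
    world.known_codes = snapshot; // revert storage; the cache is kept as-is

    // Tx 2: the stale cache entry still exists, but it is unreachable.
    assert!(world.program_cache.contains_key(&0xAB));
    assert!(world.decommit_checked(0xAB).is_none());
}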
 impl zksync_vm2::World for World {
     fn decommit(&mut self, hash: U256) -> Program {
         self.program_cache
             .entry(hash)
             .or_insert_with(|| {
-                let bytecode = self.bytecode_cache.entry(hash).or_insert_with(|| {
-                    self.storage
+                let cached = self
+                    .bytecode_cache
+                    .get(&hash)
+                    .map(|code| Program::new(code, false))
+                    .or_else(|| {
+                        self.dynamic_bytecodes
+                            .map(hash, |code| Program::new(code, false))
+                    });
+
+                if let Some(cached) = cached {
+                    cached
+                } else {
+                    let code = self
+                        .storage
                         .load_factory_dep(u256_to_h256(hash))
-                        .expect("vm tried to decommit nonexistent bytecode")
-                });
-                Program::new(bytecode, false)
+                        .unwrap_or_else(|| {
+                            panic!("VM tried to decommit nonexistent bytecode: {hash:?}");
+                        });
+                    let program = Program::new(&code, false);
+                    self.bytecode_cache.insert(hash, code);
+                    program
+                }
             })
             .clone()
     }
diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/l2_block.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/l2_block.rs
index 103c5d16540e..95502b8dc60c 100644
--- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/l2_block.rs
+++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/l2_block.rs
@@ -1,7 +1,6 @@
 use std::cmp::Ordering;
 
-use zksync_types::{L2BlockNumber, H256};
-use zksync_utils::concat_and_hash;
+use zksync_types::{web3::keccak256_concat, L2BlockNumber, H256};
 
 use crate::{
     interface::{L2Block, L2BlockEnv},
@@ -53,7 +52,7 @@ impl BootloaderL2Block {
     }
 
     fn update_rolling_hash(&mut self, tx_hash: H256) {
-        self.txs_rolling_hash = concat_and_hash(self.txs_rolling_hash, tx_hash)
+        self.txs_rolling_hash = keccak256_concat(self.txs_rolling_hash, tx_hash)
     }
 
     pub(crate) fn make_snapshot(&self) -> L2BlockSnapshot {
diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs
index 4ba27b14bad6..2085bbaba31f 100644
--- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs
+++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs
@@ -1,11 +1,15 @@
 use std::cmp::Ordering;
 
 use once_cell::sync::OnceCell;
-use zksync_types::{L2ChainId, U256};
+use zksync_types::{L2ChainId, ProtocolVersionId, U256};
+use zksync_vm_interface::pubdata::PubdataBuilder;
 
 use super::{tx::BootloaderTx, utils::apply_pubdata_to_memory};
 use crate::{
-    interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode},
+    interface::{
+        pubdata::PubdataInput, BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv,
+        TxExecutionMode,
+    },
     vm_latest::{
         bootloader_state::{
             l2_block::BootloaderL2Block,
@@ -13,7 +17,7 @@ use crate::{
             utils::{apply_l2_block, apply_tx_to_memory},
         },
         constants::TX_DESCRIPTION_OFFSET,
-        types::internals::{PubdataInput, TransactionData},
+        types::internals::TransactionData,
         utils::l2_blocks::assert_next_block,
     },
 };
@@ -45,6 +49,8 @@ pub struct BootloaderState {
     free_tx_offset: usize,
     /// Information about the pubdata that will be needed to supply to the L1Messenger
     pubdata_information: OnceCell<PubdataInput>,
+    /// Protocol version.
+    protocol_version: ProtocolVersionId,
 }
 
 impl BootloaderState {
@@ -52,6 +58,7 @@
         execution_mode: TxExecutionMode,
         initial_memory: BootloaderMemory,
         first_l2_block: L2BlockEnv,
+        protocol_version: ProtocolVersionId,
     ) -> Self {
         let l2_block = BootloaderL2Block::new(first_l2_block, 0);
         Self {
@@ -62,6 +69,7 @@
             execution_mode,
             free_tx_offset: 0,
             pubdata_information: Default::default(),
+            protocol_version,
         }
     }
 
@@ -135,18 +143,31 @@
     pub(crate) fn last_l2_block(&self) -> &BootloaderL2Block {
         self.l2_blocks.last().unwrap()
     }
+
     pub(crate) fn get_pubdata_information(&self) -> &PubdataInput {
         self.pubdata_information
             .get()
             .expect("Pubdata information is not set")
     }
 
+    pub(crate) fn settlement_layer_pubdata(&self, pubdata_builder: &dyn PubdataBuilder) -> Vec<u8> {
+        let pubdata_information = self
+            .pubdata_information
+            .get()
+            .expect("Pubdata information is not set");
+
+        pubdata_builder.settlement_layer_pubdata(pubdata_information, self.protocol_version)
+    }
+
     fn last_mut_l2_block(&mut self) -> &mut BootloaderL2Block {
         self.l2_blocks.last_mut().unwrap()
     }
 
     /// Apply all bootloader transactions to the initial memory
-    pub(crate) fn bootloader_memory(&self) -> BootloaderMemory {
+    pub(crate) fn bootloader_memory(
+        &self,
+        pubdata_builder: &dyn PubdataBuilder,
+    ) -> BootloaderMemory {
         let mut initial_memory = self.initial_memory.clone();
         let mut offset = 0;
         let mut compressed_bytecodes_offset = 0;
@@ -174,11 +195,15 @@
         let pubdata_information = self
             .pubdata_information
-            .clone()
-            .into_inner()
+            .get()
             .expect("Empty pubdata information");
 
-        apply_pubdata_to_memory(&mut initial_memory, pubdata_information);
+        apply_pubdata_to_memory(
+            &mut initial_memory,
+            pubdata_builder,
+            pubdata_information,
+            self.protocol_version,
+        );
 
         initial_memory
     }
@@ -291,4 +316,8 @@
             );
         }
     }
+
+    pub(crate) fn protocol_version(&self) -> ProtocolVersionId {
+        self.protocol_version
+    }
 }
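Why the slot accounting in `apply_pubdata_to_memory` (in the next file) differs before and after the gateway upgrade follows from the ABI shapes involved: a bare `bytes` value encodes as [offset][len][data], so pre-gateway the offset word is stripped and two bootloader slots (selector + offset) are reserved, while the post-gateway `(address, bytes)` tuple keeps its encoding intact and only the selector slot is reserved. A minimal sketch of the pre-gateway case, with a hand-rolled stand-in for `ethabi::encode(&[Token::Bytes(..)])` and payloads assumed shorter than 256 bytes:

fn encode_len_prefixed(data: &[u8]) -> Vec<u8> {
    // Stand-in for ABI-encoding a single `bytes` value:
    // [32-byte offset][32-byte length][data padded to a word boundary].
    let mut out = vec![0u8; 32];
    out[31] = 32; // offset to the payload
    let mut len_word = vec![0u8; 32];
    len_word[31] = data.len() as u8; // sketch-only: assumes len < 256
    out.extend_from_slice(&len_word);
    let mut padded = data.to_vec();
    padded.resize(data.len().next_multiple_of(32), 0);
    out.extend_from_slice(&padded);
    out
}

fn main() {
    let pubdata = vec![0xAA; 5];
    let encoded = encode_len_prefixed(&pubdata);
    // Pre-gateway: drop the leading offset word so the bootloader sees
    // exactly [len || data], starting at OPERATOR_PROVIDED_..._OFFSET + 2
    // (the selector and offset slots are filled by the bootloader itself).
    let pre_gateway_memory = &encoded[32..];
    assert_eq!(pre_gateway_memory.len() % 32, 0);
    assert_eq!(pre_gateway_memory[31], 5); // first word holds the length
}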
diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs
index 23c079202c1f..58dc20346a6f 100644
--- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs
+++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs
@@ -1,9 +1,11 @@
-use zksync_types::{ethabi, U256};
-use zksync_utils::{bytes_to_be_words, h256_to_u256};
+use zksync_types::{ethabi, h256_to_u256, ProtocolVersionId, U256};
 
 use super::tx::BootloaderTx;
 use crate::{
-    interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode},
+    interface::{
+        pubdata::{PubdataBuilder, PubdataInput},
+        BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode,
+    },
     utils::bytecode,
     vm_latest::{
         bootloader_state::l2_block::BootloaderL2Block,
@@ -14,7 +16,6 @@ use crate::{
             TX_DESCRIPTION_OFFSET, TX_OPERATOR_L2_BLOCK_INFO_OFFSET,
             TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, TX_OVERHEAD_OFFSET, TX_TRUSTED_GAS_LIMIT_OFFSET,
         },
-        types::internals::PubdataInput,
     },
 };
 
@@ -25,8 +26,7 @@ pub(super) fn get_memory_for_compressed_bytecodes(
         .iter()
         .flat_map(bytecode::encode_call)
         .collect();
-
-    bytes_to_be_words(memory_addition)
+    bytecode::bytes_to_be_words(&memory_addition)
 }
 
 #[allow(clippy::too_many_arguments)]
@@ -124,26 +124,61 @@ fn apply_l2_block_inner(
     ])
 }
 
+fn bootloader_memory_input(
+    pubdata_builder: &dyn PubdataBuilder,
+    input: &PubdataInput,
+    protocol_version: ProtocolVersionId,
+) -> Vec<u8> {
+    let l2_da_validator_address = pubdata_builder.l2_da_validator();
+    let operator_input = pubdata_builder.l1_messenger_operator_input(input, protocol_version);
+
+    ethabi::encode(&[
+        ethabi::Token::Address(l2_da_validator_address),
+        ethabi::Token::Bytes(operator_input),
+    ])
+}
+
 pub(crate) fn apply_pubdata_to_memory(
     memory: &mut BootloaderMemory,
-    pubdata_information: PubdataInput,
+    pubdata_builder: &dyn PubdataBuilder,
+    pubdata_information: &PubdataInput,
+    protocol_version: ProtocolVersionId,
 ) {
-    // Skipping two slots as they will be filled by the bootloader itself:
-    // - One slot is for the selector of the call to the L1Messenger.
-    // - The other slot is for the 0x20 offset for the calldata.
-    let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 2;
-
-    // Need to skip first word as it represents array offset
-    // while bootloader expects only [len || data]
-    let pubdata = ethabi::encode(&[ethabi::Token::Bytes(
-        pubdata_information.build_pubdata(true),
-    )])[32..]
-        .to_vec();
-
-    assert!(
-        pubdata.len() / 32 <= OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS - 2,
-        "The encoded pubdata is too big"
-    );
+    let (l1_messenger_pubdata_start_slot, pubdata) = if protocol_version.is_pre_gateway() {
+        // Skipping two slots as they will be filled by the bootloader itself:
+        // - One slot is for the selector of the call to the L1Messenger.
+        // - The other slot is for the 0x20 offset for the calldata.
+        let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 2;
+
+        // Need to skip first word as it represents array offset
+        // while bootloader expects only [len || data]
+        let pubdata = ethabi::encode(&[ethabi::Token::Bytes(
+            pubdata_builder.l1_messenger_operator_input(pubdata_information, protocol_version),
+        )])[32..]
+            .to_vec();
+
+        assert!(
+            pubdata.len() / 32 <= OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS - 2,
+            "The encoded pubdata is too big"
+        );
+
+        (l1_messenger_pubdata_start_slot, pubdata)
+    } else {
+        // Skipping the first slot as it will be filled by the bootloader itself:
+        // It is for the selector of the call to the L1Messenger.
+        let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 1;
+
+        let pubdata =
+            bootloader_memory_input(pubdata_builder, pubdata_information, protocol_version);
+
+        assert!(
+            // Note that unlike the previous version, the difference is `1`, since now it also includes the offset
+            pubdata.len() / 32 < OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS,
+            "The encoded pubdata is too big"
+        );
+
+        (l1_messenger_pubdata_start_slot, pubdata)
+    };
 
     pubdata
         .chunks(32)
diff --git a/core/lib/multivm/src/versions/vm_latest/constants.rs b/core/lib/multivm/src/versions/vm_latest/constants.rs
index 01f697ec91a2..c95771f9e849 100644
--- a/core/lib/multivm/src/versions/vm_latest/constants.rs
+++ b/core/lib/multivm/src/versions/vm_latest/constants.rs
@@ -5,7 +5,7 @@ pub use zk_evm_1_5_0::zkevm_opcode_defs::system_params::{
 };
 use zksync_system_constants::MAX_NEW_FACTORY_DEPS;
 
-use super::vm::MultiVMSubversion;
+use super::vm::MultiVmSubversion;
 use crate::vm_latest::old_vm::utils::heap_page_from_base;
 
 /// The amount of ergs to be reserved at the end of the batch to ensure that it has enough ergs to verify compression, etc.
@@ -22,14 +22,15 @@ pub(crate) const MAX_BASE_LAYER_CIRCUITS: usize = 34100;
 /// the requirements on RAM.
 /// In this version of the VM the used bootloader memory bytes has increased from `30_000_000` to `59_000_000`,
 /// and then to `63_800_000` in a subsequent upgrade.
-pub(crate) const fn get_used_bootloader_memory_bytes(subversion: MultiVMSubversion) -> usize {
+pub(crate) const fn get_used_bootloader_memory_bytes(subversion: MultiVmSubversion) -> usize {
     match subversion {
-        MultiVMSubversion::SmallBootloaderMemory => 59_000_000,
-        MultiVMSubversion::IncreasedBootloaderMemory => 63_800_000,
+        MultiVmSubversion::SmallBootloaderMemory => 59_000_000,
+        MultiVmSubversion::IncreasedBootloaderMemory => 63_800_000,
+        MultiVmSubversion::Gateway => 63_800_000,
     }
 }
 
-pub(crate) const fn get_used_bootloader_memory_words(subversion: MultiVMSubversion) -> usize {
+pub(crate) const fn get_used_bootloader_memory_words(subversion: MultiVmSubversion) -> usize {
     get_used_bootloader_memory_bytes(subversion) / 32
 }
 
@@ -104,7 +105,7 @@ pub(crate) const BOOTLOADER_TX_DESCRIPTION_OFFSET: usize =
     OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS;
 
 /// The size of the bootloader memory dedicated to the encodings of transactions
-pub(crate) const fn get_bootloader_tx_encoding_space(subversion: MultiVMSubversion) -> u32 {
+pub(crate) const fn get_bootloader_tx_encoding_space(subversion: MultiVmSubversion) -> u32 {
     (get_used_bootloader_memory_words(subversion) - TX_DESCRIPTION_OFFSET - MAX_TXS_IN_BATCH) as u32
 }
 
@@ -128,21 +129,21 @@ pub const BOOTLOADER_HEAP_PAGE: u32 = heap_page_from_base(MemoryPage(INITIAL_BAS
 /// So the layout looks like this:
 /// `[param 0][param 1][param 2][vmhook opcode]`
 pub const VM_HOOK_PARAMS_COUNT: u32 = 3;
-pub(crate) const fn get_vm_hook_position(subversion: MultiVMSubversion) -> u32 {
+pub(crate) const fn get_vm_hook_position(subversion: MultiVmSubversion) -> u32 {
     get_result_success_first_slot(subversion) - 1
 }
 
-pub(crate) const fn get_vm_hook_params_start_position(subversion: MultiVMSubversion) -> u32 {
+pub(crate) const fn get_vm_hook_params_start_position(subversion: MultiVmSubversion) -> u32 {
     get_vm_hook_position(subversion) - VM_HOOK_PARAMS_COUNT
 }
 
 /// Method that provides the start position of the vm hook in the memory for the latest version of v1.5.0.
 /// This method is used only in `test_infra` in the bootloader tests and that's why it should be exposed.
 pub const fn get_vm_hook_start_position_latest() -> u32 {
-    get_vm_hook_params_start_position(MultiVMSubversion::IncreasedBootloaderMemory)
+    get_vm_hook_params_start_position(MultiVmSubversion::IncreasedBootloaderMemory)
 }
 
 /// Arbitrary space in memory closer to the end of the page
-pub(crate) const fn get_result_success_first_slot(subversion: MultiVMSubversion) -> u32 {
+pub(crate) const fn get_result_success_first_slot(subversion: MultiVmSubversion) -> u32 {
     ((get_used_bootloader_memory_bytes(subversion) as u32) - (MAX_TXS_IN_BATCH as u32) * 32) / 32
 }
 
@@ -201,6 +202,6 @@ pub(crate) const TX_SLOT_OVERHEAD_GAS: u32 = 10_000;
 /// getting often sealed due to the memory limit being reached, the L2 fair gas price will be increased.
 pub(crate) const TX_MEMORY_OVERHEAD_GAS: u32 = 10;
 
-const ZK_SYNC_BYTES_PER_BLOB: usize = BLOB_CHUNK_SIZE * ELEMENTS_PER_4844_BLOCK;
+pub(crate) const ZK_SYNC_BYTES_PER_BLOB: usize = BLOB_CHUNK_SIZE * ELEMENTS_PER_4844_BLOCK;
 
 pub const MAX_BLOBS_PER_BATCH: usize = 6;
 pub const MAX_VM_PUBDATA_PER_BATCH: usize = MAX_BLOBS_PER_BATCH * ZK_SYNC_BYTES_PER_BLOB;
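A worked sketch of the layout arithmetic defined by the constants above, using the `IncreasedBootloaderMemory` figure from this diff. `MAX_TXS_IN_BATCH = 10_000` is an assumption made here for illustration; the real value is defined elsewhere in `constants.rs` and is not shown in this diff.

const USED_BOOTLOADER_MEMORY_BYTES: u32 = 63_800_000;
const MAX_TXS_IN_BATCH: u32 = 10_000; // assumed for the sketch, not taken from this diff
const VM_HOOK_PARAMS_COUNT: u32 = 3;

fn main() {
    // One 32-byte result slot is reserved per potential transaction,
    // right at the end of the usable heap.
    let result_success_first_slot =
        (USED_BOOTLOADER_MEMORY_BYTES - MAX_TXS_IN_BATCH * 32) / 32;
    // The hook marker sits in the single word just below the result slots...
    let vm_hook_position = result_success_first_slot - 1;
    // ...preceded by its three parameter words: [param 0][param 1][param 2][vmhook opcode].
    let vm_hook_params_start = vm_hook_position - VM_HOOK_PARAMS_COUNT;

    assert_eq!(result_success_first_slot, 1_983_750);
    assert_eq!(vm_hook_position, 1_983_749);
    assert_eq!(vm_hook_params_start, 1_983_746);
    println!("hook at word {vm_hook_position}, params from word {vm_hook_params_start}");
}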
diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs
index 2cd98c8e58a3..655f55bc8fc2 100644
--- a/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs
+++ b/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs
@@ -1,13 +1,12 @@
 use itertools::Itertools;
-use zksync_types::U256;
-use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words};
+use zksync_types::{bytecode::BytecodeHash, U256};
 
 use crate::{
     interface::{
         storage::{StoragePtr, WriteStorage},
         CompressedBytecodeInfo,
     },
-    utils::bytecode,
+    utils::{bytecode, bytecode::bytes_to_be_words},
     vm_latest::Vm,
     HistoryMode,
 };
@@ -25,18 +24,15 @@ impl<S: WriteStorage, H: HistoryMode> Vm<S, H> {
                 .storage
                 .get_ptr()
                 .borrow_mut()
-                .is_bytecode_known(&hash_bytecode(&info.original))
+                .is_bytecode_known(&BytecodeHash::for_bytecode(&info.original).value())
         })
     }
 }
 
 /// Converts bytecode to tokens and hashes it.
 pub(crate) fn bytecode_to_factory_dep(bytecode: Vec<u8>) -> (U256, Vec<U256>) {
-    let bytecode_hash = hash_bytecode(&bytecode);
-    let bytecode_hash = U256::from_big_endian(bytecode_hash.as_bytes());
-
-    let bytecode_words = bytes_to_be_words(bytecode);
-
+    let bytecode_hash = BytecodeHash::for_bytecode(&bytecode).value_u256();
+    let bytecode_words = bytes_to_be_words(&bytecode);
     (bytecode_hash, bytecode_words)
 }
 
@@ -49,7 +45,11 @@ pub(crate) fn compress_bytecodes(
         .enumerate()
         .sorted_by_key(|(_idx, dep)| *dep)
         .dedup_by(|x, y| x.1 == y.1)
-        .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep)))
+        .filter(|(_idx, dep)| {
+            !storage
+                .borrow_mut()
+                .is_bytecode_known(&BytecodeHash::for_bytecode(dep).value())
+        })
         .sorted_by_key(|(idx, _dep)| *idx)
         .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok())
         .collect()
diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs
index e70f05f85ef2..f8acfaec4259 100644
--- a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs
+++ b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs
@@ -1,6 +1,7 @@
 use std::mem;
 
 use zk_evm_1_5_0::aux_structures::Timestamp;
+use zksync_vm_interface::VmEvent;
 
 use crate::{
     interface::{
@@ -14,7 +15,6 @@ use crate::{
             circuits_capacity::circuit_statistic_from_cycles, dispatcher::TracerDispatcher,
             DefaultExecutionTracer, PubdataTracer, RefundsTracer,
         },
-        utils::extract_bytecodes_marked_as_known,
         vm::Vm,
     },
     HistoryMode,
 };
@@ -69,6 +69,7 @@ impl<S: WriteStorage, H: HistoryMode> Vm<S, H> {
                     self.batch_env.clone(),
                     execution_mode,
                     self.subversion,
+                    None,
                 ))
             }),
             self.subversion,
@@ -100,8 +101,8 @@ impl<S: WriteStorage, H: HistoryMode> Vm<S, H> {
             circuit_statistic_from_cycles(tx_tracer.circuits_tracer.statistics),
         );
         let result = tx_tracer.result_tracer.into_result();
-        let factory_deps_marked_as_known = extract_bytecodes_marked_as_known(&logs.events);
-        let new_known_factory_deps = self.decommit_bytecodes(&factory_deps_marked_as_known);
+        let factory_deps_marked_as_known = VmEvent::extract_bytecodes_marked_as_known(&logs.events);
+        let dynamic_factory_deps = self.decommit_dynamic_bytecodes(factory_deps_marked_as_known);
         *dispatcher = tx_tracer.dispatcher;
 
         let result
= VmExecutionResultAndLogs { @@ -109,7 +110,7 @@ impl Vm { logs, statistics, refunds, - new_known_factory_deps: Some(new_known_factory_deps), + dynamic_factory_deps, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_latest/mod.rs b/core/lib/multivm/src/versions/vm_latest/mod.rs index 211c527c3816..46f8db789ddc 100644 --- a/core/lib/multivm/src/versions/vm_latest/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/mod.rs @@ -1,4 +1,4 @@ -pub(crate) use self::vm::MultiVMSubversion; +pub(crate) use self::vm::MultiVmSubversion; pub use self::{ bootloader_state::BootloaderState, old_vm::{ diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/events.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/events.rs index fd6f393155d7..bded254c7fcc 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/events.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/events.rs @@ -1,8 +1,7 @@ use zk_evm_1_5_0::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use zksync_types::{h256_to_address, L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use crate::interface::VmEvent; +use crate::{interface::VmEvent, utils::bytecode::be_chunks_to_h256_words}; #[derive(Clone)] pub(crate) struct SolidityLikeEvent { @@ -135,7 +134,7 @@ pub(crate) fn merge_events(events: Vec) -> Vec .filter(|e| e.address == EVENT_WRITER_ADDRESS) .map(|event| { // The events writer events where the first topic is the actual address of the event and the rest of the topics are real topics - let address = h256_to_account_address(&H256(event.topics[0])); + let address = h256_to_address(&H256(event.topics[0])); let topics = event.topics.into_iter().skip(1).collect(); SolidityLikeEvent { diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/history_recorder.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/history_recorder.rs index e7277f38289d..9dac6480dc57 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/history_recorder.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/history_recorder.rs @@ -5,8 +5,7 @@ use zk_evm_1_5_0::{ vm_state::PrimitiveValue, zkevm_opcode_defs::{self}, }; -use zksync_types::{StorageKey, H256, U256}; -use zksync_utils::{h256_to_u256, u256_to_h256}; +use zksync_types::{h256_to_u256, u256_to_h256, StorageKey, H256, U256}; use crate::interface::storage::{StoragePtr, WriteStorage}; diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs index d91fbfdb24df..1afa9b483ec5 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs @@ -1,4 +1,7 @@ -use std::{collections::HashMap, fmt::Debug}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, +}; use zk_evm_1_5_0::{ abstractions::{DecommittmentProcessor, Memory, MemoryType}, @@ -7,12 +10,12 @@ use zk_evm_1_5_0::{ }, zkevm_opcode_defs::{VersionedHashHeader, VersionedHashNormalizedPreimage}, }; -use zksync_types::{H256, U256}; -use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; +use zksync_types::{h256_to_u256, u256_to_h256, H256, U256}; use super::OracleWithHistory; use crate::{ interface::storage::{ReadStorage, StoragePtr}, + utils::bytecode::bytes_to_be_words, vm_latest::old_vm::history_recorder::{ HistoryEnabled, HistoryMode, HistoryRecorder, 
WithHistory, }, @@ -27,6 +30,9 @@ pub struct DecommitterOracle { /// The cache of bytecodes that the bootloader "knows", but that are not necessarily in the database. /// And it is also used as a database cache. pub known_bytecodes: HistoryRecorder>, H>, + /// Subset of `known_bytecodes` that are dynamically deployed during VM execution. Currently, + /// only EVM bytecodes can be deployed like that. + pub dynamic_bytecode_hashes: HashSet, /// Stores pages of memory where certain code hashes have already been decommitted. /// It is expected that they all are present in the DB. // `decommitted_code_hashes` history is necessary @@ -40,6 +46,7 @@ impl DecommitterOracle { Self { storage, known_bytecodes: HistoryRecorder::default(), + dynamic_bytecode_hashes: HashSet::default(), decommitted_code_hashes: HistoryRecorder::default(), decommitment_requests: HistoryRecorder::default(), } @@ -62,7 +69,7 @@ impl DecommitterOracle { .load_factory_dep(u256_to_h256(hash)) .unwrap_or_else(|| panic!("Trying to decommit unexisting hash: {}", hash)); - let value = bytes_to_be_words(value); + let value = bytes_to_be_words(&value); self.known_bytecodes.insert(hash, value.clone(), timestamp); value } @@ -76,6 +83,17 @@ impl DecommitterOracle { } } + pub fn insert_dynamic_bytecode( + &mut self, + bytecode_hash: U256, + bytecode: Vec, + timestamp: Timestamp, + ) { + self.dynamic_bytecode_hashes.insert(bytecode_hash); + self.known_bytecodes + .insert(bytecode_hash, bytecode, timestamp); + } + pub fn get_decommitted_bytecodes_after_timestamp(&self, timestamp: Timestamp) -> usize { // Note, that here we rely on the fact that for each used bytecode // there is one and only one corresponding event in the history of it. diff --git a/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs b/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs index 9c7b68c1ad51..242cdc6a2239 100644 --- a/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs @@ -10,6 +10,7 @@ use zk_evm_1_5_0::{ }, }; use zksync_types::{ + h256_to_u256, u256_to_h256, utils::storage_key_for_eth_balance, writes::{ compression::compress_with_best_strategy, BYTES_PER_DERIVED_KEY, @@ -17,7 +18,6 @@ use zksync_types::{ }, AccountTreeId, Address, StorageKey, StorageLogKind, BOOTLOADER_ADDRESS, U256, }; -use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::{ glue::GlueInto, @@ -620,8 +620,7 @@ fn get_pubdata_price_bytes(initial_value: U256, final_value: U256, is_initial: b #[cfg(test)] mod tests { - use zksync_types::H256; - use zksync_utils::h256_to_u256; + use zksync_types::{h256_to_u256, H256}; use super::*; use crate::interface::storage::{InMemoryStorage, StorageView}; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs index 9909ca24937f..df4a36f2d3dd 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs @@ -1,428 +1,9 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use itertools::Itertools; -use zk_evm_1_5_0::aux_structures::Timestamp; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, - l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, H256, U256, -}; -use 
zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; use crate::{ - interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, - tests::tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTesterBuilder, - }, - tracers::PubdataTracer, - HistoryEnabled, TracerDispatcher, - }, + versions::testonly::block_tip::test_dry_run_upper_bound, + vm_latest::{HistoryEnabled, Vm}, }; -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec>, - bytecodes: Vec>, - state_diffs: Vec, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec, -} - -const CALLS_PER_TX: usize = 1_000; -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec> { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .chunks(CALLS_PER_TX) - .into_iter() - .map(|chunk| { - complex_upgrade - .function("mimicCalls") - .unwrap() - .encode_input(&[Token::Array(chunk.collect_vec())]) - .unwrap() - }) - .collect_vec(); - - encoded_calls -} - -struct TestStatistics { - pub max_used_gas: u32, - pub circuit_statistics: u64, - pub execution_metrics_size: u64, -} - -struct StatisticsTagged { - pub statistics: TestStatistics, - pub tag: String, -} - -fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { - let mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute - // the gas limit - - let batch_env = L1BatchEnv { - fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), - 
..default_l1_batch(zksync_types::L1BatchNumber(1)) - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_l1_batch_env(batch_env) - .build(); - - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); - vm.vm - .state - .decommittment_processor - .populate(bytecodes, Timestamp(0)); - - let txs_data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - - for (i, data) in txs_data.into_iter().enumerate() { - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), - calldata: data, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction {i} wasn't successful for input: {:#?}", - test_data - ); - } - - // Now we count how much ergs were spent at the end of the batch - // It is assumed that the top level frame is the bootloader - - let ergs_before = vm.vm.state.local_state.callstack.current.ergs_remaining; - - // We ensure that indeed the provided state diffs are used - let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs.clone(), - crate::vm_latest::MultiVMSubversion::latest(), - ); - - let result = vm.vm.inspect_inner( - &mut TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - - assert!( - !result.result.is_failed(), - "Batch wasn't successful for input: {:?}", - test_data - ); - - let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; - - assert_eq!( - (ergs_before - ergs_after) as u64, - result.statistics.gas_used - ); - - TestStatistics { - max_used_gas: ergs_before - ergs_after, - circuit_statistics: result.statistics.circuit_statistic.total() as u64, - execution_metrics_size: result.get_execution_metrics(None).size() as u64, - } -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -// A valid zkEVM bytecode has odd number of 32 byte words -fn get_valid_bytecode_length(length: usize) -> usize { - // Firstly ensure that the length is divisible by 32 - let length_padded_to_32 = if length % 32 == 0 { - length - } else { - length + 32 - (length % 32) - }; - - // Then we ensure that the number returned by division by 32 is odd - if length_padded_to_32 % 64 == 0 { - length_padded_to_32 + 32 - } else { - length_padded_to_32 - } -} - #[test] -fn test_dry_run_upper_bound() { - // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, 
etc.). - // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` - // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. - const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = - (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; - - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. - - let statistics = vec![ - // max logs - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, - ..Default::default() - }), - tag: "max_logs".to_string(), - }, - // max messages - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, - // so the max number of pubdata is bound by it - messages: vec![ - vec![0; 0]; - MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) - ], - ..Default::default() - }), - tag: "max_messages".to_string(), - }, - // long message - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], - ..Default::default() - }), - tag: "long_message".to_string(), - }, - // max bytecodes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long. 
- // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number - bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], - ..Default::default() - }), - tag: "max_bytecodes".to_string(), - }, - // long bytecode - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - bytecodes: vec![ - vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; - 1 - ], - ..Default::default() - }), - tag: "long_bytecode".to_string(), - }, - // lots of small repeated writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), - ..Default::default() - }), - tag: "small_repeated_writes".to_string(), - }, - // lots of big repeated writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs( - true, - false, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37, - ), - ..Default::default() - }), - tag: "big_repeated_writes".to_string(), - }, - // lots of small initial writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each small initial write will take at least 32 bytes for derived key + 1 bytes encoding zeroing out - state_diffs: generate_state_diffs( - false, - true, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33, - ), - ..Default::default() - }), - tag: "small_initial_writes".to_string(), - }, - // lots of large initial writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs( - false, - false, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65, - ), - ..Default::default() - }), - tag: "big_initial_writes".to_string(), - }, - ]; - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
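A quick sketch, not in the diff itself, of the per-item pubdata costs that the scenario list above relies on; the byte counts are taken from the comments in the scenarios, and `L2ToL1Log::SERIALIZED_SIZE` is assumed to come from `zksync_types`:

    // Per-item pubdata costs assumed by the scenarios above.
    const BYTES_PER_MESSAGE: usize = L2ToL1Log::SERIALIZED_SIZE + 4; // log + 4-byte length
    const BYTES_PER_MIN_BYTECODE: usize = 32 + 4; // minimal 32-byte bytecode + 4-byte length
    const BYTES_PER_SMALL_REPEATED_WRITE: usize = 5; // enum index + 4-byte key
    const BYTES_PER_BIG_REPEATED_WRITE: usize = 4 + 1 + 32; // key + encoding type + value
    const BYTES_PER_SMALL_INITIAL_WRITE: usize = 32 + 1; // derived key + zeroing byte
    const BYTES_PER_BIG_INITIAL_WRITE: usize = 32 + 1 + 32; // derived key + encoding type + value
    // Dividing `MAX_EFFECTIVE_PUBDATA_PER_BATCH` by each constant yields the item
    // counts passed to `L1MessengerTestData` in the corresponding scenario.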
-    let max_used_gas = statistics
-        .iter()
-        .map(|s| (s.statistics.max_used_gas, s.tag.clone()))
-        .max()
-        .unwrap();
-    assert!(
-        max_used_gas.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD,
-        "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}",
-        max_used_gas.1,
-        max_used_gas.0,
-        BOOTLOADER_BATCH_TIP_OVERHEAD
-    );
-
-    let circuit_statistics = statistics
-        .iter()
-        .map(|s| (s.statistics.circuit_statistics, s.tag.clone()))
-        .max()
-        .unwrap();
-    assert!(
-        circuit_statistics.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64,
-        "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}",
-        circuit_statistics.1,
-        circuit_statistics.0,
-        BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD
-    );
-
-    let execution_metrics_size = statistics
-        .iter()
-        .map(|s| (s.statistics.execution_metrics_size, s.tag.clone()))
-        .max()
-        .unwrap();
-    assert!(
-        execution_metrics_size.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64,
-        "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}",
-        execution_metrics_size.1,
-        execution_metrics_size.0,
-        BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD
-    );
+fn dry_run_upper_bound() {
+    test_dry_run_upper_bound::<Vm<_, HistoryEnabled>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs
index 9d23f658cb82..22239a6c1e35 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs
@@ -1,57 +1,14 @@
-use assert_matches::assert_matches;
-use zksync_types::U256;
-
 use crate::{
-    interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt},
-    vm_latest::{
-        constants::BOOTLOADER_HEAP_PAGE,
-        tests::{
-            tester::VmTesterBuilder,
-            utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS},
-        },
-        HistoryEnabled,
-    },
+    versions::testonly::bootloader::{test_bootloader_out_of_gas, test_dummy_bootloader},
+    vm_latest::{HistoryEnabled, Vm},
 };

 #[test]
-fn test_dummy_bootloader() {
-    let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone();
-    base_system_contracts.bootloader = get_bootloader("dummy");
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_base_system_smart_contracts(base_system_contracts)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .build();
-
-    let result = vm.vm.execute(VmExecutionMode::Batch);
-    assert!(!result.result.is_failed());
-
-    let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap();
-    verify_required_memory(
-        &vm.vm.state,
-        vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)],
-    );
+fn dummy_bootloader() {
+    test_dummy_bootloader::<Vm<_, HistoryEnabled>>();
 }

 #[test]
-fn test_bootloader_out_of_gas() {
-    let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone();
-    base_system_contracts.bootloader = get_bootloader("dummy");
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_base_system_smart_contracts(base_system_contracts)
-        .with_bootloader_gas_limit(10)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .build();
-
-    let res = vm.vm.execute(VmExecutionMode::Batch);
-
-    assert_matches!(
-        res.result,
-        ExecutionResult::Halt {
-            reason: Halt::BootloaderOutOfGas
-        }
-    );
+fn bootloader_out_of_gas() {
+    test_bootloader_out_of_gas::<Vm<_, HistoryEnabled>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs
index 2ed9948af819..e0727fbed89b 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs
@@ -1,41 +1,9 @@
 use crate::{
-    interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterface, VmInterfaceExt},
-    utils::bytecode,
-    vm_latest::{
-        tests::{
-            tester::{DeployContractsTx, TxType, VmTesterBuilder},
-            utils::read_test_contract,
-        },
-        HistoryEnabled,
-    },
+    versions::testonly::bytecode_publishing::test_bytecode_publishing,
+    vm_latest::{HistoryEnabled, Vm},
 };

 #[test]
-fn test_bytecode_publishing() {
-    // In this test, we aim to ensure that the contents of the compressed bytecodes
-    // are included as part of the L2->L1 long messages
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let counter = read_test_contract();
-    let account = &mut vm.rich_accounts[0];
-
-    let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed;
-
-    let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2);
-    vm.vm.push_transaction(tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed(), "Transaction wasn't successful");
-
-    vm.vm.execute(VmExecutionMode::Batch);
-
-    let state = vm.vm.get_current_execution_state();
-    let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events);
-    assert!(
-        long_messages.contains(&compressed_bytecode),
-        "Bytecode not published"
-    );
+fn bytecode_publishing() {
+    test_bytecode_publishing::<Vm<_, HistoryEnabled>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs
index e7f26b7faf88..c8f623478569 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs
@@ -1,19 +1,15 @@
 use std::sync::Arc;

 use once_cell::sync::OnceCell;
+use zksync_test_contracts::TestContract;
 use zksync_types::{Address, Execute};

+use super::TestedLatestVm;
 use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
+    interface::{InspectExecutionMode, TxExecutionMode, VmInterface},
     tracers::CallTracer,
-    vm_latest::{
-        constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
-        tests::{
-            tester::VmTesterBuilder,
-            utils::{read_max_depth_contract, read_test_contract},
-        },
-        HistoryEnabled, ToTracerPointer,
-    },
+    versions::testonly::{read_max_depth_contract, ContractToDeploy, VmTesterBuilder},
+    vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, ToTracerPointer},
 };

 // This test is ultra slow, so it's ignored by default.
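A sketch, not in the diff itself, of the conversion pattern every file below follows: the version-agnostic test body moves into `versions/testonly`, and the per-version file keeps only a thin wrapper pinning the concrete VM type. The `TestedVm` trait bound is an assumption here; the diff only shows the call sites.

    // In `versions/testonly/bytecode_publishing.rs` (shared across VM versions):
    pub(crate) fn test_bytecode_publishing<VM: TestedVm>() {
        // ...the assertions previously duplicated in each `vm_*/tests/` copy...
    }

    // In `vm_latest/tests/bytecode_publishing.rs` (per-version wrapper):
    #[test]
    fn bytecode_publishing() {
        test_bytecode_publishing::<Vm<_, HistoryEnabled>>();
    }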
@@ -22,14 +18,13 @@ use crate::{ fn test_max_depth() { let contarct = read_max_depth_contract(); let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() + .with_rich_accounts(1) .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); + .with_custom_contracts(vec![ContractToDeploy::account(contarct, address)]) + .build::(); let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( @@ -47,23 +42,22 @@ fn test_max_depth() { vm.vm.push_transaction(tx); let res = vm .vm - .inspect(&mut call_tracer.into(), VmExecutionMode::OneTx); + .inspect(&mut call_tracer.into(), InspectExecutionMode::OneTx); assert!(result.get().is_some()); assert!(res.result.is_failed()); } #[test] fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let contract = TestContract::counter().bytecode.to_vec(); + let address = Address::repeat_byte(1); + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() + .with_rich_accounts(1) .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) + .build::(); let increment_by_6_calldata = "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; @@ -84,7 +78,7 @@ fn test_basic_behavior() { vm.vm.push_transaction(tx); let res = vm .vm - .inspect(&mut call_tracer.into(), VmExecutionMode::OneTx); + .inspect(&mut call_tracer.into(), InspectExecutionMode::OneTx); let call_tracer_result = result.get().unwrap(); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs index c3c6816cbd8f..690af7d2a357 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs @@ -1,76 +1,9 @@ -use zksync_types::{Address, Execute, U256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled, - }, + versions::testonly::circuits::test_circuits, + vm_latest::{HistoryEnabled, Vm}, }; -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. #[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(Address::random()), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm - .vm - .inspect(&mut Default::default(), VmExecutionMode::OneTx); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`. 
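The loop in the removed code below applies a simple comparison rule; restated as a standalone helper for clarity (a sketch, not part of the diff):

    // Zero entries must match exactly; non-zero entries are allowed a 10%
    // relative deviation from the hardcoded expectation.
    fn close_enough(actual: f32, expected: f32) -> bool {
        if expected == 0.0 {
            actual == expected
        } else {
            ((actual - expected) / expected).abs() < 0.1
        }
    }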
- const EXPECTED: [f32; 13] = [ - 1.34935, 0.15026, 1.66666, 0.00315, 1.0594, 0.00058, 0.00348, 0.00076, 0.11945, 0.14285, - 0.0, 0.0, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - (s.secp256k1_verify, "secp256k1_verify"), - (s.transient_storage_checker, "transient_storage_checker"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } +fn circuits() { + test_circuits::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs index b15ef7fde2bf..e50e2aafcbfc 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs @@ -1,282 +1,21 @@ -use ethabi::Token; -use zk_evm_1_5_0::{ - aux_structures::{MemoryPage, Timestamp}, - zkevm_opcode_defs::{ContractCodeSha256Format, VersionedHashLen32}, -}; -use zksync_types::{ - get_known_code_key, web3::keccak256, Address, Execute, StorageLogWithPreviousValue, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, - }, - HistoryEnabled, + versions::testonly::code_oracle::{ + test_code_oracle, test_code_oracle_big_bytecode, test_refunds_in_code_oracle, }, + vm_latest::{HistoryEnabled, Vm}, }; -fn generate_large_bytecode() -> Vec { - // This is the maximal possible size of a zkEVM bytecode - vec![2u8; ((1 << 16) - 1) * 32] -} - #[test] -fn test_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - // Filling the zkevm bytecode - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![( - precompile_contract_bytecode, - precompiles_contract_address, - false, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.state.decommittment_processor.populate( - vec![( - h256_to_u256(normal_zkevm_bytecode_hash), - bytes_to_be_words(normal_zkevm_bytecode), - )], - Timestamp(0), - ); - - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - - // Now, we ask for the same bytecode. We use to partially check whether the memory page with - // the decommitted bytecode gets erased (it shouldn't). - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); -} - -fn find_code_oracle_cost_log( - precompiles_contract_address: Address, - logs: &[StorageLogWithPreviousValue], -) -> &StorageLogWithPreviousValue { - logs.iter() - .find(|log| { - *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero() - }) - .expect("no code oracle cost log") +fn code_oracle() { + test_code_oracle::>(); } #[test] -fn test_code_oracle_big_bytecode() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let big_zkevm_bytecode = generate_large_bytecode(); - let big_zkevm_bytecode_hash = hash_bytecode(&big_zkevm_bytecode); - let big_zkevm_bytecode_keccak_hash = keccak256(&big_zkevm_bytecode); - - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&big_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
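A sketch, not in the diff, collecting the zkEVM bytecode shape rules mentioned in comments throughout these tests; treat the exact upper bound as an assumption tied to the versioned-hash format:

    // A zkEVM bytecode must consist of whole 32-byte words, the word count must
    // be odd, and it must fit the versioned hash's word-count field.
    fn is_valid_zkevm_bytecode(bytecode: &[u8]) -> bool {
        let words = bytecode.len() / 32;
        bytecode.len() % 32 == 0 && words % 2 == 1 && words <= (1 << 16) - 1
    }
    // `generate_large_bytecode` above returns ((1 << 16) - 1) * 32 bytes, i.e.
    // 65535 words: the largest odd word count that still fits.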
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![( - precompile_contract_bytecode, - precompiles_contract_address, - false, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.state.decommittment_processor.populate( - vec![( - h256_to_u256(big_zkevm_bytecode_hash), - bytes_to_be_words(big_zkevm_bytecode), - )], - Timestamp(0), - ); - - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(big_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); +fn code_oracle_big_bytecode() { + test_code_oracle_big_bytecode::>(); } #[test] fn refunds_in_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_words = bytes_to_be_words(normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - // Execute code oracle twice with identical VM state that only differs in that the queried bytecode - // is already decommitted the second time. The second call must consume less gas (`decommit` doesn't charge additional gas - // for already decommitted codes). 
- let mut oracle_costs = vec![]; - for decommit in [false, true] { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![( - precompile_contract_bytecode.clone(), - precompiles_contract_address, - false, - )]) - .with_storage(storage.clone()) - .build(); - - vm.vm.state.decommittment_processor.populate( - vec![( - h256_to_u256(normal_zkevm_bytecode_hash), - normal_zkevm_bytecode_words.clone(), - )], - Timestamp(0), - ); - - let account = &mut vm.rich_accounts[0]; - if decommit { - let (header, normalized_preimage) = - ContractCodeSha256Format::normalize_for_decommitment(&normal_zkevm_bytecode_hash.0); - let query = vm - .vm - .state - .prepare_to_decommit( - 0, - header, - normalized_preimage, - MemoryPage(123), - Timestamp(0), - ) - .unwrap(); - - assert!(query.is_fresh); - vm.vm.state.execute_decommit(0, query).unwrap(); - } - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - let log = - find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs); - oracle_costs.push(log.log.value); - } - - // The refund is equal to `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words` - // in `CodeOracle.yul`. 
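A worked restatement, not part of the diff, of the refund formula the assertion below relies on, assuming `decommit` charges 4 gas per 32-byte word as stated for `CodeOracle.yul`:

    // A cold decommit pays the full cost; a repeated decommit of the same
    // bytecode is fully refunded, so the cold/warm cost difference equals:
    fn expected_decommit_refund(bytecode_len_in_words: usize) -> u64 {
        4 * bytecode_len_in_words as u64
    }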
- let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]); - assert_eq!( - code_oracle_refund, - (4 * normal_zkevm_bytecode_words.len()).into() - ); + test_refunds_in_code_oracle::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/constants.rs b/core/lib/multivm/src/versions/vm_latest/tests/constants.rs index 3b75bfd6d36b..8ee62650ca77 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/constants.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/constants.rs @@ -3,7 +3,7 @@ #[test] fn test_that_bootloader_encoding_space_is_large_enoguh() { let encoding_space = crate::vm_latest::constants::get_bootloader_tx_encoding_space( - crate::vm_latest::MultiVMSubversion::latest(), + crate::vm_latest::MultiVmSubversion::latest(), ); assert!(encoding_space >= 330000, "Bootloader tx space is too small"); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs index aa3eb5e752ce..3d0e21c2466f 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs @@ -1,79 +1,9 @@ -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - utils::fee::get_batch_base_fee, - HistoryEnabled, - }, + versions::testonly::default_aa::test_default_aa_interaction, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); +fn default_aa_interaction() { + test_default_aa_interaction::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs index 34780b73eb05..5b6e24eefbf0 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs @@ -1,510 +1,69 @@ -use std::collections::HashMap; - -use ethabi::Token; use test_casing::{test_casing, Product}; -use zksync_contracts::{load_contract, read_bytecode, SystemContractCode}; -use zksync_system_constants::{ - CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L2_BASE_TOKEN_ADDRESS, -}; -use zksync_test_account::TxType; -use zksync_types::{ - get_code_key, get_known_code_key, - utils::{key_for_eth_balance, storage_key_for_eth_balance}, - AccountTreeId, Address, Execute, StorageKey, H256, U256, -}; -use zksync_utils::{ - be_words_to_bytes, - bytecode::{hash_bytecode, hash_evm_bytecode}, - bytes_to_be_words, h256_to_u256, -}; use crate::{ - interface::{ - storage::InMemoryStorage, TxExecutionMode, VmExecutionResultAndLogs, VmInterfaceExt, - }, - versions::testonly::default_system_env, - vm_latest::{ - tests::tester::{VmTester, VmTesterBuilder}, - HistoryEnabled, + versions::testonly::evm_emulator::{ + test_calling_to_mock_emulator_from_native_contract, test_mock_emulator_basics, + test_mock_emulator_with_delegate_call, test_mock_emulator_with_deployment, + test_mock_emulator_with_partial_reverts, test_mock_emulator_with_payment, + test_mock_emulator_with_recursion, test_mock_emulator_with_recursive_deployment, + test_mock_emulator_with_static_call, test_tracing_evm_contract_deployment, }, + vm_latest::{HistoryEnabled, Vm}, }; -const MOCK_DEPLOYER_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockContractDeployer.json"; -const MOCK_KNOWN_CODE_STORAGE_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockKnownCodeStorage.json"; -const MOCK_EMULATOR_PATH: &str = - "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockEvmEmulator.json"; -const RECURSIVE_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/NativeRecursiveContract.json"; -const INCREMENTING_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/IncrementingContract.json"; - -fn override_system_contracts(storage: &mut InMemoryStorage) { - let mock_deployer = read_bytecode(MOCK_DEPLOYER_PATH); - let mock_deployer_hash = hash_bytecode(&mock_deployer); - let mock_known_code_storage = read_bytecode(MOCK_KNOWN_CODE_STORAGE_PATH); - let mock_known_code_storage_hash = hash_bytecode(&mock_known_code_storage); - - storage.set_value(get_code_key(&CONTRACT_DEPLOYER_ADDRESS), mock_deployer_hash); - storage.set_value( - 
get_known_code_key(&mock_deployer_hash),
-        H256::from_low_u64_be(1),
-    );
-    storage.set_value(
-        get_code_key(&KNOWN_CODES_STORAGE_ADDRESS),
-        mock_known_code_storage_hash,
-    );
-    storage.set_value(
-        get_known_code_key(&mock_known_code_storage_hash),
-        H256::from_low_u64_be(1),
-    );
-    storage.store_factory_dep(mock_deployer_hash, mock_deployer);
-    storage.store_factory_dep(mock_known_code_storage_hash, mock_known_code_storage);
-}
-
-#[derive(Debug)]
-struct EvmTestBuilder {
-    deploy_emulator: bool,
-    storage: InMemoryStorage,
-    evm_contract_addresses: Vec<Address>
, -} - -impl EvmTestBuilder { - fn new(deploy_emulator: bool, evm_contract_address: Address) -> Self { - Self { - deploy_emulator, - storage: InMemoryStorage::with_system_contracts(hash_bytecode), - evm_contract_addresses: vec![evm_contract_address], - } - } - - fn with_mock_deployer(mut self) -> Self { - override_system_contracts(&mut self.storage); - self - } - - fn with_evm_address(mut self, address: Address) -> Self { - self.evm_contract_addresses.push(address); - self - } - - fn build(self) -> VmTester { - let mock_emulator = read_bytecode(MOCK_EMULATOR_PATH); - let mut storage = self.storage; - let mut system_env = default_system_env(); - if self.deploy_emulator { - let evm_bytecode: Vec<_> = (0..32).collect(); - let evm_bytecode_hash = hash_evm_bytecode(&evm_bytecode); - storage.set_value( - get_known_code_key(&evm_bytecode_hash), - H256::from_low_u64_be(1), - ); - for evm_address in self.evm_contract_addresses { - storage.set_value(get_code_key(&evm_address), evm_bytecode_hash); - } - - system_env.base_system_smart_contracts.evm_emulator = Some(SystemContractCode { - hash: hash_bytecode(&mock_emulator), - code: bytes_to_be_words(mock_emulator), - }); - } else { - let emulator_hash = hash_bytecode(&mock_emulator); - storage.set_value(get_known_code_key(&emulator_hash), H256::from_low_u64_be(1)); - storage.store_factory_dep(emulator_hash, mock_emulator); - - for evm_address in self.evm_contract_addresses { - storage.set_value(get_code_key(&evm_address), emulator_hash); - // Set `isUserSpace` in the emulator storage to `true`, so that it skips emulator-specific checks - storage.set_value( - StorageKey::new(AccountTreeId::new(evm_address), H256::zero()), - H256::from_low_u64_be(1), - ); - } - } - - VmTesterBuilder::new(HistoryEnabled) - .with_system_env(system_env) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build() - } -} - #[test] fn tracing_evm_contract_deployment() { - let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); - override_system_contracts(&mut storage); - - let mut system_env = default_system_env(); - // The EVM emulator will not be accessed, so we set it to a dummy value. - system_env.base_system_smart_contracts.evm_emulator = - Some(system_env.base_system_smart_contracts.default_aa.clone()); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_system_env(system_env) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let account = &mut vm.rich_accounts[0]; - - let args = [Token::Bytes((0..32).collect())]; - let evm_bytecode = ethabi::encode(&args); - let expected_bytecode_hash = hash_evm_bytecode(&evm_bytecode); - let execute = Execute::for_deploy(expected_bytecode_hash, vec![0; 32], &args); - let deploy_tx = account.get_l2_tx_for_execute(execute, None); - let (_, vm_result) = vm - .vm - .execute_transaction_with_bytecode_compression(deploy_tx, true); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); - - // Check that the surrogate EVM bytecode was added to the decommitter. 
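A sketch, not in the diff, of the two bytecode-hash flavors the builder above relies on. The framing (an EVM-style versioned hash differing in its version byte and routing execution through the emulator) is an assumption; the diff only shows the call sites of `hash_bytecode` and `hash_evm_bytecode`:

    // Marking an address as an EraVM vs. an EVM contract (assumed framing):
    let era_hash = hash_bytecode(&era_bytecode); // EraVM versioned hash
    let evm_hash = hash_evm_bytecode(&evm_bytecode); // EVM-flavored versioned hash
    // Pointing an address's code slot at one hash or the other decides whether
    // it executes natively or through the EVM emulator:
    storage.set_value(get_code_key(&era_address), era_hash);
    storage.set_value(get_code_key(&evm_address), evm_hash);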
- let known_bytecodes = vm.vm.state.decommittment_processor.known_bytecodes.inner(); - let known_evm_bytecode = - be_words_to_bytes(&known_bytecodes[&h256_to_u256(expected_bytecode_hash)]); - assert_eq!(known_evm_bytecode, evm_bytecode); - - let new_known_factory_deps = vm_result.new_known_factory_deps.unwrap(); - assert_eq!(new_known_factory_deps.len(), 2); // the deployed EraVM contract + EVM contract - assert_eq!( - new_known_factory_deps[&expected_bytecode_hash], - evm_bytecode - ); + test_tracing_evm_contract_deployment::>(); } #[test] fn mock_emulator_basics() { - let called_address = Address::repeat_byte(0x23); - let mut vm = EvmTestBuilder::new(true, called_address).build(); - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(called_address), - calldata: vec![], - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - - let (_, vm_result) = vm - .vm - .execute_transaction_with_bytecode_compression(tx, true); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + test_mock_emulator_basics::>(); } -const RECIPIENT_ADDRESS: Address = Address::repeat_byte(0x12); - -/// `deploy_emulator = false` here and below tests the mock emulator as an ordinary contract (i.e., sanity-checks its logic). #[test_casing(2, [false, true])] #[test] fn mock_emulator_with_payment(deploy_emulator: bool) { - let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); - let mut vm = EvmTestBuilder::new(deploy_emulator, RECIPIENT_ADDRESS).build(); - - let mut current_balance = U256::zero(); - for i in 1_u64..=5 { - let transferred_value = (1_000_000_000 * i).into(); - let vm_result = test_payment( - &mut vm, - &mock_emulator_abi, - &mut current_balance, - transferred_value, - ); - - let balance_storage_logs = vm_result.logs.storage_logs.iter().filter_map(|log| { - (*log.log.key.address() == L2_BASE_TOKEN_ADDRESS) - .then_some((*log.log.key.key(), h256_to_u256(log.log.value))) - }); - let balances: HashMap<_, _> = balance_storage_logs.collect(); - assert_eq!( - balances[&key_for_eth_balance(&RECIPIENT_ADDRESS)], - current_balance - ); - } -} - -fn test_payment( - vm: &mut VmTester, - mock_emulator_abi: ðabi::Contract, - balance: &mut U256, - transferred_value: U256, -) -> VmExecutionResultAndLogs { - *balance += transferred_value; - let test_payment_fn = mock_emulator_abi.function("testPayment").unwrap(); - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(RECIPIENT_ADDRESS), - calldata: test_payment_fn - .encode_input(&[Token::Uint(transferred_value), Token::Uint(*balance)]) - .unwrap(), - value: transferred_value, - factory_deps: vec![], - }, - None, - ); - - let (_, vm_result) = vm - .vm - .execute_transaction_with_bytecode_compression(tx, true); - assert!(!vm_result.result.is_failed(), "{vm_result:?}"); - vm_result + test_mock_emulator_with_payment::>(deploy_emulator); } #[test_casing(4, Product(([false, true], [false, true])))] #[test] fn mock_emulator_with_recursion(deploy_emulator: bool, is_external: bool) { - let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); - let recipient_address = Address::repeat_byte(0x12); - let mut vm = EvmTestBuilder::new(deploy_emulator, recipient_address).build(); - let account = &mut vm.rich_accounts[0]; - - let test_recursion_fn = mock_emulator_abi - .function(if is_external { - "testExternalRecursion" - } else { - "testRecursion" - }) - .unwrap(); - let mut expected_value = U256::one(); - let depth = 50_u32; - 
for i in 2..=depth { - expected_value *= i; - } - - let factory_deps = if is_external { - vec![read_bytecode(RECURSIVE_CONTRACT_PATH)] - } else { - vec![] - }; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(recipient_address), - calldata: test_recursion_fn - .encode_input(&[Token::Uint(depth.into()), Token::Uint(expected_value)]) - .unwrap(), - value: 0.into(), - factory_deps, - }, - None, - ); - let (_, vm_result) = vm - .vm - .execute_transaction_with_bytecode_compression(tx, true); - assert!(!vm_result.result.is_failed(), "{vm_result:?}"); + test_mock_emulator_with_recursion::>(deploy_emulator, is_external); } #[test] fn calling_to_mock_emulator_from_native_contract() { - let recipient_address = Address::repeat_byte(0x12); - let mut vm = EvmTestBuilder::new(true, recipient_address).build(); - let account = &mut vm.rich_accounts[0]; - - // Deploy a native contract. - let native_contract = read_bytecode(RECURSIVE_CONTRACT_PATH); - let native_contract_abi = load_contract(RECURSIVE_CONTRACT_PATH); - let deploy_tx = account.get_deploy_tx( - &native_contract, - Some(&[Token::Address(recipient_address)]), - TxType::L2, - ); - let (_, vm_result) = vm - .vm - .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); - - // Call from the native contract to the EVM emulator. - let test_fn = native_contract_abi.function("recurse").unwrap(); - let test_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(deploy_tx.address), - calldata: test_fn.encode_input(&[Token::Uint(50.into())]).unwrap(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - let (_, vm_result) = vm - .vm - .execute_transaction_with_bytecode_compression(test_tx, true); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + test_calling_to_mock_emulator_from_native_contract::>(); } #[test] fn mock_emulator_with_deployment() { - let contract_address = Address::repeat_byte(0xaa); - let mut vm = EvmTestBuilder::new(true, contract_address) - .with_mock_deployer() - .build(); - let account = &mut vm.rich_accounts[0]; - - let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); - let new_evm_bytecode = vec![0xfe; 96]; - let new_evm_bytecode_hash = hash_evm_bytecode(&new_evm_bytecode); - - let test_fn = mock_emulator_abi.function("testDeploymentAndCall").unwrap(); - let test_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(contract_address), - calldata: test_fn - .encode_input(&[ - Token::FixedBytes(new_evm_bytecode_hash.0.into()), - Token::Bytes(new_evm_bytecode.clone()), - ]) - .unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - let (_, vm_result) = vm - .vm - .execute_transaction_with_bytecode_compression(test_tx, true); - assert!(!vm_result.result.is_failed(), "{vm_result:?}"); - - let factory_deps = vm_result.new_known_factory_deps.unwrap(); - assert_eq!( - factory_deps, - HashMap::from([(new_evm_bytecode_hash, new_evm_bytecode)]) - ); + test_mock_emulator_with_deployment::>(false); } #[test] -fn mock_emulator_with_delegate_call() { - let evm_contract_address = Address::repeat_byte(0xaa); - let other_evm_contract_address = Address::repeat_byte(0xbb); - let mut builder = EvmTestBuilder::new(true, evm_contract_address); - builder.storage.set_value( - storage_key_for_eth_balance(&evm_contract_address), - H256::from_low_u64_be(1_000_000), - ); - builder.storage.set_value( - 
storage_key_for_eth_balance(&other_evm_contract_address), - H256::from_low_u64_be(2_000_000), - ); - let mut vm = builder.with_evm_address(other_evm_contract_address).build(); - let account = &mut vm.rich_accounts[0]; - - // Deploy a native contract. - let native_contract = read_bytecode(INCREMENTING_CONTRACT_PATH); - let native_contract_abi = load_contract(INCREMENTING_CONTRACT_PATH); - let deploy_tx = account.get_deploy_tx(&native_contract, None, TxType::L2); - let (_, vm_result) = vm - .vm - .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); - - let test_fn = native_contract_abi.function("testDelegateCall").unwrap(); - // Delegate to the native contract from EVM. - test_delegate_call(&mut vm, test_fn, evm_contract_address, deploy_tx.address); - // Delegate to EVM from the native contract. - test_delegate_call(&mut vm, test_fn, deploy_tx.address, evm_contract_address); - // Delegate to EVM from EVM. - test_delegate_call( - &mut vm, - test_fn, - evm_contract_address, - other_evm_contract_address, - ); +fn mock_emulator_with_reverted_deployment() { + test_mock_emulator_with_deployment::>(true); } -fn test_delegate_call( - vm: &mut VmTester, - test_fn: ðabi::Function, - from: Address, - to: Address, -) { - let account = &mut vm.rich_accounts[0]; - let test_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(from), - calldata: test_fn.encode_input(&[Token::Address(to)]).unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - let (_, vm_result) = vm - .vm - .execute_transaction_with_bytecode_compression(test_tx, true); - assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +#[test] +fn mock_emulator_with_recursive_deployment() { + test_mock_emulator_with_recursive_deployment::>(); } #[test] -fn mock_emulator_with_static_call() { - let evm_contract_address = Address::repeat_byte(0xaa); - let other_evm_contract_address = Address::repeat_byte(0xbb); - let mut builder = EvmTestBuilder::new(true, evm_contract_address); - builder.storage.set_value( - storage_key_for_eth_balance(&evm_contract_address), - H256::from_low_u64_be(1_000_000), - ); - builder.storage.set_value( - storage_key_for_eth_balance(&other_evm_contract_address), - H256::from_low_u64_be(2_000_000), - ); - // Set differing read values for tested contracts. The slot index is defined in the contract. - let value_slot = H256::from_low_u64_be(0x123); - builder.storage.set_value( - StorageKey::new(AccountTreeId::new(evm_contract_address), value_slot), - H256::from_low_u64_be(100), - ); - builder.storage.set_value( - StorageKey::new(AccountTreeId::new(other_evm_contract_address), value_slot), - H256::from_low_u64_be(200), - ); - let mut vm = builder.with_evm_address(other_evm_contract_address).build(); - let account = &mut vm.rich_accounts[0]; - - // Deploy a native contract. - let native_contract = read_bytecode(INCREMENTING_CONTRACT_PATH); - let native_contract_abi = load_contract(INCREMENTING_CONTRACT_PATH); - let deploy_tx = account.get_deploy_tx(&native_contract, None, TxType::L2); - let (_, vm_result) = vm - .vm - .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +fn mock_emulator_with_partial_reverts() { + test_mock_emulator_with_partial_reverts::>(); +} - let test_fn = native_contract_abi.function("testStaticCall").unwrap(); - // Call to the native contract from EVM. 
- test_static_call(&mut vm, test_fn, evm_contract_address, deploy_tx.address, 0); - // Call to EVM from the native contract. - test_static_call( - &mut vm, - test_fn, - deploy_tx.address, - evm_contract_address, - 100, - ); - // Call to EVM from EVM. - test_static_call( - &mut vm, - test_fn, - evm_contract_address, - other_evm_contract_address, - 200, - ); +#[test] +fn mock_emulator_with_delegate_call() { + test_mock_emulator_with_delegate_call::>(); } -fn test_static_call( - vm: &mut VmTester, - test_fn: ðabi::Function, - from: Address, - to: Address, - expected_value: u64, -) { - let account = &mut vm.rich_accounts[0]; - let test_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(from), - calldata: test_fn - .encode_input(&[Token::Address(to), Token::Uint(expected_value.into())]) - .unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - let (_, vm_result) = vm - .vm - .execute_transaction_with_bytecode_compression(test_tx, true); - assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +#[test] +fn mock_emulator_with_static_call() { + test_mock_emulator_with_static_call::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs index cc9aac5bb91b..5aa7ab9e9c71 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs @@ -1,46 +1,9 @@ -use zksync_test_account::Account; -use zksync_types::{fee::Fee, Execute}; - use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_latest::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - HistoryDisabled, - }, + versions::testonly::gas_limit::test_tx_gas_limit_offset, + vm_latest::{HistoryEnabled, Vm}, }; -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Account::default_fee() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); +fn tx_gas_limit_offset() { + test_tx_gas_limit_offset::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs index d7cadc54b442..7f39915f2b64 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs @@ -1,252 +1,22 @@ -use std::{ - collections::{HashMap, HashSet}, - iter, - str::FromStr, -}; - -use assert_matches::assert_matches; -use ethabi::Token; -use itertools::Itertools; -use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; -use zk_evm_1_5_0::{ - abstractions::DecommittmentProcessor, - aux_structures::{DecommittmentQuery, MemoryPage, Timestamp}, - zkevm_opcode_defs::{VersionedHashHeader, VersionedHashNormalizedPreimage}, -}; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Address, Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; -use zksync_vm_interface::VmExecutionResultAndLogs; - use crate::{ - interface::{ - storage::WriteStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, - VmInterfaceExt, + versions::testonly::get_used_contracts::{ + test_get_used_contracts, test_get_used_contracts_with_far_call, + test_get_used_contracts_with_out_of_gas_far_call, }, - vm_latest::{ - tests::{ - tester::{TxType, VmTester, VmTesterBuilder}, - utils::{read_proxy_counter_contract, read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryDisabled, Vm, - }, - HistoryMode, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_base_system_contracts(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that `get_used_contracts()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_base_system_contracts(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // 
(`known_bytecodes` will be updated but we expect `get_used_contracts()` to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), - calldata: big_calldata, - value: Default::default(), - factory_deps: vec![vec![1; 32]], - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_base_system_contracts(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -#[test] -fn test_contract_is_used_right_after_prepare_to_decommit() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(vm.vm.get_used_contracts().is_empty()); - - let bytecode_hash = - U256::from_str("0x100067ff3124f394104ab03481f7923f0bc4029a2aa9d41cc1d848c81257185") - .unwrap(); - vm.vm - .state - .decommittment_processor - .populate(vec![(bytecode_hash, vec![])], Timestamp(0)); - - let header = hex::decode("0100067f").unwrap(); - let normalized_preimage = - hex::decode("f3124f394104ab03481f7923f0bc4029a2aa9d41cc1d848c81257185").unwrap(); - vm.vm - .state - .decommittment_processor - .prepare_to_decommit( - 0, - DecommittmentQuery { - header: VersionedHashHeader(header.try_into().unwrap()), - normalized_preimage: VersionedHashNormalizedPreimage( - normalized_preimage.try_into().unwrap(), - ), - timestamp: Timestamp(0), - memory_page: MemoryPage(0), - decommitted_length: 0, - is_fresh: false, - }, - ) - .unwrap(); - - assert_eq!(vm.vm.get_used_contracts(), vec![bytecode_hash]); -} - -fn known_bytecodes_without_base_system_contracts( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_base_system_contracts = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - known_bytecodes_without_base_system_contracts - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator { - known_bytecodes_without_base_system_contracts - .remove(&h256_to_u256(evm_emulator.hash)) - .unwrap(); - } - known_bytecodes_without_base_system_contracts -} - -/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial -/// decommitment cost (>10,000 gas). 
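A condensed sketch, not in the diff, of the padding trick described above, assuming `EncodingModeProduction::nop_encoding()` as used in the removed code; the callers below then grant the far call either enough gas (100_000) or too little (10_000) to decommit the inflated contract:

    // Append `n` NOP instructions (8 bytes each). The caller is responsible for
    // keeping the total word count odd so the bytecode stays valid.
    fn inflate_with_nops(mut bytecode: Vec<u8>, n: usize) -> Vec<u8> {
        let nop = EncodingModeProduction::nop_encoding().to_be_bytes();
        bytecode.extend(std::iter::repeat(nop).take(n).flatten());
        bytecode
    }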
-fn inflated_counter_bytecode() -> Vec { - let mut counter_bytecode = read_test_contract(); - counter_bytecode.extend( - iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) - .take(10_000) - .flatten(), - ); - counter_bytecode -} - -fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecutionResultAndLogs) { - let counter_bytecode = inflated_counter_bytecode(); - let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); - let counter_address = Address::repeat_byte(0x23); - - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx( - &proxy_counter_bytecode, - Some(&[Token::Address(counter_address)]), - TxType::L2, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); - compression_result.unwrap(); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let decommitted_hashes = vm.vm.get_used_contracts(); - assert!( - !decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); - - let increment = proxy_counter_abi.function("increment").unwrap(); - let increment_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(deploy_tx.address), - calldata: increment - .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) - .unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(increment_tx, true); - compression_result.unwrap(); - (vm, counter_bytecode_hash, exec_result) +fn get_used_contracts() { + test_get_used_contracts::>(); } #[test] fn get_used_contracts_with_far_call() { - let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(100_000); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - let decommitted_hashes = vm.vm.get_used_contracts(); - assert!( - decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); + test_get_used_contracts_with_far_call::>(); } #[test] fn get_used_contracts_with_out_of_gas_far_call() { - let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(10_000); - assert_matches!(exec_result.result, ExecutionResult::Revert { .. 
}); - let decommitted_hashes = vm.vm.get_used_contracts(); - assert!( - decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); + test_get_used_contracts_with_out_of_gas_far_call::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs index 8206cfa9be6f..193fc586079b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs @@ -1,49 +1,9 @@ -use zksync_types::get_nonce_key; - use crate::{ - interface::{ - storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - }, - vm_latest::{ - tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryDisabled, - }, + versions::testonly::is_write_initial::test_is_write_initial_behaviour, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs
index 8206cfa9be6f..193fc586079b 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs
@@ -1,49 +1,9 @@
-use zksync_types::get_nonce_key;
-
 use crate::{
-    interface::{
-        storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt,
-    },
-    vm_latest::{
-        tests::{
-            tester::{Account, TxType, VmTesterBuilder},
-            utils::read_test_contract,
-        },
-        HistoryDisabled,
-    },
+    versions::testonly::is_write_initial::test_is_write_initial_behaviour,
+    vm_latest::{HistoryEnabled, Vm},
 };
 
 #[test]
-fn test_is_write_initial_behaviour() {
-    // In this test, we check result of `is_write_initial` at different stages.
-    // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't
-    // messed up it with the repeated writes during the one batch execution.
-
-    let mut account = Account::random();
-    let mut vm = VmTesterBuilder::new(HistoryDisabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_rich_accounts(vec![account.clone()])
-        .build();
-
-    let nonce_key = get_nonce_key(&account.address);
-    // Check that the next write to the nonce key will be initial.
-    assert!(vm
-        .storage
-        .as_ref()
-        .borrow_mut()
-        .is_write_initial(&nonce_key));
-
-    let contract_code = read_test_contract();
-    let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx;
-
-    vm.vm.push_transaction(tx);
-    vm.vm.execute(VmExecutionMode::OneTx);
-
-    // Check that `is_write_initial` still returns true for the nonce key.
-    assert!(vm
-        .storage
-        .as_ref()
-        .borrow_mut()
-        .is_write_initial(&nonce_key));
+fn is_write_initial_behaviour() {
+    test_is_write_initial_behaviour::<Vm<_, HistoryEnabled>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs
index e0e4e8228f9f..3b8a01dbc80f 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs
@@ -1,195 +1,22 @@
-use ethabi::Token;
-use zksync_contracts::l1_messenger_contract;
-use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS};
-use zksync_test_account::Account;
-use zksync_types::{
-    get_code_key, get_known_code_key,
-    l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log},
-    Execute, ExecuteTransactionCommon, K256PrivateKey, U256,
-};
-use zksync_utils::u256_to_h256;
-
 use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt},
-    utils::StorageWritesDeduplicator,
-    vm_latest::{
-        tests::{
-            tester::{TxType, VmTesterBuilder},
-            utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS},
-        },
-        types::internals::TransactionData,
-        HistoryEnabled,
+    versions::testonly::l1_tx_execution::{
+        test_l1_tx_execution, test_l1_tx_execution_gas_estimation_with_low_gas,
+        test_l1_tx_execution_high_gas_limit,
     },
+    vm_latest::{HistoryEnabled, Vm},
 };
 
 #[test]
-fn test_l1_tx_execution() {
-    // In this test, we try to execute a contract deployment from L1
-    // Here instead of marking code hash via the bootloader means, we will be
-    // using L1->L2 communication, the same it would likely be done during the priority mode.
-
-    // There are always at least 9 initial writes here, because we pay fees from l1:
-    // - `totalSupply` of ETH token
-    // - balance of the refund recipient
-    // - balance of the bootloader
-    // - `tx_rolling` hash
-    // - `gasPerPubdataByte`
-    // - `basePubdataSpent`
-    // - rolling hash of L2->L1 logs
-    // - transaction number in block counter
-    // - L2->L1 log counter in `L1Messenger`
-
-    // TODO(PLA-537): right now we are using 5 slots instead of 9 due to 0 fee for transaction.
-    let basic_initial_writes = 5;
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone())
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let contract_code = read_test_contract();
-    let account = &mut vm.rich_accounts[0];
-    let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 });
-    let tx_data = TransactionData::new(deploy_tx.tx.clone(), false);
-
-    let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log {
-        shard_id: 0,
-        is_service: true,
-        tx_number_in_block: 0,
-        sender: BOOTLOADER_ADDRESS,
-        key: tx_data.tx_hash(0.into()),
-        value: u256_to_h256(U256::from(1u32)),
-    }]
-    .into_iter()
-    .map(UserL2ToL1Log)
-    .collect();
-
-    vm.vm.push_transaction(deploy_tx.tx.clone());
-
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-
-    // The code hash of the deployed contract should be marked as republished.
-    let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash);
-
-    // The contract should be deployed successfully.
-    let account_code_key = get_code_key(&deploy_tx.address);
-
-    let expected_slots = vec![
-        (u256_to_h256(U256::from(1u32)), known_codes_key),
-        (deploy_tx.bytecode_hash, account_code_key),
-    ];
-    assert!(!res.result.is_failed());
-
-    verify_required_storage(&vm.vm.state, expected_slots);
-
-    assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs);
-
-    let tx = account.get_test_contract_transaction(
-        deploy_tx.address,
-        true,
-        None,
-        false,
-        TxType::L1 { serial_id: 0 },
-    );
-    vm.vm.push_transaction(tx);
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-    let storage_logs = res.logs.storage_logs;
-    let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs);
-
-    // Tx panicked
-    assert_eq!(res.initial_storage_writes, basic_initial_writes);
-
-    let tx = account.get_test_contract_transaction(
-        deploy_tx.address,
-        false,
-        None,
-        false,
-        TxType::L1 { serial_id: 0 },
-    );
-    vm.vm.push_transaction(tx.clone());
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-    let storage_logs = res.logs.storage_logs;
-    let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs);
-    // We changed one slot inside contract.
-    assert_eq!(res.initial_storage_writes - basic_initial_writes, 1);
-
-    // No repeated writes
-    let repeated_writes = res.repeated_storage_writes;
-    assert_eq!(res.repeated_storage_writes, 0);
-
-    vm.vm.push_transaction(tx);
-    let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs;
-    let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs);
-    // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated.
-    // But now the base pubdata spent has changed too.
-    assert_eq!(res.initial_storage_writes - basic_initial_writes, 1);
-    assert_eq!(res.repeated_storage_writes, repeated_writes);
-
-    let tx = account.get_test_contract_transaction(
-        deploy_tx.address,
-        false,
-        Some(10.into()),
-        false,
-        TxType::L1 { serial_id: 1 },
-    );
-    vm.vm.push_transaction(tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    // Method is not payable tx should fail
-    assert!(result.result.is_failed(), "The transaction should fail");
-
-    let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs);
-    // There are only basic initial writes
-    assert_eq!(res.initial_storage_writes - basic_initial_writes, 2);
+fn l1_tx_execution() {
+    test_l1_tx_execution::<Vm<_, HistoryEnabled>>();
 }
 
 #[test]
-fn test_l1_tx_execution_high_gas_limit() {
-    // In this test, we try to execute an L1->L2 transaction with a high gas limit.
-    // Usually priority transactions with dangerously gas limit should even pass the checks on the L1,
-    // however, they might pass during the transition period to the new fee model, so we check that we can safely process those.
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone())
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_rich_accounts(vec![Account::new(
-            K256PrivateKey::from_bytes([0xad; 32].into()).unwrap(),
-        )])
-        .build();
-
-    let account = &mut vm.rich_accounts[0];
-
-    let l1_messenger = l1_messenger_contract();
-
-    let contract_function = l1_messenger.function("sendToL1").unwrap();
-    let params = [
-        // Even a message of size 100k should not be able to be sent by a priority transaction
-        Token::Bytes(vec![0u8; 100_000]),
-    ];
-    let calldata = contract_function.encode_input(&params).unwrap();
-
-    let mut tx = account.get_l1_tx(
-        Execute {
-            contract_address: Some(L1_MESSENGER_ADDRESS),
-            value: 0.into(),
-            factory_deps: vec![],
-            calldata,
-        },
-        0,
-    );
-
-    if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data {
-        // Using some large gas limit
-        data.gas_limit = 300_000_000.into();
-    } else {
-        unreachable!()
-    };
-
-    vm.vm.push_transaction(tx);
-
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
+fn l1_tx_execution_high_gas_limit() {
+    test_l1_tx_execution_high_gas_limit::<Vm<_, HistoryEnabled>>();
+}
 
-    assert!(res.result.is_failed(), "The transaction should've failed");
+#[test]
+fn l1_tx_execution_gas_estimation_with_low_gas() {
+    test_l1_tx_execution_gas_estimation_with_low_gas::<Vm<_, HistoryEnabled>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs
index 1b5c3db59f72..82003b4a6abd 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs
@@ -1,433 +1,33 @@
-//!
-//! Tests for the bootloader
-//! The description for each of the tests can be found in the corresponding `.yul` file.
-//!
-
-use zk_evm_1_5_0::aux_structures::Timestamp;
-use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE;
-use zksync_types::{
-    block::{pack_block_info, L2BlockHasher},
-    AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, L2BlockNumber,
-    ProtocolVersionId, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS,
-    SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION,
-    SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256,
-};
-use zksync_utils::{h256_to_u256, u256_to_h256};
-
 use crate::{
-    interface::{
-        storage::WriteStorage, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode,
-        VmInterface, VmInterfaceExt,
-    },
-    vm_latest::{
-        constants::{
-            BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET,
-            TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO,
-        },
-        tests::tester::{default_l1_batch, VmTesterBuilder},
-        utils::l2_blocks::get_l2_block_hash_key,
-        HistoryEnabled, Vm,
+    versions::testonly::l2_blocks::{
+        test_l2_block_first_in_batch, test_l2_block_initialization_number_non_zero,
+        test_l2_block_initialization_timestamp, test_l2_block_new_l2_block,
+        test_l2_block_same_l2_block,
     },
-    HistoryMode,
+    vm_latest::{HistoryEnabled, Vm},
 };
 
-fn get_l1_noop() -> Transaction {
-    Transaction {
-        common_data: ExecuteTransactionCommon::L1(L1TxCommonData {
-            sender: H160::random(),
-            gas_limit: U256::from(2000000u32),
-            gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(),
-            ..Default::default()
-        }),
-        execute: Execute::default(),
-        received_timestamp_ms: 0,
-        raw_bytes: None,
-    }
-}
-
 #[test]
-fn test_l2_block_initialization_timestamp() {
-    // This test checks that the L2 block initialization works correctly.
-    // Here we check that that the first block must have timestamp that is greater or equal to the timestamp
-    // of the current batch.
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    // Override the timestamp of the current miniblock to be 0.
-    vm.vm.bootloader_state.push_l2_block(L2BlockEnv {
-        number: 1,
-        timestamp: 0,
-        prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)),
-        max_virtual_blocks_to_create: 1,
-    });
-    let l1_tx = get_l1_noop();
-
-    vm.vm.push_transaction(l1_tx);
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-
-    assert_eq!(
-        res.result,
-        ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())}
-    );
+fn l2_block_initialization_timestamp() {
+    test_l2_block_initialization_timestamp::<Vm<_, HistoryEnabled>>();
 }
 
 #[test]
-fn test_l2_block_initialization_number_non_zero() {
-    // This test checks that the L2 block initialization works correctly.
-    // Here we check that the first miniblock number can not be zero.
-
-    let l1_batch = default_l1_batch(L1BatchNumber(1));
-    let first_l2_block = L2BlockEnv {
-        number: 0,
-        timestamp: l1_batch.timestamp,
-        prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)),
-        max_virtual_blocks_to_create: 1,
-    };
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_l1_batch_env(l1_batch)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let l1_tx = get_l1_noop();
-
-    vm.vm.push_transaction(l1_tx);
-
-    let timestamp = Timestamp(vm.vm.state.local_state.timestamp);
-    set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp);
-
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-
-    assert_eq!(
-        res.result,
-        ExecutionResult::Halt {
-            reason: Halt::FailedToSetL2Block(
-                "L2 block number is never expected to be zero".to_string()
-            )
-        }
-    );
-}
-
-fn test_same_l2_block(
-    expected_error: Option<Halt>,
-    override_timestamp: Option<u64>,
-    override_prev_block_hash: Option<H256>,
-) {
-    let mut l1_batch = default_l1_batch(L1BatchNumber(1));
-    l1_batch.timestamp = 1;
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_l1_batch_env(l1_batch)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let l1_tx = get_l1_noop();
-    vm.vm.push_transaction(l1_tx.clone());
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!res.result.is_failed());
-
-    let mut current_l2_block = vm.vm.batch_env.first_l2_block;
-
-    if let Some(timestamp) = override_timestamp {
-        current_l2_block.timestamp = timestamp;
-    }
-    if let Some(prev_block_hash) = override_prev_block_hash {
-        current_l2_block.prev_block_hash = prev_block_hash;
-    }
-
-    if (None, None) == (override_timestamp, override_prev_block_hash) {
-        current_l2_block.max_virtual_blocks_to_create = 0;
-    }
-
-    vm.vm.push_transaction(l1_tx);
-    let timestamp = Timestamp(vm.vm.state.local_state.timestamp);
-    set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp);
-
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-
-    if let Some(err) = expected_error {
-        assert_eq!(result.result, ExecutionResult::Halt { reason: err });
-    } else {
-        assert_eq!(result.result, ExecutionResult::Success { output: vec![] });
-    }
+fn l2_block_initialization_number_non_zero() {
+    test_l2_block_initialization_number_non_zero::<Vm<_, HistoryEnabled>>();
 }
 
 #[test]
-fn test_l2_block_same_l2_block() {
-    // This test aims to test the case when there are multiple transactions inside the same L2 block.
-
-    // Case 1: Incorrect timestamp
-    test_same_l2_block(
-        Some(Halt::FailedToSetL2Block(
-            "The timestamp of the same L2 block must be same".to_string(),
-        )),
-        Some(0),
-        None,
-    );
-
-    // Case 2: Incorrect previous block hash
-    test_same_l2_block(
-        Some(Halt::FailedToSetL2Block(
-            "The previous hash of the same L2 block must be same".to_string(),
-        )),
-        None,
-        Some(H256::zero()),
-    );
-
-    // Case 3: Correct continuation of the same L2 block
-    test_same_l2_block(None, None, None);
-}
-
-fn test_new_l2_block(
-    first_l2_block: L2BlockEnv,
-    overriden_second_block_number: Option<u32>,
-    overriden_second_block_timestamp: Option<u64>,
-    overriden_second_block_prev_block_hash: Option<H256>,
-    expected_error: Option<Halt>,
-) {
-    let mut l1_batch = default_l1_batch(L1BatchNumber(1));
-    l1_batch.timestamp = 1;
-    l1_batch.first_l2_block = first_l2_block;
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_l1_batch_env(l1_batch)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let l1_tx = get_l1_noop();
-
-    // Firstly we execute the first transaction
-    vm.vm.push_transaction(l1_tx.clone());
-    vm.vm.execute(VmExecutionMode::OneTx);
-
-    let mut second_l2_block = vm.vm.batch_env.first_l2_block;
-    second_l2_block.number += 1;
-    second_l2_block.timestamp += 1;
-    second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash();
-
-    if let Some(block_number) = overriden_second_block_number {
-        second_l2_block.number = block_number;
-    }
-    if let Some(timestamp) = overriden_second_block_timestamp {
-        second_l2_block.timestamp = timestamp;
-    }
-    if let Some(prev_block_hash) = overriden_second_block_prev_block_hash {
-        second_l2_block.prev_block_hash = prev_block_hash;
-    }
-
-    vm.vm.bootloader_state.push_l2_block(second_l2_block);
-
-    vm.vm.push_transaction(l1_tx);
-
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    if let Some(err) = expected_error {
-        assert_eq!(result.result, ExecutionResult::Halt { reason: err });
-    } else {
-        assert_eq!(result.result, ExecutionResult::Success { output: vec![] });
-    }
+fn l2_block_same_l2_block() {
+    test_l2_block_same_l2_block::<Vm<_, HistoryEnabled>>();
 }
 
 #[test]
-fn test_l2_block_new_l2_block() {
-    // This test is aimed to cover potential issue
-
-    let correct_first_block = L2BlockEnv {
-        number: 1,
-        timestamp: 1,
-        prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)),
-        max_virtual_blocks_to_create: 1,
-    };
-
-    // Case 1: Block number increasing by more than 1
-    test_new_l2_block(
-        correct_first_block,
-        Some(3),
-        None,
-        None,
-        Some(Halt::FailedToSetL2Block(
-            "Invalid new L2 block number".to_string(),
-        )),
-    );
-
-    // Case 2: Timestamp not increasing
-    test_new_l2_block(
-        correct_first_block,
-        None,
-        Some(1),
-        None,
-        Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())),
-    );
-
-    // Case 3: Incorrect previous block hash
-    test_new_l2_block(
-        correct_first_block,
-        None,
-        None,
-        Some(H256::zero()),
-        Some(Halt::FailedToSetL2Block(
-            "The current L2 block hash is incorrect".to_string(),
-        )),
-    );
-
-    // Case 4: Correct new block
-    test_new_l2_block(correct_first_block, None, None, None, None);
-}
-
-#[allow(clippy::too_many_arguments)]
-fn test_first_in_batch(
-    miniblock_timestamp: u64,
-    miniblock_number: u32,
-    pending_txs_hash: H256,
-    batch_timestamp: u64,
-    new_batch_timestamp: u64,
-    batch_number: u32,
-    proposed_block: L2BlockEnv,
-    expected_error: Option<Halt>,
-) {
-    let mut l1_batch = default_l1_batch(L1BatchNumber(1));
-    l1_batch.number += 1;
-    l1_batch.timestamp = new_batch_timestamp;
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_l1_batch_env(l1_batch)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-    let l1_tx = get_l1_noop();
-
-    // Setting the values provided.
-    let storage_ptr = vm.vm.state.storage.storage.get_ptr();
-    let miniblock_info_slot = StorageKey::new(
-        AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
-        SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION,
-    );
-    let pending_txs_hash_slot = StorageKey::new(
-        AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
-        SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION,
-    );
-    let batch_info_slot = StorageKey::new(
-        AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
-        SYSTEM_CONTEXT_BLOCK_INFO_POSITION,
-    );
-    let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1);
-
-    storage_ptr.borrow_mut().set_value(
-        miniblock_info_slot,
-        u256_to_h256(pack_block_info(
-            miniblock_number as u64,
-            miniblock_timestamp,
-        )),
-    );
-    storage_ptr
-        .borrow_mut()
-        .set_value(pending_txs_hash_slot, pending_txs_hash);
-    storage_ptr.borrow_mut().set_value(
-        batch_info_slot,
-        u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)),
-    );
-    storage_ptr.borrow_mut().set_value(
-        prev_block_hash_position,
-        L2BlockHasher::legacy_hash(L2BlockNumber(miniblock_number - 1)),
-    );
-
-    // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info.
-    // And then override it with the user-provided value
-
-    let last_l2_block = vm.vm.bootloader_state.last_l2_block();
-    let new_l2_block = L2BlockEnv {
-        number: last_l2_block.number + 1,
-        timestamp: last_l2_block.timestamp + 1,
-        prev_block_hash: last_l2_block.get_hash(),
-        max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create,
-    };
-
-    vm.vm.bootloader_state.push_l2_block(new_l2_block);
-    vm.vm.push_transaction(l1_tx);
-    let timestamp = Timestamp(vm.vm.state.local_state.timestamp);
-    set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp);
-
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    if let Some(err) = expected_error {
-        assert_eq!(result.result, ExecutionResult::Halt { reason: err });
-    } else {
-        assert_eq!(result.result, ExecutionResult::Success { output: vec![] });
-    }
+fn l2_block_new_l2_block() {
+    test_l2_block_new_l2_block::<Vm<_, HistoryEnabled>>();
 }
 
 #[test]
-fn test_l2_block_first_in_batch() {
-    let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0));
-    let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 1, prev_block_hash)
-        .finalize(ProtocolVersionId::latest());
-    test_first_in_batch(
-        1,
-        1,
-        H256::zero(),
-        1,
-        2,
-        1,
-        L2BlockEnv {
-            number: 2,
-            timestamp: 2,
-            prev_block_hash,
-            max_virtual_blocks_to_create: 1,
-        },
-        None,
-    );
-
-    let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0));
-    let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 8, prev_block_hash)
-        .finalize(ProtocolVersionId::latest());
-    test_first_in_batch(
-        8,
-        1,
-        H256::zero(),
-        5,
-        12,
-        1,
-        L2BlockEnv {
-            number: 2,
-            timestamp: 9,
-            prev_block_hash,
-            max_virtual_blocks_to_create: 1,
-        },
-        Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())),
-    );
-}
-
-fn set_manual_l2_block_info<S: WriteStorage, H: HistoryMode>(
-    vm: &mut Vm<S, H>,
-    tx_number: usize,
-    block_info: L2BlockEnv,
-    timestamp: Timestamp,
-) {
-    let fictive_miniblock_position =
-        TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number;
-
-    vm.state.memory.populate_page(
-        BOOTLOADER_HEAP_PAGE as usize,
-        vec![
-            (fictive_miniblock_position, block_info.number.into()),
-            (fictive_miniblock_position + 1, block_info.timestamp.into()),
-            (
-                fictive_miniblock_position + 2,
-                h256_to_u256(block_info.prev_block_hash),
-            ),
-            (
-                fictive_miniblock_position + 3,
-                block_info.max_virtual_blocks_to_create.into(),
-            ),
-        ],
-        timestamp,
-    )
+fn l2_block_first_in_batch() {
+    test_l2_block_first_in_batch::<Vm<_, HistoryEnabled>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs
index fadb05cc4d19..b059c9716d89 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs
@@ -1,3 +1,37 @@
+use std::{
+    collections::{HashMap, HashSet},
+    rc::Rc,
+};
+
+use zk_evm_1_5_0::{
+    aux_structures::{MemoryPage, Timestamp},
+    vm_state::VmLocalState,
+    zkevm_opcode_defs::{ContractCodeSha256Format, VersionedHashLen32},
+};
+use zksync_types::{
+    bytecode::BytecodeHash, writes::StateDiffRecord, StorageKey, StorageValue, Transaction, H256,
+    U256,
+};
+use zksync_vm_interface::pubdata::PubdataBuilder;
+
+use super::{HistoryEnabled, Vm};
+use crate::{
+    interface::{
+        storage::{InMemoryStorage, ReadStorage, StorageView, WriteStorage},
+        CurrentExecutionState, L2BlockEnv, VmExecutionMode, VmExecutionResultAndLogs,
+    },
+    utils::bytecode::bytes_to_be_words,
+    versions::testonly::{filter_out_base_system_contracts, TestedVm},
+    vm_latest::{
+        constants::BOOTLOADER_HEAP_PAGE,
+        old_vm::{event_sink::InMemoryEventSink, history_recorder::HistoryRecorder},
+        tracers::PubdataTracer,
+        types::internals::TransactionData,
+        utils::logs::StorageLogQuery,
+        AppDataFrameManagerWithHistory, HistoryMode, SimpleMemory, TracerDispatcher,
+    },
+};
+
 mod bootloader;
 mod default_aa;
 // TODO - fix this test
@@ -20,11 +54,254 @@ mod prestate_tracer;
 mod refunds;
 mod require_eip712;
 mod rollbacks;
-mod sekp256r1;
+mod secp256r1;
 mod simple_execution;
 mod storage;
-mod tester;
 mod tracing_execution_error;
 mod transfer;
 mod upgrade;
-mod utils;
+
+type TestedLatestVm = Vm<StorageView<InMemoryStorage>, HistoryEnabled>;
+
+impl TestedVm for TestedLatestVm {
+    type StateDump = VmInstanceInnerState<HistoryEnabled>;
+
+    fn dump_state(&self) -> Self::StateDump {
+        self.dump_inner_state()
+    }
+
+    fn gas_remaining(&mut self) -> u32 {
+        self.state.local_state.callstack.current.ergs_remaining
+    }
+
+    fn get_current_execution_state(&self) -> CurrentExecutionState {
+        self.get_current_execution_state()
+    }
+
+    fn decommitted_hashes(&self) -> HashSet<U256> {
+        self.get_used_contracts().into_iter().collect()
+    }
+
+    fn finish_batch_with_state_diffs(
+        &mut self,
+        diffs: Vec<StateDiffRecord>,
+        pubdata_builder: Rc<dyn PubdataBuilder>,
+    ) -> VmExecutionResultAndLogs {
+        let pubdata_tracer = PubdataTracer::new_with_forced_state_diffs(
+            self.batch_env.clone(),
+            VmExecutionMode::Batch,
+            diffs,
+            crate::vm_latest::MultiVmSubversion::latest(),
+            Some(pubdata_builder),
+        );
+        self.inspect_inner(
+            &mut TracerDispatcher::default(),
+            VmExecutionMode::Batch,
+            Some(pubdata_tracer),
+        )
+    }
+
+    fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs {
+        self.inspect_inner(
+            &mut TracerDispatcher::default(),
+            VmExecutionMode::Batch,
+            None,
+        )
+    }
+
+    fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) {
+        let bytecodes = bytecodes
+            .iter()
+            .map(|&bytecode| {
+                let hash = BytecodeHash::for_bytecode(bytecode).value_u256();
+                let words = bytes_to_be_words(bytecode);
+                (hash, words)
+            })
+            .collect();
+        self.state
+            .decommittment_processor
+            .populate(bytecodes, Timestamp(0));
+    }
+
+    fn known_bytecode_hashes(&self) -> HashSet<U256> {
+        let mut bytecode_hashes: HashSet<_> = self
+            .state
+            .decommittment_processor
+            .known_bytecodes
+            .inner()
+            .keys()
+            .copied()
+            .collect();
+        filter_out_base_system_contracts(&mut bytecode_hashes);
+        bytecode_hashes
+    }
+
+    fn manually_decommit(&mut self, code_hash: H256) -> bool {
+        let (header, normalized_preimage) =
+            ContractCodeSha256Format::normalize_for_decommitment(&code_hash.0);
+        let query = self
+            .state
+            .prepare_to_decommit(
+                0,
+                header,
+                normalized_preimage,
+                MemoryPage(123),
+                Timestamp(0),
+            )
+            .unwrap();
+        self.state.execute_decommit(0, query).unwrap();
+        query.is_fresh
+    }
+
+    fn verify_required_bootloader_heap(&self, cells: &[(u32, U256)]) {
+        for &(slot, required_value) in cells {
+            let current_value = self
+                .state
+                .memory
+                .read_slot(BOOTLOADER_HEAP_PAGE as usize, slot as usize)
+                .value;
+            assert_eq!(current_value, required_value);
+        }
+    }
+
+    fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]) {
+        let timestamp = Timestamp(self.state.local_state.timestamp);
+        self.state
+            .memory
+            .populate_page(BOOTLOADER_HEAP_PAGE as usize, cells.to_vec(), timestamp)
+    }
+
+    fn read_storage(&mut self, key: StorageKey) -> U256 {
+        self.state.storage.storage.read_from_storage(&key)
+    }
+
+    fn last_l2_block_hash(&self) -> H256 {
+        self.bootloader_state.last_l2_block().get_hash()
+    }
+
+    fn push_l2_block_unchecked(&mut self, block: L2BlockEnv) {
+        self.bootloader_state.push_l2_block(block);
+    }
+
+    fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) {
+        let tx = TransactionData::new(tx, false);
+        let overhead = tx.overhead_gas();
+        self.push_raw_transaction(tx, overhead, refund, true)
+    }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct ModifiedKeysMap(HashMap<StorageKey, StorageValue>);
+
+impl ModifiedKeysMap {
+    fn new(storage: &mut StorageView<InMemoryStorage>) -> Self {
+        let mut modified_keys = storage.modified_storage_keys().clone();
+        let inner = storage.inner_mut();
+        // Remove modified keys that were set to the same value (e.g., due to a rollback).
+        modified_keys.retain(|key, value| inner.read_value(key) != *value);
+        Self(modified_keys)
+    }
+}
+
+// We consider hashmaps to be equal even if there is a key
+// that is not present in one but has zero value in another.
+impl PartialEq for ModifiedKeysMap {
+    fn eq(&self, other: &Self) -> bool {
+        for (key, value) in &self.0 {
+            if *value != other.0.get(key).copied().unwrap_or_default() {
+                return false;
+            }
+        }
+        for (key, value) in &other.0 {
+            if *value != self.0.get(key).copied().unwrap_or_default() {
+                return false;
+            }
+        }
+        true
+    }
+}
+
+#[derive(Clone, PartialEq, Debug)]
+pub(crate) struct DecommitterTestInnerState<H: HistoryMode> {
+    /// There is no way to "truly" compare the storage pointer,
+    /// so we just compare the modified keys. This is reasonable enough.
+    pub(crate) modified_storage_keys: ModifiedKeysMap,
+    pub(crate) known_bytecodes: HistoryRecorder<HashMap<U256, Vec<U256>>, H>,
+    pub(crate) decommitted_code_hashes: HistoryRecorder<HashMap<U256, Option<u32>>, HistoryEnabled>,
+}
+
+#[derive(Clone, PartialEq, Debug)]
+pub(crate) struct StorageOracleInnerState<H: HistoryMode> {
+    /// There is no way to "truly" compare the storage pointer,
+    /// so we just compare the modified keys. This is reasonable enough.
+    pub(crate) modified_storage_keys: ModifiedKeysMap,
+    pub(crate) frames_stack: AppDataFrameManagerWithHistory<Box<StorageLogQuery>, H>,
+    pub(crate) paid_changes: HistoryRecorder<HashMap<StorageKey, u32>, H>,
+    pub(crate) initial_values: HistoryRecorder<HashMap<StorageKey, U256>, H>,
+    pub(crate) returned_io_refunds: HistoryRecorder<Vec<u32>, H>,
+    pub(crate) returned_pubdata_costs: HistoryRecorder<Vec<i32>, H>,
+}
+
+#[derive(Clone, PartialEq, Debug)]
+pub(crate) struct PrecompileProcessorTestInnerState<H: HistoryMode> {
+    pub(crate) timestamp_history: HistoryRecorder<Vec<Timestamp>, H>,
+}
+
+/// A struct that encapsulates the state of the VM's oracles
+/// The state is to be used in tests.
+#[derive(Clone, PartialEq, Debug)]
+pub(crate) struct VmInstanceInnerState<H: HistoryMode> {
+    event_sink: InMemoryEventSink<H>,
+    precompile_processor_state: PrecompileProcessorTestInnerState<H>,
+    memory: SimpleMemory<H>,
+    decommitter_state: DecommitterTestInnerState<H>,
+    storage_oracle_state: StorageOracleInnerState<H>,
+    local_state: VmLocalState,
+}
+
+impl<H: HistoryMode> Vm<StorageView<InMemoryStorage>, H> {
+    // Dump inner state of the VM.
+    pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState<H> {
+        let event_sink = self.state.event_sink.clone();
+        let precompile_processor_state = PrecompileProcessorTestInnerState {
+            timestamp_history: self.state.precompiles_processor.timestamp_history.clone(),
+        };
+        let memory = self.state.memory.clone();
+        let decommitter_state = DecommitterTestInnerState {
+            modified_storage_keys: ModifiedKeysMap::new(
+                &mut self
+                    .state
+                    .decommittment_processor
+                    .get_storage()
+                    .borrow_mut(),
+            ),
+            known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(),
+            decommitted_code_hashes: self
+                .state
+                .decommittment_processor
+                .get_decommitted_code_hashes_with_history()
+                .clone(),
+        };
+
+        let storage_oracle_state = StorageOracleInnerState {
+            modified_storage_keys: ModifiedKeysMap::new(
+                &mut self.state.storage.storage.get_ptr().borrow_mut(),
+            ),
+            frames_stack: self.state.storage.storage_frames_stack.clone(),
+            paid_changes: self.state.storage.paid_changes.clone(),
+            initial_values: self.state.storage.initial_values.clone(),
+            returned_io_refunds: self.state.storage.returned_io_refunds.clone(),
+            returned_pubdata_costs: self.state.storage.returned_pubdata_costs.clone(),
+        };
+        let local_state = self.state.local_state.clone();
+
+        VmInstanceInnerState {
+            event_sink,
+            precompile_processor_state,
+            memory,
+            decommitter_state,
+            storage_oracle_state,
+            local_state,
+        }
+    }
+}
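The custom `PartialEq` for `ModifiedKeysMap` above deliberately treats a key that is absent in one map but maps to the zero value in the other as equal: reading an untouched storage slot yields zero, so a rollback can leave an explicit zero entry where a fresh run has none. The same semantics in a self-contained toy form (plain integer maps, purely illustrative):

```rust
use std::collections::HashMap;

// Two maps are "equal" if every key maps to the same value, with a
// missing key counting as the default (zero), mirroring `ModifiedKeysMap::eq`.
fn maps_equal(a: &HashMap<u64, u64>, b: &HashMap<u64, u64>) -> bool {
    a.iter().all(|(k, v)| *v == b.get(k).copied().unwrap_or_default())
        && b.iter().all(|(k, v)| *v == a.get(k).copied().unwrap_or_default())
}

fn main() {
    let with_zero = HashMap::from([(1, 0), (2, 5)]);
    let without = HashMap::from([(2, 5)]);
    assert!(maps_equal(&with_zero, &without)); // explicit zero == absent key
}
```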
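`VmInstanceInnerState` and `dump_state` exist so that rollback tests can snapshot every oracle and assert that a rollback restores the VM exactly. A rough sketch of such a check, assuming the `TestedVm` methods from this file plus the snapshot API of `VmInterfaceHistoryEnabled`; the helper and its trait bounds are illustrative, not part of the diff:

```rust
use zksync_types::Transaction;

use crate::{
    interface::{InspectExecutionMode, VmInterface, VmInterfaceHistoryEnabled},
    versions::testonly::TestedVm,
};

// Hypothetical helper: execute `tx`, roll it back, and verify that the
// dumped inner state (storage oracle, decommitter, event sink, ...) is
// identical to the pre-transaction snapshot.
fn assert_rollback_restores_state<VM>(vm: &mut VM, tx: Transaction)
where
    VM: TestedVm + VmInterfaceHistoryEnabled,
    VM::StateDump: PartialEq + std::fmt::Debug,
    VM::TracerDispatcher: Default,
{
    let dump_before = vm.dump_state();
    vm.make_snapshot();
    vm.push_transaction(tx);
    vm.inspect(&mut Default::default(), InspectExecutionMode::OneTx);
    vm.rollback_to_the_latest_snapshot();
    assert_eq!(vm.dump_state(), dump_before);
}
```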
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs
index 397790a7c957..c7ea3242d4a6 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs
@@ -1,187 +1,9 @@
-use zksync_types::{Execute, Nonce};
-
 use crate::{
-    interface::{
-        ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt,
-        VmRevertReason,
-    },
-    vm_latest::{
-        tests::{
-            tester::{Account, VmTesterBuilder},
-            utils::read_nonce_holder_tester,
-        },
-        types::internals::TransactionData,
-        HistoryEnabled,
-    },
+    versions::testonly::nonce_holder::test_nonce_holder,
+    vm_latest::{HistoryEnabled, Vm},
 };
 
-pub enum NonceHolderTestMode {
-    SetValueUnderNonce,
-    IncreaseMinNonceBy5,
-    IncreaseMinNonceTooMuch,
-    LeaveNonceUnused,
-    IncreaseMinNonceBy1,
-    SwitchToArbitraryOrdering,
-}
-
-impl From<NonceHolderTestMode> for u8 {
-    fn from(mode: NonceHolderTestMode) -> u8 {
-        match mode {
-            NonceHolderTestMode::SetValueUnderNonce => 0,
-            NonceHolderTestMode::IncreaseMinNonceBy5 => 1,
-            NonceHolderTestMode::IncreaseMinNonceTooMuch => 2,
-            NonceHolderTestMode::LeaveNonceUnused => 3,
-            NonceHolderTestMode::IncreaseMinNonceBy1 => 4,
-            NonceHolderTestMode::SwitchToArbitraryOrdering => 5,
-        }
-    }
-}
-
 #[test]
-fn test_nonce_holder() {
-    let mut account = Account::random();
-    let hex_addr = hex::encode(account.address.to_fixed_bytes());
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_deployer()
-        .with_custom_contracts(vec![(
-            read_nonce_holder_tester().to_vec(),
-            account.address,
-            true,
-        )])
-        .with_rich_accounts(vec![account.clone()])
-        .build();
-
-    let mut run_nonce_test = |nonce: u32,
-                              test_mode: NonceHolderTestMode,
-                              error_message: Option<String>,
-                              comment: &'static str| {
-        // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction,
-        // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state.
-        // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it.
-        vm.reset_state(true);
-        let tx = account.get_l2_tx_for_execute_with_nonce(
-            Execute {
-                contract_address: Some(account.address),
-                calldata: vec![12],
-                value: Default::default(),
-                factory_deps: vec![],
-            },
-            None,
-            Nonce(nonce),
-        );
-        let mut transaction_data = TransactionData::new(tx, false);
-        transaction_data.signature = vec![test_mode.into()];
-        vm.vm.push_raw_transaction(transaction_data, 0, 0, true);
-        let result = vm.vm.execute(VmExecutionMode::OneTx);
-
-        if let Some(msg) = error_message {
-            let expected_error =
-                TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General {
-                    msg,
-                    data: vec![],
-                }));
-            let ExecutionResult::Halt { reason } = result.result else {
-                panic!("Expected revert, got {:?}", result.result);
-            };
-            assert_eq!(
-                reason.to_string(),
-                expected_error.to_string(),
-                "{}",
-                comment
-            );
-        } else {
-            assert!(!result.result.is_failed(), "{}", comment);
-        }
-    };
-
-    // Test 1: trying to set value under non sequential nonce value.
-    run_nonce_test(
-        1u32,
-        NonceHolderTestMode::SetValueUnderNonce,
-        Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()),
-        "Allowed to set value under non sequential value",
-    );
-
-    // Test 2: increase min nonce by 1 with sequential nonce ordering:
-    run_nonce_test(
-        0u32,
-        NonceHolderTestMode::IncreaseMinNonceBy1,
-        None,
-        "Failed to increment nonce by 1 for sequential account",
-    );
-
-    // Test 3: correctly set value under nonce with sequential nonce ordering:
-    run_nonce_test(
-        1u32,
-        NonceHolderTestMode::SetValueUnderNonce,
-        None,
-        "Failed to set value under nonce sequential value",
-    );
-
-    // Test 5: migrate to the arbitrary nonce ordering:
-    run_nonce_test(
-        2u32,
-        NonceHolderTestMode::SwitchToArbitraryOrdering,
-        None,
-        "Failed to switch to arbitrary ordering",
-    );
-
-    // Test 6: increase min nonce by 5
-    run_nonce_test(
-        6u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        None,
-        "Failed to increase min nonce by 5",
-    );
-
-    // Test 7: since the nonces in range [6,10] are no longer allowed, the
-    // tx with nonce 10 should not be allowed
-    run_nonce_test(
-        10u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")),
-        "Allowed to reuse nonce below the minimal one",
-    );
-
-    // Test 8: we should be able to use nonce 13
-    run_nonce_test(
-        13u32,
-        NonceHolderTestMode::SetValueUnderNonce,
-        None,
-        "Did not allow to use unused nonce 10",
-    );
-
-    // Test 9: we should not be able to reuse nonce 13
-    run_nonce_test(
-        13u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")),
-        "Allowed to reuse the same nonce twice",
-    );
-
-    // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5
-    run_nonce_test(
-        14u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        None,
-        "Did not allow to use a bumped nonce",
-    );
-
-    // Test 11: Do not allow bumping nonce by too much
-    run_nonce_test(
-        16u32,
-        NonceHolderTestMode::IncreaseMinNonceTooMuch,
-        Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()),
-        "Allowed for incrementing min nonce too much",
-    );
-
-    // Test 12: Do not allow not setting a nonce as used
-    run_nonce_test(
-        16u32,
-        NonceHolderTestMode::LeaveNonceUnused,
-        Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")),
-        "Allowed to leave nonce as unused",
-    );
+fn nonce_holder() {
+    test_nonce_holder::<Vm<_, HistoryEnabled>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs
index 110b14146c7a..7ef45721ea5d 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs
@@ -1,142 +1,19 @@
-use zk_evm_1_5_0::zk_evm_abstractions::precompiles::PrecompileAddress;
-use zksync_types::{Address, Execute};
-
 use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    vm_latest::{
-        constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
-        tests::{tester::VmTesterBuilder, utils::read_precompiles_contract},
-        HistoryEnabled,
-    },
+    versions::testonly::precompiles::{test_ecrecover, test_keccak, test_sha256},
+    vm_latest::{HistoryEnabled, Vm},
 };
 
 #[test]
-fn test_keccak() {
-    // Execute special transaction and check that at least 1000 keccak calls were made.
-    let contract = read_precompiles_contract();
-    let address = Address::random();
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_random_rich_accounts(1)
-        .with_deployer()
-        .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_custom_contracts(vec![(contract, address, true)])
-        .build();
-
-    // calldata for `doKeccak(1000)`.
-    let keccak1000_calldata =
-        "370f20ac00000000000000000000000000000000000000000000000000000000000003e8";
-
-    let account = &mut vm.rich_accounts[0];
-    let tx = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(address),
-            calldata: hex::decode(keccak1000_calldata).unwrap(),
-            value: Default::default(),
-            factory_deps: vec![],
-        },
-        None,
-    );
-    vm.vm.push_transaction(tx);
-    let _ = vm
-        .vm
-        .inspect(&mut Default::default(), VmExecutionMode::OneTx);
-
-    let keccak_count = vm
-        .vm
-        .state
-        .precompiles_processor
-        .precompile_cycles_history
-        .inner()
-        .iter()
-        .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256)
-        .count();
-
-    assert!(keccak_count >= 1000);
+fn keccak() {
+    test_keccak::<Vm<_, HistoryEnabled>>();
 }
 
 #[test]
-fn test_sha256() {
-    // Execute special transaction and check that at least 1000 `sha256` calls were made.
-    let contract = read_precompiles_contract();
-    let address = Address::random();
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_random_rich_accounts(1)
-        .with_deployer()
-        .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_custom_contracts(vec![(contract, address, true)])
-        .build();
-
-    // calldata for `doSha256(1000)`.
-    let sha1000_calldata =
-        "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8";
-
-    let account = &mut vm.rich_accounts[0];
-    let tx = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(address),
-            calldata: hex::decode(sha1000_calldata).unwrap(),
-            value: Default::default(),
-            factory_deps: vec![],
-        },
-        None,
-    );
-    vm.vm.push_transaction(tx);
-    let _ = vm
-        .vm
-        .inspect(&mut Default::default(), VmExecutionMode::OneTx);
-
-    let sha_count = vm
-        .vm
-        .state
-        .precompiles_processor
-        .precompile_cycles_history
-        .inner()
-        .iter()
-        .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256)
-        .count();
-
-    assert!(sha_count >= 1000);
+fn sha256() {
+    test_sha256::<Vm<_, HistoryEnabled>>();
 }
 
 #[test]
-fn test_ecrecover() {
-    // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation).
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_random_rich_accounts(1)
-        .with_deployer()
-        .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .build();
-
-    let account = &mut vm.rich_accounts[0];
-    let tx = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(account.address),
-            calldata: Vec::new(),
-            value: Default::default(),
-            factory_deps: vec![],
-        },
-        None,
-    );
-    vm.vm.push_transaction(tx);
-    let _ = vm
-        .vm
-        .inspect(&mut Default::default(), VmExecutionMode::OneTx);
-
-    let ecrecover_count = vm
-        .vm
-        .state
-        .precompiles_processor
-        .precompile_cycles_history
-        .inner()
-        .iter()
-        .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover)
-        .count();
-
-    assert_eq!(ecrecover_count, 1);
+fn ecrecover() {
+    test_ecrecover::<Vm<_, HistoryEnabled>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs
index 230b1d0ad876..8dce2765233c 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs
@@ -1,28 +1,25 @@
 use std::sync::Arc;
 
 use once_cell::sync::OnceCell;
-use zksync_test_account::TxType;
+use zksync_test_contracts::{TestContract, TxType};
 use zksync_types::{utils::deployed_address_create, Execute, U256};
 
+use super::TestedLatestVm;
 use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt},
+    interface::{InspectExecutionMode, TxExecutionMode, VmInterface, VmInterfaceExt},
     tracers::PrestateTracer,
-    vm_latest::{
-        constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
-        tests::{tester::VmTesterBuilder, utils::read_simple_transfer_contract},
-        HistoryEnabled, ToTracerPointer,
-    },
+    versions::testonly::VmTesterBuilder,
+    vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, ToTracerPointer},
 };
 
 #[test]
 fn test_prestate_tracer() {
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
-        .with_random_rich_accounts(1)
-        .with_deployer()
+        .with_rich_accounts(1)
         .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT)
        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .build();
+        .build::<TestedLatestVm>();
 
     vm.deploy_test_contract();
     let account = &mut vm.rich_accounts[0];
@@ -41,7 +38,7 @@ fn test_prestate_tracer() {
     let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone());
     let tracer_ptr = prestate_tracer.into_tracer_pointer();
     vm.vm
-        .inspect(&mut tracer_ptr.into(), VmExecutionMode::Batch);
+        .inspect(&mut tracer_ptr.into(), InspectExecutionMode::OneTx);
 
     let prestate_result = Arc::try_unwrap(prestate_tracer_result)
         .unwrap()
@@ -53,37 +50,27 @@ fn test_prestate_tracer() {
 
 #[test]
 fn test_prestate_tracer_diff_mode() {
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
-        .with_random_rich_accounts(1)
-        .with_deployer()
+        .with_rich_accounts(1)
         .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT)
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .build();
-    let contract = read_simple_transfer_contract();
-    let tx = vm
-        .deployer
-        .as_mut()
-        .expect("You have to initialize builder with deployer")
-        .get_deploy_tx(&contract, None, TxType::L2)
-        .tx;
+        .build::<TestedLatestVm>();
+    let contract = TestContract::simple_transfer().bytecode;
+    let account = &mut vm.rich_accounts[0];
+    let tx = account.get_deploy_tx(contract, None, TxType::L2).tx;
 
     let nonce = tx.nonce().unwrap().0.into();
     vm.vm.push_transaction(tx);
-    vm.vm.execute(VmExecutionMode::OneTx);
-    let deployed_address = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce);
+    vm.vm.execute(InspectExecutionMode::OneTx);
+    let deployed_address = deployed_address_create(account.address, nonce);
     vm.test_contract = Some(deployed_address);
 
     // Deploy a second copy of the contract to see its appearance in the pre-state
-    let tx2 = vm
-        .deployer
-        .as_mut()
-        .expect("You have to initialize builder with deployer")
-        .get_deploy_tx(&contract, None, TxType::L2)
-        .tx;
+    let tx2 = account.get_deploy_tx(contract, None, TxType::L2).tx;
     let nonce2 = tx2.nonce().unwrap().0.into();
     vm.vm.push_transaction(tx2);
-    vm.vm.execute(VmExecutionMode::OneTx);
-    let deployed_address2 = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce2);
+    vm.vm.execute(InspectExecutionMode::OneTx);
+    let deployed_address2 = deployed_address_create(account.address, nonce2);
 
     let account = &mut vm.rich_accounts[0];
 
@@ -111,7 +98,7 @@ fn test_prestate_tracer_diff_mode() {
     let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone());
     let tracer_ptr = prestate_tracer.into_tracer_pointer();
     vm.vm
-        .inspect(&mut tracer_ptr.into(), VmExecutionMode::Bootloader);
+        .inspect(&mut tracer_ptr.into(), InspectExecutionMode::Bootloader);
 
     let prestate_result = Arc::try_unwrap(prestate_tracer_result)
         .unwrap()
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs
index c00192aa8f10..dfbec1706828 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs
@@ -1,228 +1,16 @@
-use ethabi::Token;
-use zksync_types::{Address, Execute, U256};
-
 use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt},
-    vm_latest::{
-        tests::{
-            tester::{DeployContractsTx, TxType, VmTesterBuilder},
-            utils::{read_expensive_contract, read_test_contract},
-        },
-        types::internals::TransactionData,
-        HistoryEnabled,
+    versions::testonly::refunds::{
+        test_negative_pubdata_for_transaction, test_predetermined_refunded_gas,
     },
+    vm_latest::{HistoryEnabled, Vm},
 };
 
 #[test]
-fn test_predetermined_refunded_gas() {
-    // In this test, we compare the execution of the bootloader with the predefined
-    // refunded gas and without them
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-    let l1_batch = vm.vm.batch_env.clone();
-
-    let counter = read_test_contract();
-    let account = &mut vm.rich_accounts[0];
-
-    let DeployContractsTx {
-        tx,
-        bytecode_hash: _,
-        address: _,
-    } = account.get_deploy_tx(&counter, None, TxType::L2);
-    vm.vm.push_transaction(tx.clone());
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-
-    assert!(!result.result.is_failed());
-
-    // If the refund provided by the operator or the final refund are the 0
-    // there is no impact of the operator's refund at all and so this test does not
-    // make much sense.
-    assert!(
-        result.refunds.operator_suggested_refund > 0,
-        "The operator's refund is 0"
-    );
-    assert!(result.refunds.gas_refunded > 0, "The final refund is 0");
-
-    let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch);
-    let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state();
-    assert!(!result_without_predefined_refunds.result.is_failed(),);
-
-    // Here we want to provide the same refund from the operator and check that it's the correct one.
-    // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund.
-    // But the overall result should be the same
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_l1_batch_env(l1_batch.clone())
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_rich_accounts(vec![account.clone()])
-        .build();
-
-    let tx = TransactionData::new(tx, false);
-    // Overhead
-    let overhead = tx.overhead_gas();
-    vm.vm
-        .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true);
-
-    let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch);
-    let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state();
-
-    assert!(!result_with_predefined_refunds.result.is_failed());
-
-    // We need to sort these lists as those are flattened from HashMaps
-    current_state_with_predefined_refunds
-        .used_contract_hashes
-        .sort();
-    current_state_without_predefined_refunds
-        .used_contract_hashes
-        .sort();
-
-    assert_eq!(
-        current_state_with_predefined_refunds.events,
-        current_state_without_predefined_refunds.events
-    );
-
-    assert_eq!(
-        current_state_with_predefined_refunds.user_l2_to_l1_logs,
-        current_state_without_predefined_refunds.user_l2_to_l1_logs
-    );
-
-    assert_eq!(
-        current_state_with_predefined_refunds.system_logs,
-        current_state_without_predefined_refunds.system_logs
-    );
-
-    assert_eq!(
-        current_state_with_predefined_refunds.deduplicated_storage_logs,
-        current_state_without_predefined_refunds.deduplicated_storage_logs
-    );
-    assert_eq!(
-        current_state_with_predefined_refunds.used_contract_hashes,
-        current_state_without_predefined_refunds.used_contract_hashes
-    );
-
-    // In this test we put the different refund from the operator.
-    // We still can't use the refund tracer, because it will override the refund.
-    // But we can check that the logs and events have changed.
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_l1_batch_env(l1_batch)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_rich_accounts(vec![account.clone()])
-        .build();
-
-    let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000;
-    vm.vm
-        .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true);
-    let result = vm.vm.execute(VmExecutionMode::Batch);
-    let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state();
-
-    assert!(!result.result.is_failed());
-    current_state_with_changed_predefined_refunds
-        .used_contract_hashes
-        .sort();
-    current_state_without_predefined_refunds
-        .used_contract_hashes
-        .sort();
-
-    assert_eq!(
-        current_state_with_changed_predefined_refunds.events.len(),
-        current_state_without_predefined_refunds.events.len()
-    );
-
-    assert_ne!(
-        current_state_with_changed_predefined_refunds.events,
-        current_state_without_predefined_refunds.events
-    );
-
-    assert_eq!(
-        current_state_with_changed_predefined_refunds.user_l2_to_l1_logs,
-        current_state_without_predefined_refunds.user_l2_to_l1_logs
-    );
-
-    assert_ne!(
-        current_state_with_changed_predefined_refunds.system_logs,
-        current_state_without_predefined_refunds.system_logs
-    );
-
-    assert_eq!(
-        current_state_with_changed_predefined_refunds
-            .deduplicated_storage_logs
-            .len(),
-        current_state_without_predefined_refunds
-            .deduplicated_storage_logs
-            .len()
-    );
-
-    assert_ne!(
-        current_state_with_changed_predefined_refunds.deduplicated_storage_logs,
-        current_state_without_predefined_refunds.deduplicated_storage_logs
-    );
-    assert_eq!(
-        current_state_with_changed_predefined_refunds.used_contract_hashes,
-        current_state_without_predefined_refunds.used_contract_hashes
-    );
+fn predetermined_refunded_gas() {
+    test_predetermined_refunded_gas::<Vm<_, HistoryEnabled>>();
 }
 
 #[test]
 fn negative_pubdata_for_transaction() {
-    let expensive_contract_address = Address::random();
-    let (expensive_contract_bytecode, expensive_contract) = read_expensive_contract();
-    let expensive_function = expensive_contract.function("expensive").unwrap();
-    let cleanup_function = expensive_contract.function("cleanUp").unwrap();
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .with_custom_contracts(vec![(
-            expensive_contract_bytecode,
-            expensive_contract_address,
-            false,
-        )])
-        .build();
-
-    let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(expensive_contract_address),
-            calldata: expensive_function
-                .encode_input(&[Token::Uint(10.into())])
-                .unwrap(),
-            value: U256::zero(),
-            factory_deps: vec![],
-        },
-        None,
-    );
-    vm.vm.push_transaction(expensive_tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(
-        !result.result.is_failed(),
-        "Transaction wasn't successful: {result:#?}"
-    );
-
-    // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact.
-    let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(expensive_contract_address),
-            calldata: cleanup_function.encode_input(&[]).unwrap(),
-            value: U256::zero(),
-            factory_deps: vec![],
-        },
-        None,
-    );
-    vm.vm.push_transaction(clean_up_tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(
-        !result.result.is_failed(),
-        "Transaction wasn't successful: {result:#?}"
-    );
-    assert!(result.refunds.operator_suggested_refund > 0);
-    assert_eq!(
-        result.refunds.gas_refunded,
-        result.refunds.operator_suggested_refund
-    );
+    test_negative_pubdata_for_transaction::<Vm<_, HistoryEnabled>>();
 }
- let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: Some(account_abstraction.address), - calldata: encoded_input, - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - max_fee_per_blob_gas: None, - blob_versioned_hashes: None, - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000, false).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.into(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - Some(beneficiary.address), - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - vec![], - Default::default(), - ); - - let mut transaction_request: TransactionRequest = tx_712.into(); - transaction_request.chain_id = Some(chain_id.into()); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000, false).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.into(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); +fn require_eip712() { + test_require_eip712::<Vm<_, HistoryEnabled>>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index 2e854cfc784d..f126a7f8fbdd 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -1,186 +1,36 @@ -use assert_matches::assert_matches; use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{get_nonce_key, Address, Execute, Nonce, U256}; +use zksync_test_contracts::{ + DeployContractsTx, LoadnextContractExecutionParams, TestContract, TxType, +}; +use zksync_types::{get_nonce_key, U256}; +use zksync_vm_interface::InspectExecutionMode; +use super::TestedLatestVm; use crate::{ interface::{ storage::WriteStorage, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - VmInterfaceHistoryEnabled, + TxExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, tracers::dynamic::vm_1_5_0::DynTracer, + versions::testonly::{ + rollbacks::{test_rollback_in_call_mode, test_vm_loadnext_rollbacks, test_vm_rollbacks}, + VmTesterBuilder, + }, vm_latest::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, + types::internals::ZkSyncVmState, BootloaderState, HistoryEnabled, HistoryMode, + SimpleMemory, ToTracerPointer, Vm, VmTracer, }, }; #[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = 
account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), - ), - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); +fn vm_rollbacks() { + test_vm_rollbacks::<Vm<_, HistoryEnabled>>(); } #[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. 
- } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused( - loadnext_deploy_tx.initiator_account(), - loadnext_deploy_tx.nonce().unwrap(), - ) - .into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused( - loadnext_deploy_tx.initiator_account(), - loadnext_deploy_tx.nonce().unwrap(), - ) - .into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); +fn vm_loadnext_rollbacks() { + test_vm_loadnext_rollbacks::<Vm<_, HistoryEnabled>>(); } // Testing tracer that does not allow the recursion to go deeper than a certain limit @@ -209,36 +59,35 @@ impl<S: WriteStorage, H: HistoryMode> VmTracer<S, H> for MaxRecursionTracer { } #[test] -fn test_layered_rollback() { +fn layered_rollback() { // This test checks that the layered rollbacks work correctly, i.e. // the rollback by the operator will always revert all the changes - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); + .with_rich_accounts(1) + .build::<TestedLatestVm>(); let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; let DeployContractsTx { tx: deploy_tx, address, .. 
} = account.get_deploy_tx( - &loadnext_contract, + TestContract::load_test().bytecode, Some(&[Token::Uint(0.into())]), TxType::L2, ); vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); + let deployment_res = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!deployment_res.result.is_failed(), "transaction failed"); let loadnext_transaction = account.get_loadnext_transaction( address, LoadnextContractExecutionParams { - writes: 1, + initial_writes: 1, recursive_calls: 20, ..LoadnextContractExecutionParams::empty() }, @@ -259,7 +108,8 @@ fn test_layered_rollback() { max_recursion_depth: 15, } .into_tracer_pointer(); - vm.vm.inspect(&mut tracer.into(), VmExecutionMode::OneTx); + vm.vm + .inspect(&mut tracer.into(), InspectExecutionMode::OneTx); let nonce_val2 = vm .vm @@ -286,40 +136,11 @@ fn test_layered_rollback() { ); vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); + let result = vm.vm.execute(InspectExecutionMode::OneTx); assert!(!result.result.is_failed(), "transaction must not fail"); } #[test] fn rollback_in_call_mode() { - let counter_bytecode = read_test_contract(); - let counter_address = Address::repeat_byte(1); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::EthCall) - .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) - .with_random_rich_accounts(1) - .build(); - let account = &mut vm.rich_accounts[0]; - let tx = account.get_test_contract_transaction(counter_address, true, None, false, TxType::L2); - - let (compression_result, vm_result) = vm - .vm - .execute_transaction_with_bytecode_compression(tx, true); - compression_result.unwrap(); - assert_matches!( - vm_result.result, - ExecutionResult::Revert { output } - if output.to_string().contains("This method always reverts") - ); - - let storage_logs = vm - .vm - .get_current_execution_state() - .deduplicated_storage_logs; - assert!( - storage_logs.iter().all(|log| !log.is_write()), - "{storage_logs:?}" - ); + test_rollback_in_call_mode::<Vm<_, HistoryEnabled>>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/secp256r1.rs b/core/lib/multivm/src/versions/vm_latest/tests/secp256r1.rs new file mode 100644 index 000000000000..11534a26ded2 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_latest/tests/secp256r1.rs @@ -0,0 +1,9 @@ +use crate::{ + versions::testonly::secp256r1::test_secp256r1, + vm_latest::{HistoryEnabled, Vm}, +}; + +#[test] +fn secp256r1() { + test_secp256r1::<Vm<_, HistoryEnabled>>(); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs deleted file mode 100644 index 93be9506a3b0..000000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs +++ /dev/null @@ -1,74 +0,0 @@ -use zk_evm_1_5_0::zkevm_opcode_defs::p256; -use zksync_system_constants::P256VERIFY_PRECOMPILE_ADDRESS; -use zksync_types::{web3::keccak256, Execute, H256, U256}; -use zksync_utils::h256_to_u256; - -use crate::{ - interface::{ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{tests::tester::VmTesterBuilder, HistoryEnabled}, -}; - -#[test] -fn test_sekp256r1() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_execution_mode(TxExecutionMode::EthCall) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - // The digest, secret key and public key were copied from the following test suit: `https://github.com/hyperledger/besu/blob/b6a6402be90339367d5bcabcd1cfd60df4832465/crypto/algorithms/src/test/java/org/hyperledger/besu/crypto/SECP256R1Test.java#L36` - let sk = p256::SecretKey::from_slice( - &hex::decode("519b423d715f8b581f4fa8ee59f4771a5b44c8130b4e3eacca54a56dda72b464").unwrap(), - ) - .unwrap(); - let sk = p256::ecdsa::SigningKey::from(sk); - - let digest = keccak256(&hex::decode("5905238877c77421f73e43ee3da6f2d9e2ccad5fc942dcec0cbd25482935faaf416983fe165b1a045ee2bcd2e6dca3bdf46c4310a7461f9a37960ca672d3feb5473e253605fb1ddfd28065b53cb5858a8ad28175bf9bd386a5e471ea7a65c17cc934a9d791e91491eb3754d03799790fe2d308d16146d5c9b0d0debd97d79ce8").unwrap()); - let public_key_encoded = hex::decode("1ccbe91c075fc7f4f033bfa248db8fccd3565de94bbfb12f3c59ff46c271bf83ce4014c68811f9a21a1fdb2c0e6113e06db7ca93b7404e78dc7ccd5ca89a4ca9").unwrap(); - - let (sig, _) = sk.sign_prehash_recoverable(&digest).unwrap(); - let (r, s) = sig.split_bytes(); - - let mut encoded_r = [0u8; 32]; - encoded_r.copy_from_slice(&r); - - let mut encoded_s = [0u8; 32]; - encoded_s.copy_from_slice(&s); - - let mut x = [0u8; 32]; - x.copy_from_slice(&public_key_encoded[0..32]); - - let mut y = [0u8; 32]; - y.copy_from_slice(&public_key_encoded[32..64]); - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(P256VERIFY_PRECOMPILE_ADDRESS), - calldata: [digest, encoded_r, encoded_s, x, y].concat(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - - let execution_result = vm.vm.execute(VmExecutionMode::Batch); - - let ExecutionResult::Success { output } = execution_result.result else { - panic!("batch failed") - }; - - let output = H256::from_slice(&output); - - assert_eq!( - h256_to_u256(output), - U256::from(1u32), - "verification was not successful" - ); -} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs index cd020ee9f966..29072e66b1ea 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs @@ -1,83 +1,14 @@ -use assert_matches::assert_matches; - use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::tester::{TxType, VmTesterBuilder}, - HistoryDisabled, - }, + versions::testonly::simple_execution::{test_estimate_fee, test_simple_execute}, + vm_latest::{HistoryEnabled, Vm}, }; #[test] fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. 
}); + test_estimate_fee::<Vm<_, HistoryEnabled>>(); } #[test] fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); + test_simple_execute::<Vm<_, HistoryEnabled>>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs index 126d174a6468..4cb03875a0f0 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs @@ -1,188 +1,14 @@ -use ethabi::Token; -use zksync_contracts::{load_contract, read_bytecode}; -use zksync_test_account::Account; -use zksync_types::{fee::Fee, Address, Execute, U256}; - use crate::{ - interface::{ - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - }, - vm_latest::{tests::tester::VmTesterBuilder, HistoryEnabled}, + versions::testonly::storage::{test_storage_behavior, test_transient_storage_behavior}, + vm_latest::{HistoryEnabled, Vm}, }; -#[derive(Debug, Default)] - -struct TestTxInfo { - calldata: Vec<u8>, - fee_overrides: Option<Fee>, - should_fail: bool, -} - -fn test_storage(txs: Vec<TestTxInfo>) -> u32 { - let bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let test_contract_address = Address::random(); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(txs.len() as u32) - .with_custom_contracts(vec![(bytecode, test_contract_address, false)]) - .build(); - - let mut last_result = None; - - for (id, tx) in txs.into_iter().enumerate() { - let TestTxInfo { - calldata, - fee_overrides, - should_fail, - } = tx; - - let account = &mut vm.rich_accounts[id]; - - vm.vm.make_snapshot(); - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata, - value: 0.into(), - factory_deps: vec![], - }, - fee_overrides, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - if should_fail { - assert!(result.result.is_failed(), "Transaction should fail"); - vm.vm.rollback_to_the_latest_snapshot(); - } else { - assert!(!result.result.is_failed(), "Transaction should not fail"); - vm.vm.pop_snapshot_no_rollback(); - } - - last_result = Some(result); - } - - last_result.unwrap().statistics.pubdata_published -} - -fn test_storage_one_tx(second_tx_calldata: Vec<u8>) -> u32 { - test_storage(vec![ - TestTxInfo::default(), - TestTxInfo { - calldata: second_tx_calldata, - fee_overrides: None, - should_fail: false, - }, - ]) -} - -#[test] -fn test_storage_behavior() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - // In all of the tests below we provide the first tx to ensure that the tracers will not include - // the statistics from the start of the bootloader and will only include those for the transaction itself. - - let base_pubdata = test_storage_one_tx(vec![]); - let simple_test_pubdata = test_storage_one_tx( - contract - .function("simpleWrite") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - let resetting_write_pubdata = test_storage_one_tx( - contract - .function("resettingWrite") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - let resetting_write_via_revert_pubdata = test_storage_one_tx( - contract - .function("resettingWriteViaRevert") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - - assert_eq!(simple_test_pubdata - base_pubdata, 65); - assert_eq!(resetting_write_pubdata - base_pubdata, 34); - assert_eq!(resetting_write_via_revert_pubdata - base_pubdata, 34); -} - #[test] -fn test_transient_storage_behavior() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let first_tstore_test = contract - .function("testTransientStore") - .unwrap() - .encode_input(&[]) - .unwrap(); - // Second transaction checks that, as expected, the transient storage is cleared after the first transaction. 
- let second_tstore_test = contract - .function("assertTValue") - .unwrap() - .encode_input(&[Token::Uint(U256::zero())]) - .unwrap(); - - test_storage(vec![ - TestTxInfo { - calldata: first_tstore_test, - ..TestTxInfo::default() - }, - TestTxInfo { - calldata: second_tstore_test, - ..TestTxInfo::default() - }, - ]); +fn storage_behavior() { + test_storage_behavior::<Vm<_, HistoryEnabled>>(); } #[test] -fn test_transient_storage_behavior_panic() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let basic_tstore_test = contract - .function("tStoreAndRevert") - .unwrap() - .encode_input(&[Token::Uint(U256::one()), Token::Bool(false)]) - .unwrap(); - - let small_fee = Fee { - // Something very-very small to make the validation fail - gas_limit: 10_000.into(), - ..Account::default_fee() - }; - - test_storage(vec![ - TestTxInfo { - calldata: basic_tstore_test.clone(), - ..TestTxInfo::default() - }, - TestTxInfo { - fee_overrides: Some(small_fee), - should_fail: true, - ..TestTxInfo::default() - }, - TestTxInfo { - calldata: basic_tstore_test, - ..TestTxInfo::default() - }, - ]); +fn transient_storage_behavior() { + test_transient_storage_behavior::<Vm<_, HistoryEnabled>>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs deleted file mode 100644 index c0ef52afaa52..000000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_5_0::{aux_structures::Timestamp, vm_state::VmLocalState}; -use zksync_types::{StorageKey, StorageValue, U256}; - -use crate::{ - interface::storage::WriteStorage, - vm_latest::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - utils::logs::StorageLogQuery, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap<StorageKey, StorageValue>); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState<H: HistoryMode> { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder<HashMap<U256, Vec<U256>>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder<HashMap<U256, Option<u32>>, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState<H: HistoryMode> { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory<Box<StorageLogQuery>, H>, - - pub(crate) paid_changes: HistoryRecorder<HashMap<StorageKey, u32>, H>, - pub(crate) initial_values: HistoryRecorder<HashMap<StorageKey, U256>, H>, - pub(crate) returned_io_refunds: HistoryRecorder<Vec<u32>, H>, - pub(crate) returned_pubdata_costs: HistoryRecorder<Vec<i32>, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState<H: HistoryMode> { - pub(crate) timestamp_history: HistoryRecorder<Vec<Timestamp>, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState<H: HistoryMode> { - event_sink: InMemoryEventSink<H>, - precompile_processor_state: PrecompileProcessorTestInnerState<H>, - memory: SimpleMemory<H>, - decommitter_state: DecommitterTestInnerState<H>, - storage_oracle_state: StorageOracleInnerState<H>, - local_state: VmLocalState, -} - -impl<S: WriteStorage, H: CommonHistoryMode> Vm<S, H> { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState<H::Vm1_5_0> { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.storage_frames_stack.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_io_refunds: self.state.storage.returned_io_refunds.clone(), - returned_pubdata_costs: self.state.storage.returned_pubdata_costs.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs deleted file mode 100644 index c3cc5d8d9803..000000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder, -}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs deleted file mode 100644 index 1fe4232c7780..000000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs +++ /dev/null @@ -1,299 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use zksync_types::{ - block::L2BlockHasher, - fee_model::BatchFeeInput, - get_code_key, 
get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}, - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, - VmInterface, VmInterfaceExt, - }, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView<InMemoryStorage>; - -pub(crate) struct VmTester<H: HistoryMode> { - pub(crate) vm: Vm<InMemoryStorageView, H>, - pub(crate) storage: StoragePtr<InMemoryStorageView>, - pub(crate) fee_account: Address, - pub(crate) deployer: Option<Account>, - pub(crate) test_contract: Option<Address>
, - pub(crate) rich_accounts: Vec<Account>, - pub(crate) custom_contracts: Vec<ContractsToDeploy>, - _phantom: std::marker::PhantomData<H>, -} - -impl<H: HistoryMode> VmTester<H> { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(&self.storage).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec<u8>, Address, bool); - -pub(crate) struct VmTesterBuilder<H: HistoryMode> { - storage: Option<InMemoryStorage>, - l1_batch_env: Option<L1BatchEnv>, - system_env: SystemEnv, - deployer: Option<Account>, - rich_accounts: Vec<Account>, - custom_contracts: Vec<ContractsToDeploy>, - _phantom: PhantomData<H>, -} - -impl<H: HistoryMode> Clone for VmTesterBuilder<H> { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl<H: HistoryMode> VmTesterBuilder<H> { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, 
system_env: SystemEnv) -> Self { - self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.bootloader_gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec<Account>) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec<ContractsToDeploy>) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester<H> { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr<InMemoryStorageView>, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs index 2db37881352f..a2cd6af62114 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs @@ -1,54 +1,9 @@ -use zksync_types::{Execute, H160}; - use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_latest::{ - tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, + versions::testonly::tracing_execution_error::test_tracing_of_execution_errors, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(contract_address), - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); +fn tracing_of_execution_errors() { + test_tracing_of_execution_errors::<Vm<_, HistoryEnabled>>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs index 2c380623636a..f37ebe6a3fb7 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs @@ -1,220 +1,16 @@ -use ethabi::Token; -use zksync_contracts::{load_contract, read_bytecode}; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{utils::storage_key_for_eth_balance, AccountTreeId, Address, Execute, U256}; -use zksync_utils::u256_to_h256; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::get_balance, - }, - HistoryEnabled, + versions::testonly::transfer::{ + test_reentrancy_protection_send_and_transfer, test_send_and_transfer, }, + 
vm_latest::{HistoryEnabled, Vm}, }; -enum TestOptions { - Send(U256), - Transfer(U256), -} - -fn test_send_or_transfer(test_option: TestOptions) { - let test_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let recipeint_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json", - ); - let test_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - - let test_contract_address = Address::random(); - let recipient_address = Address::random(); - - let (value, calldata) = match test_option { - TestOptions::Send(value) => ( - value, - test_abi - .function("send") - .unwrap() - .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) - .unwrap(), - ), - TestOptions::Transfer(value) => ( - value, - test_abi - .function("transfer") - .unwrap() - .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) - .unwrap(), - ), - }; - - let mut storage = get_empty_storage(); - storage.set_value( - storage_key_for_eth_balance(&test_contract_address), - u256_to_h256(value), - ); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ - (test_bytecode, test_contract_address, false), - (recipeint_bytecode, recipient_address, false), - ]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let tx_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !tx_result.result.is_failed(), - "Transaction wasn't successful" - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); - - let new_recipient_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &recipient_address, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!(new_recipient_balance, value); -} - #[test] -fn test_send_and_transfer() { - test_send_or_transfer(TestOptions::Send(U256::zero())); - test_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); - test_send_or_transfer(TestOptions::Transfer(U256::zero())); - test_send_or_transfer(TestOptions::Transfer(U256::from(10).pow(18.into()))); -} - -fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { - let test_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipeint_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - let test_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipient_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - - let test_contract_address = Address::random(); - let reentrant_recipeint_address = Address::random(); - - let (value, calldata) = match test_option { - TestOptions::Send(value) => ( - value, - test_abi - .function("send") - .unwrap() - .encode_input(&[ - Token::Address(reentrant_recipeint_address), - 
Token::Uint(value), - ]) - .unwrap(), - ), - TestOptions::Transfer(value) => ( - value, - test_abi - .function("transfer") - .unwrap() - .encode_input(&[ - Token::Address(reentrant_recipeint_address), - Token::Uint(value), - ]) - .unwrap(), - ), - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ - (test_bytecode, test_contract_address, false), - ( - reentrant_recipeint_bytecode, - reentrant_recipeint_address, - false, - ), - ]) - .build(); - - // First transaction, the job of which is to warm up the slots for balance of the recipient as well as its storage variable. - let account = &mut vm.rich_accounts[0]; - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(reentrant_recipeint_address), - calldata: reentrant_recipient_abi - .function("setX") - .unwrap() - .encode_input(&[]) - .unwrap(), - value: U256::from(1), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let tx1_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !tx1_result.result.is_failed(), - "Transaction 1 wasn't successful" - ); - - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata, - value, - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx2); - let tx2_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - tx2_result.result.is_failed(), - "Transaction 2 should have failed, but it succeeded" - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); +fn send_and_transfer() { + test_send_and_transfer::<Vm<_, HistoryEnabled>>(); } #[test] -fn test_reentrancy_protection_send_and_transfer() { - test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::zero())); - test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); - test_reentrancy_protection_send_or_transfer(TestOptions::Transfer(U256::zero())); - test_reentrancy_protection_send_or_transfer(TestOptions::Transfer( - U256::from(10).pow(18.into()), - )); +fn reentrancy_protection_send_and_transfer() { + test_reentrancy_protection_send_and_transfer::<Vm<_, HistoryEnabled>>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs index d85a504de40f..9889e26e4d2b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs @@ -1,354 +1,21 @@ -use zk_evm_1_5_0::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_upgrade::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_test_contract}; use crate::{ - interface::{ - storage::WriteStorage, ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, - VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - }, - vm_latest::{ - tests::{ - tester::VmTesterBuilder, - 
utils::{read_complex_upgrade, verify_required_storage}, - }, - HistoryEnabled, + versions::testonly::upgrade::{ + test_complex_upgrader, test_force_deploy_upgrade, test_protocol_upgrade_is_first, }, + vm_latest::{HistoryEnabled, Vm}, }; -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block #[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); +fn protocol_upgrade_is_first() { + test_protocol_upgrade_is_first::<Vm<_, HistoryEnabled>>(); } -/// In this test we try to test how force deployments could be done via 
protocol upgrade transactions. #[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); +fn force_deploy_upgrade() { + test_force_deploy_upgrade::<Vm<_, HistoryEnabled>>(); } -/// Here we show how the work with the complex upgrader could be done #[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in user space - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - 
-#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecode hash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec<u8>, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(&params) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), - calldata, - factory_deps: vec![], - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself -// For the explanation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: Some(COMPLEX_UPGRADER_ADDRESS), - calldata: complex_upgrader_calldata, - factory_deps: vec![], - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_msg_sender_test() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") +fn complex_upgrader() { + test_complex_upgrader::<Vm<_, HistoryEnabled>>(); } diff --git 
a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs deleted file mode 100644 index 34582fb9ddee..000000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs +++ /dev/null @@ -1,142 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bootloader_code, read_bytecode, read_zbin_bytecode, BaseSystemContracts, - SystemContractCode, -}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::{ - interface::storage::{StoragePtr, WriteStorage}, - vm_latest::{tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode}, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_bootloader_code(test); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn read_simple_transfer_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/simple-transfer/simple-transfer.sol/SimpleTransfer.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - 
"core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn load_precompiles_contract() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -pub(crate) fn read_expensive_contract() -> (Vec, Contract) { - const PATH: &str = - "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; - (read_bytecode(PATH), load_contract(PATH)) -} - -pub(crate) fn read_proxy_counter_contract() -> (Vec, Contract) { - const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json"; - (read_bytecode(PATH), load_contract(PATH)) -} diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs index 2ae5e81a328c..8755b98ddb8c 100755 --- a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs @@ -32,7 +32,7 @@ use crate::{ CircuitsTracer, RefundsTracer, ResultTracer, }, types::internals::ZkSyncVmState, - vm::MultiVMSubversion, + vm::MultiVmSubversion, VmTracer, }, }; @@ -65,7 +65,7 @@ pub struct DefaultExecutionTracer { pub(crate) circuits_tracer: CircuitsTracer, // This tracer is responsible for handling EVM deployments and providing the data to the code decommitter. 
pub(crate) evm_deploy_tracer: Option>, - subversion: MultiVMSubversion, + subversion: MultiVmSubversion, storage: StoragePtr, _phantom: PhantomData, } @@ -80,7 +80,7 @@ impl DefaultExecutionTracer { storage: StoragePtr, refund_tracer: Option>, pubdata_tracer: Option>, - subversion: MultiVMSubversion, + subversion: MultiVmSubversion, ) -> Self { Self { tx_has_been_processed: false, @@ -228,7 +228,10 @@ impl Tracer for DefaultExecutionTracer { ); match hook { - VmHook::TxHasEnded => self.tx_has_been_processed = true, + VmHook::TxHasEnded if matches!(self.execution_mode, VmExecutionMode::OneTx) => { + self.result_tracer.tx_finished_in_one_tx_mode = true; + self.tx_has_been_processed = true; + } VmHook::NoValidationEntered => self.in_account_validation = false, VmHook::AccountValidationEntered => self.in_account_validation = true, VmHook::FinalBatchInfo => self.final_batch_info_requested = true, diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs index becc4f225276..2e6ab8089eb0 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs @@ -7,14 +7,18 @@ use zk_evm_1_5_0::{ FarCallOpcode, FatPointer, Opcode, CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, }, }; -use zksync_types::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; -use zksync_utils::{bytecode::hash_evm_bytecode, bytes_to_be_words, h256_to_u256}; -use zksync_vm_interface::storage::StoragePtr; +use zksync_types::{ + bytecode::BytecodeHash, CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, +}; use super::{traits::VmTracer, utils::read_pointer}; use crate::{ - interface::{storage::WriteStorage, tracer::TracerExecutionStatus}, + interface::{ + storage::{StoragePtr, WriteStorage}, + tracer::TracerExecutionStatus, + }, tracers::dynamic::vm_1_5_0::DynTracer, + utils::bytecode::bytes_to_be_words, vm_latest::{BootloaderState, HistoryMode, SimpleMemory, ZkSyncVmState}, }; @@ -89,14 +93,13 @@ impl VmTracer for EvmDeployTracer { state: &mut ZkSyncVmState, _bootloader_state: &mut BootloaderState, ) -> TracerExecutionStatus { + let timestamp = Timestamp(state.local_state.timestamp); for published_bytecode in mem::take(&mut self.pending_bytecodes) { - let hash = hash_evm_bytecode(&published_bytecode); - let as_words = bytes_to_be_words(published_bytecode); - - state.decommittment_processor.populate( - vec![(h256_to_u256(hash), as_words)], - Timestamp(state.local_state.timestamp), - ); + let hash = BytecodeHash::for_evm_bytecode(&published_bytecode).value_u256(); + let as_words = bytes_to_be_words(&published_bytecode); + state + .decommittment_processor + .insert_dynamic_bytecode(hash, as_words, timestamp); } TracerExecutionStatus::Continue } diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs index 32f3984834c8..3698914630dd 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs @@ -1,32 +1,39 @@ -use std::marker::PhantomData; +use std::{marker::PhantomData, rc::Rc}; use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_queries; use zk_evm_1_5_0::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, }; -use zksync_types::{writes::StateDiffRecord, AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS}; -use 
zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; +use zksync_types::{ + h256_to_u256, u256_to_h256, writes::StateDiffRecord, AccountTreeId, StorageKey, + L1_MESSENGER_ADDRESS, +}; +use zksync_vm_interface::pubdata::PubdataBuilder; use crate::{ interface::{ + pubdata::{L1MessengerL2ToL1Log, PubdataInput}, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, }, tracers::dynamic::vm_1_5_0::DynTracer, - utils::events::{ - extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + utils::{ + bytecode::be_words_to_bytes, + events::{ + extract_bytecode_publication_requests_from_l1_messenger, + extract_l2tol1logs_from_l1_messenger, + }, }, vm_latest::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, constants::BOOTLOADER_HEAP_PAGE, old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, tracers::{traits::VmTracer, utils::VmHook}, - types::internals::{PubdataInput, ZkSyncVmState}, + types::internals::ZkSyncVmState, utils::logs::collect_events_and_l1_system_logs_after_timestamp, - vm::MultiVMSubversion, + vm::MultiVmSubversion, StorageOracle, }, }; @@ -40,7 +47,8 @@ pub(crate) struct PubdataTracer { // For testing purposes it might be helpful to supply an exact set of state diffs to be provided // to the L1Messenger. enforced_state_diffs: Option>, - subversion: MultiVMSubversion, + subversion: MultiVmSubversion, + pubdata_builder: Option>, _phantom_data: PhantomData, } @@ -48,7 +56,8 @@ impl PubdataTracer { pub(crate) fn new( l1_batch_env: L1BatchEnv, execution_mode: VmExecutionMode, - subversion: MultiVMSubversion, + subversion: MultiVmSubversion, + pubdata_builder: Option>, ) -> Self { Self { l1_batch_env, @@ -56,6 +65,7 @@ impl PubdataTracer { execution_mode, enforced_state_diffs: None, subversion, + pubdata_builder, _phantom_data: Default::default(), } } @@ -67,7 +77,8 @@ impl PubdataTracer { l1_batch_env: L1BatchEnv, execution_mode: VmExecutionMode, forced_state_diffs: Vec, - subversion: MultiVMSubversion, + subversion: MultiVmSubversion, + pubdata_builder: Option>, ) -> Self { Self { l1_batch_env, @@ -75,6 +86,7 @@ impl PubdataTracer { execution_mode, enforced_state_diffs: Some(forced_state_diffs), subversion, + pubdata_builder, _phantom_data: Default::default(), } } @@ -125,15 +137,13 @@ impl PubdataTracer { bytecode_publication_requests .iter() .map(|bytecode_publication_request| { - state + let bytecode_words = state .decommittment_processor .known_bytecodes .inner() .get(&h256_to_u256(bytecode_publication_request.bytecode_hash)) - .unwrap() - .iter() - .flat_map(u256_to_bytes_be) - .collect() + .unwrap(); + be_words_to_bytes(bytecode_words) }) .collect() } @@ -221,13 +231,22 @@ impl VmTracer for PubdataTracer { if self.pubdata_info_requested { let pubdata_input = self.build_pubdata_input(state); - // Save the pubdata for the future initial bootloader memory building - bootloader_state.set_pubdata_input(pubdata_input.clone()); - // Apply the pubdata to the current memory let mut memory_to_apply = vec![]; - apply_pubdata_to_memory(&mut memory_to_apply, pubdata_input); + apply_pubdata_to_memory( + &mut memory_to_apply, + self.pubdata_builder + .as_ref() + .expect("`pubdata_builder` is required to finish batch") + .as_ref(), + &pubdata_input, + bootloader_state.protocol_version(), + ); + + // Save the pubdata for the future initial bootloader memory building + bootloader_state.set_pubdata_input(pubdata_input); + 
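As an aside on the `pubdata_input` being saved here: it is later serialized with a simple length-prefixed layout, documented in the `build_pubdata` implementation removed later in this diff. A self-contained illustration of the L2->L1 messages section of that layout:

```rust
// Each section is count-prefixed, and each variable-length item carries its own
// u32 big-endian length prefix:
// `(numberOfMessages as u32) || (messages[i].len() as u32) || messages[i] || ...`
fn encode_messages(messages: &[Vec<u8>]) -> Vec<u8> {
    let mut out = Vec::new();
    out.extend((messages.len() as u32).to_be_bytes());
    for msg in messages {
        out.extend((msg.len() as u32).to_be_bytes());
        out.extend(msg);
    }
    out
}

fn main() {
    // One 4-byte message `0xdeadbeef`: count (1u32), length (4u32), then the payload.
    let encoded = encode_messages(&[vec![0xde, 0xad, 0xbe, 0xef]]);
    assert_eq!(encoded, [0, 0, 0, 1, 0, 0, 0, 4, 0xde, 0xad, 0xbe, 0xef]);
}
```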
state.memory.populate_page( BOOTLOADER_HEAP_PAGE as usize, memory_to_apply, diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tracers/refunds.rs index 78826a16313d..6ef251c2db98 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/refunds.rs @@ -5,8 +5,7 @@ use zk_evm_1_5_0::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, }; -use zksync_types::{H256, U256}; -use zksync_utils::ceil_div_u256; +use zksync_types::{ceil_div_u256, H256, U256}; use crate::{ interface::{ @@ -25,7 +24,7 @@ use crate::{ }, types::internals::ZkSyncVmState, utils::fee::get_batch_base_fee, - vm::MultiVMSubversion, + vm::MultiVmSubversion, }, }; @@ -51,12 +50,12 @@ pub(crate) struct RefundsTracer { spent_pubdata_counter_before: u32, l1_batch: L1BatchEnv, pubdata_published: u32, - subversion: MultiVMSubversion, + subversion: MultiVmSubversion, _phantom: PhantomData, } impl RefundsTracer { - pub(crate) fn new(l1_batch: L1BatchEnv, subversion: MultiVMSubversion) -> Self { + pub(crate) fn new(l1_batch: L1BatchEnv, subversion: MultiVmSubversion) -> Self { Self { pending_refund_request: None, refund_gas: 0, diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/result_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/result_tracer.rs index 6ba00f4a0998..80a3147f65d2 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/result_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/result_tracer.rs @@ -23,7 +23,7 @@ use crate::{ utils::{get_vm_hook_params, read_pointer, VmHook}, }, types::internals::ZkSyncVmState, - vm::MultiVMSubversion, + vm::MultiVmSubversion, BootloaderState, HistoryMode, SimpleMemory, }, }; @@ -102,19 +102,22 @@ pub(crate) struct ResultTracer { execution_mode: VmExecutionMode, far_call_tracker: FarCallTracker, - subversion: MultiVMSubversion, + subversion: MultiVmSubversion, + + pub(crate) tx_finished_in_one_tx_mode: bool, _phantom: PhantomData, } impl ResultTracer { - pub(crate) fn new(execution_mode: VmExecutionMode, subversion: MultiVMSubversion) -> Self { + pub(crate) fn new(execution_mode: VmExecutionMode, subversion: MultiVmSubversion) -> Self { Self { result: None, bootloader_out_of_gas: false, execution_mode, far_call_tracker: Default::default(), subversion, + tx_finished_in_one_tx_mode: false, _phantom: PhantomData, } } @@ -297,7 +300,7 @@ impl ResultTracer { let has_failed = tx_has_failed(state, bootloader_state.current_tx() as u32, self.subversion); - if has_failed { + if self.tx_finished_in_one_tx_mode && has_failed { self.result = Some(Result::Error { error_reason: VmRevertReason::General { msg: "Transaction reverted with empty reason. 
Possibly out of gas" @@ -306,9 +309,9 @@ impl ResultTracer { }, }); } else { - self.result = Some(self.result.clone().unwrap_or(Result::Success { + self.result = Some(Result::Success { return_data: vec![], - })); + }); } } } @@ -333,7 +336,7 @@ impl ResultTracer { pub(crate) fn tx_has_failed( state: &ZkSyncVmState, tx_id: u32, - subversion: MultiVMSubversion, + subversion: MultiVmSubversion, ) -> bool { let mem_slot = get_result_success_first_slot(subversion) + tx_id; let mem_value = state diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs index 0a11f5d3f849..6f81a3ac8de5 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs @@ -9,8 +9,7 @@ use zksync_system_constants::{ ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, SECP256R1_VERIFY_PRECOMPILE_ADDRESS, SHA256_PRECOMPILE_ADDRESS, }; -use zksync_types::U256; -use zksync_utils::u256_to_h256; +use zksync_types::{u256_to_h256, U256}; use crate::vm_latest::{ constants::{ @@ -22,7 +21,7 @@ use crate::vm_latest::{ memory::SimpleMemory, utils::{aux_heap_page_from_base, heap_page_from_base}, }, - vm::MultiVMSubversion, + vm::MultiVmSubversion, }; #[derive(Clone, Debug, Copy)] @@ -48,7 +47,7 @@ impl VmHook { pub(crate) fn from_opcode_memory( state: &VmLocalStateData<'_>, data: &BeforeExecutionData, - subversion: MultiVMSubversion, + subversion: MultiVmSubversion, ) -> Self { let opcode_variant = data.opcode.variant; let heap_page = @@ -90,7 +89,7 @@ impl VmHook { pub(crate) fn get_debug_log( state: &VmLocalStateData<'_>, memory: &SimpleMemory, - subversion: MultiVMSubversion, + subversion: MultiVmSubversion, ) -> String { let vm_hook_params: Vec<_> = get_vm_hook_params(memory, subversion) .into_iter() @@ -162,7 +161,7 @@ pub(crate) fn print_debug_if_needed( state: &VmLocalStateData<'_>, memory: &SimpleMemory, latest_returndata_ptr: Option, - subversion: MultiVMSubversion, + subversion: MultiVmSubversion, ) { let log = match hook { VmHook::DebugLog => get_debug_log(state, memory, subversion), @@ -211,7 +210,7 @@ pub(crate) fn get_calldata_page_via_abi(far_call_abi: &FarCallABI, base_page: Me } pub(crate) fn get_vm_hook_params( memory: &SimpleMemory, - subversion: MultiVMSubversion, + subversion: MultiVmSubversion, ) -> Vec { let start_position = get_vm_hook_params_start_position(subversion); memory.dump_page_content_as_u256_words( diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs index 7dc60ec5b0fb..601b7b8bd014 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs @@ -1,9 +1,7 @@ -pub(crate) use pubdata::PubdataInput; pub(crate) use snapshot::VmSnapshot; pub(crate) use transaction_data::TransactionData; pub(crate) use vm_state::new_vm_state; pub use vm_state::ZkSyncVmState; -mod pubdata; mod snapshot; mod transaction_data; mod vm_state; diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs deleted file mode 100644 index d07732ae4350..000000000000 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs +++ /dev/null @@ -1,123 +0,0 @@ -use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; - -use crate::utils::events::L1MessengerL2ToL1Log; - -/// Struct based on which the pubdata blob 
is formed -#[derive(Debug, Clone, Default)] -pub(crate) struct PubdataInput { - pub(crate) user_logs: Vec, - pub(crate) l2_to_l1_messages: Vec>, - pub(crate) published_bytecodes: Vec>, - pub(crate) state_diffs: Vec, -} - -impl PubdataInput { - pub(crate) fn build_pubdata(self, with_uncompressed_state_diffs: bool) -> Vec { - let mut l1_messenger_pubdata = vec![]; - - let PubdataInput { - user_logs, - l2_to_l1_messages, - published_bytecodes, - state_diffs, - } = self; - - // Encoding user L2->L1 logs. - // Format: `[(numberOfL2ToL1Logs as u32) || l2tol1logs[1] || ... || l2tol1logs[n]]` - l1_messenger_pubdata.extend((user_logs.len() as u32).to_be_bytes()); - for l2tol1log in user_logs { - l1_messenger_pubdata.extend(l2tol1log.packed_encoding()); - } - - // Encoding L2->L1 messages - // Format: `[(numberOfMessages as u32) || (messages[1].len() as u32) || messages[1] || ... || (messages[n].len() as u32) || messages[n]]` - l1_messenger_pubdata.extend((l2_to_l1_messages.len() as u32).to_be_bytes()); - for message in l2_to_l1_messages { - l1_messenger_pubdata.extend((message.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(message); - } - - // Encoding bytecodes - // Format: `[(numberOfBytecodes as u32) || (bytecodes[1].len() as u32) || bytecodes[1] || ... || (bytecodes[n].len() as u32) || bytecodes[n]]` - l1_messenger_pubdata.extend((published_bytecodes.len() as u32).to_be_bytes()); - for bytecode in published_bytecodes { - l1_messenger_pubdata.extend((bytecode.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(bytecode); - } - - // Encoding state diffs - // Format: `[size of compressed state diffs u32 || compressed state diffs || (# state diffs: initial + repeated) as u32 || sorted state diffs by ]` - let state_diffs_compressed = compress_state_diffs(state_diffs.clone()); - l1_messenger_pubdata.extend(state_diffs_compressed); - - if with_uncompressed_state_diffs { - l1_messenger_pubdata.extend((state_diffs.len() as u32).to_be_bytes()); - for state_diff in state_diffs { - l1_messenger_pubdata.extend(state_diff.encode_padded()); - } - } - - l1_messenger_pubdata - } -} - -#[cfg(test)] -mod tests { - use zksync_system_constants::{ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS}; - use zksync_utils::u256_to_h256; - - use super::*; - - #[test] - fn test_basic_pubdata_building() { - // Just using some constant addresses for tests - let addr1 = BOOTLOADER_ADDRESS; - let addr2 = ACCOUNT_CODE_STORAGE_ADDRESS; - - let user_logs = vec![L1MessengerL2ToL1Log { - l2_shard_id: 0, - is_service: false, - tx_number_in_block: 0, - sender: addr1, - key: 1.into(), - value: 128.into(), - }]; - - let l2_to_l1_messages = vec![hex::decode("deadbeef").unwrap()]; - - let published_bytecodes = vec![hex::decode("aaaabbbb").unwrap()]; - - // To cover more cases, we have two state diffs: - // one with an enumeration index present (and so a repeated write) and one without it. 
- let state_diffs = vec![ - StateDiffRecord { - address: addr2, - key: 155.into(), - derived_key: u256_to_h256(125.into()).0, - enumeration_index: 12, - initial_value: 11.into(), - final_value: 12.into(), - }, - StateDiffRecord { - address: addr2, - key: 156.into(), - derived_key: u256_to_h256(126.into()).0, - enumeration_index: 0, - initial_value: 0.into(), - final_value: 14.into(), - }, - ]; - - let input = PubdataInput { - user_logs, - l2_to_l1_messages, - published_bytecodes, - state_diffs, - }; - - let pubdata = - ethabi::encode(&[ethabi::Token::Bytes(input.build_pubdata(true))])[32..].to_vec(); - - assert_eq!(hex::encode(pubdata), "00000000000000000000000000000000000000000000000000000000000002c700000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000004aaaabbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"); - } -} diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs index 90948f2f89fd..33f923414eb3 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs @@ -1,19 +1,24 @@ use std::convert::TryInto; use zksync_types::{ + address_to_h256, + bytecode::BytecodeHash, ethabi::{encode, Address, Token}, fee::{encoding_len, Fee}, + h256_to_u256, l1::is_l1_tx_type, l2::{L2Tx, TransactionType}, transaction_request::{PaymasterParams, TransactionRequest}, web3::Bytes, Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, Nonce, Transaction, H256, U256, }; -use zksync_utils::{address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; -use crate::vm_latest::{ - constants::{MAX_GAS_PER_PUBDATA_BYTE, TX_MAX_COMPUTE_GAS_LIMIT}, - utils::overhead::derive_overhead, +use crate::{ + utils::bytecode::bytes_to_be_words, + vm_latest::{ + constants::{MAX_GAS_PER_PUBDATA_BYTE, TX_MAX_COMPUTE_GAS_LIMIT}, + utils::overhead::derive_overhead, + }, }; /// This structure represents the data that is used by @@ -203,16 +208,13 @@ impl TransactionData { let factory_deps_hashes = self .factory_deps .iter() - .map(|dep| 
h256_to_u256(hash_bytecode(dep))) + .map(|dep| BytecodeHash::for_bytecode(dep).value_u256()) .collect(); self.abi_encode_with_custom_factory_deps(factory_deps_hashes) } pub(crate) fn into_tokens(self) -> Vec { - let bytes = self.abi_encode(); - assert!(bytes.len() % 32 == 0); - - bytes_to_be_words(bytes) + bytes_to_be_words(&self.abi_encode()) } pub(crate) fn overhead_gas(&self) -> u32 { diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs index cb4b13eecdf0..03f306f36c52 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs @@ -11,14 +11,14 @@ use zk_evm_1_5_0::{ }, }; use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::{block::L2BlockHasher, Address, L2BlockNumber}; -use zksync_utils::h256_to_u256; +use zksync_types::{block::L2BlockHasher, h256_to_u256, Address, L2BlockNumber}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, L1BatchEnv, L2Block, SystemEnv, }, + utils::bytecode::bytes_to_be_words, vm_latest::{ bootloader_state::BootloaderState, constants::BOOTLOADER_HEAP_PAGE, @@ -83,36 +83,25 @@ pub(crate) fn new_vm_state( let mut memory = SimpleMemory::default(); let event_sink = InMemoryEventSink::default(); let precompiles_processor = PrecompilesProcessorWithHistory::::default(); + let mut decommittment_processor: DecommitterOracle = DecommitterOracle::new(storage); - - decommittment_processor.populate( - vec![( - h256_to_u256(system_env.base_system_smart_contracts.default_aa.hash), - system_env - .base_system_smart_contracts - .default_aa - .code - .clone(), - )], - Timestamp(0), - ); - + let mut initial_bytecodes = vec![( + h256_to_u256(system_env.base_system_smart_contracts.default_aa.hash), + bytes_to_be_words(&system_env.base_system_smart_contracts.default_aa.code), + )]; if let Some(evm_emulator) = &system_env.base_system_smart_contracts.evm_emulator { - decommittment_processor.populate( - vec![(h256_to_u256(evm_emulator.hash), evm_emulator.code.clone())], - Timestamp(0), - ); + initial_bytecodes.push(( + h256_to_u256(evm_emulator.hash), + bytes_to_be_words(&evm_emulator.code), + )); } + decommittment_processor.populate(initial_bytecodes, Timestamp(0)); memory.populate( vec![( BOOTLOADER_CODE_PAGE, - system_env - .base_system_smart_contracts - .bootloader - .code - .clone(), + bytes_to_be_words(&system_env.base_system_smart_contracts.bootloader.code), )], Timestamp(0), ); @@ -191,6 +180,7 @@ pub(crate) fn new_vm_state( system_env.execution_mode, bootloader_initial_memory, first_l2_block, + system_env.version, ); (vm, bootloader_state) diff --git a/core/lib/multivm/src/versions/vm_latest/types/l1_batch.rs b/core/lib/multivm/src/versions/vm_latest/types/l1_batch.rs index b3bf15cb1be5..89b22d328ac5 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/l1_batch.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/l1_batch.rs @@ -1,5 +1,4 @@ -use zksync_types::U256; -use zksync_utils::{address_to_u256, h256_to_u256}; +use zksync_types::{address_to_u256, h256_to_u256, U256}; use crate::{interface::L1BatchEnv, vm_latest::utils::fee::get_batch_base_fee}; diff --git a/core/lib/multivm/src/versions/vm_latest/utils/fee.rs b/core/lib/multivm/src/versions/vm_latest/utils/fee.rs index 666fcca87e12..58b457dce68a 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/fee.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/fee.rs @@ -1,6 +1,5 @@ //! 
Utility functions for vm use zksync_types::fee_model::PubdataIndependentBatchFeeModelInput; -use zksync_utils::ceil_div; use crate::{interface::L1BatchEnv, vm_latest::constants::MAX_GAS_PER_PUBDATA_BYTE}; @@ -18,11 +17,14 @@ pub(crate) fn derive_base_fee_and_gas_per_pubdata( // publish enough public data while compensating us for it. let base_fee = std::cmp::max( fair_l2_gas_price, - ceil_div(fair_pubdata_price, MAX_GAS_PER_PUBDATA_BYTE), + fair_pubdata_price.div_ceil(MAX_GAS_PER_PUBDATA_BYTE), ); - let gas_per_pubdata = ceil_div(fair_pubdata_price, base_fee); - + let gas_per_pubdata = if fair_pubdata_price == 0 { + 0 + } else { + fair_pubdata_price.div_ceil(base_fee) + }; (base_fee, gas_per_pubdata) } diff --git a/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs b/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs index 59d3eb0ef0fc..840f1687ccfa 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs @@ -4,9 +4,9 @@ use zksync_system_constants::{ SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES, }; use zksync_types::{ - block::unpack_block_info, web3::keccak256, AccountTreeId, L2BlockNumber, StorageKey, H256, U256, + block::unpack_block_info, h256_to_u256, u256_to_h256, web3::keccak256, AccountTreeId, + L2BlockNumber, StorageKey, H256, U256, }; -use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::interface::{ storage::{ReadStorage, StoragePtr}, diff --git a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs index aeb66755f514..97483633bc54 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs @@ -1,9 +1,6 @@ //! Utility functions for the VM. -use once_cell::sync::Lazy; use zk_evm_1_5_0::aux_structures::MemoryPage; -use zksync_types::{H256, KNOWN_CODES_STORAGE_ADDRESS}; -use zksync_vm_interface::VmEvent; pub mod fee; pub mod l2_blocks; @@ -14,24 +11,3 @@ pub mod transaction_encoding; pub const fn heap_page_from_base(base: MemoryPage) -> MemoryPage { MemoryPage(base.0 + 2) } - -/// Extracts all bytecodes marked as known on the system contracts. -pub fn extract_bytecodes_marked_as_known(all_generated_events: &[VmEvent]) -> Vec { - static PUBLISHED_BYTECODE_SIGNATURE: Lazy = Lazy::new(|| { - ethabi::long_signature( - "MarkedAsKnown", - &[ethabi::ParamType::FixedBytes(32), ethabi::ParamType::Bool], - ) - }); - - all_generated_events - .iter() - .filter(|event| { - // Filter events from the deployer contract that match the expected signature. 
- event.address == KNOWN_CODES_STORAGE_ADDRESS - && event.indexed_topics.len() == 3 - && event.indexed_topics[0] == *PUBLISHED_BYTECODE_SIGNATURE - }) - .map(|event| event.indexed_topics[1]) - .collect() -} diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index f4cc1580e935..ada20af9fa3c 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -1,27 +1,29 @@ -use std::collections::HashMap; +use std::{collections::HashMap, rc::Rc}; use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_queries; use zksync_types::{ + h256_to_u256, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, + u256_to_h256, vm::VmVersion, Transaction, H256, }; -use zksync_utils::{be_words_to_bytes, h256_to_u256, u256_to_h256}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmTrackingContracts, }, - utils::events::extract_l2tol1logs_from_l1_messenger, + utils::{bytecode::be_words_to_bytes, events::extract_l2tol1logs_from_l1_messenger}, vm_latest::{ bootloader_state::BootloaderState, old_vm::{events::merge_events, history_recorder::HistoryEnabled}, - tracers::dispatcher::TracerDispatcher, + tracers::{dispatcher::TracerDispatcher, PubdataTracer}, types::internals::{new_vm_state, VmSnapshot, ZkSyncVmState}, }, HistoryMode, @@ -33,14 +35,16 @@ use crate::{ /// version was released with increased bootloader memory. The version with the small bootloader memory /// is available only on internal staging environments. #[derive(Debug, Copy, Clone)] -pub(crate) enum MultiVMSubversion { +pub(crate) enum MultiVmSubversion { /// The initial version of v1.5.0, available only on staging environments. SmallBootloaderMemory, /// The final correct version of v1.5.0 IncreasedBootloaderMemory, + /// VM for post-gateway versions. 
+ Gateway, } -impl MultiVMSubversion { +impl MultiVmSubversion { #[cfg(test)] pub(crate) fn latest() -> Self { Self::IncreasedBootloaderMemory @@ -49,12 +53,13 @@ impl MultiVMSubversion { #[derive(Debug)] pub(crate) struct VmVersionIsNotVm150Error; -impl TryFrom for MultiVMSubversion { +impl TryFrom for MultiVmSubversion { type Error = VmVersionIsNotVm150Error; fn try_from(value: VmVersion) -> Result { match value { VmVersion::Vm1_5_0SmallBootloaderMemory => Ok(Self::SmallBootloaderMemory), VmVersion::Vm1_5_0IncreasedBootloaderMemory => Ok(Self::IncreasedBootloaderMemory), + VmVersion::VmGateway => Ok(Self::Gateway), _ => Err(VmVersionIsNotVm150Error), } } @@ -72,7 +77,7 @@ pub struct Vm { pub(crate) batch_env: L1BatchEnv, // Snapshots for the current run pub(crate) snapshots: Vec, - pub(crate) subversion: MultiVMSubversion, + pub(crate) subversion: MultiVmSubversion, _phantom: std::marker::PhantomData, } @@ -81,16 +86,24 @@ impl Vm { self.state.local_state.callstack.current.ergs_remaining } - pub(crate) fn decommit_bytecodes(&self, hashes: &[H256]) -> HashMap> { - let bytecodes = hashes.iter().map(|&hash| { - let bytecode_words = self - .state - .decommittment_processor + pub(crate) fn decommit_dynamic_bytecodes( + &self, + candidate_hashes: impl Iterator, + ) -> HashMap> { + let decommitter = &self.state.decommittment_processor; + let bytecodes = candidate_hashes.filter_map(|hash| { + let int_hash = h256_to_u256(hash); + if !decommitter.dynamic_bytecode_hashes.contains(&int_hash) { + return None; + } + let bytecode = decommitter .known_bytecodes .inner() - .get(&h256_to_u256(hash)) - .unwrap_or_else(|| panic!("Bytecode with hash {hash:?} not found")); - (hash, be_words_to_bytes(bytecode_words)) + .get(&int_hash) + .unwrap_or_else(|| { + panic!("Bytecode with hash {hash:?} not found"); + }); + Some((hash, be_words_to_bytes(bytecode))) }); bytecodes.collect() } @@ -134,18 +147,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. 
fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode, None) + self.inspect_inner(tracer, execution_mode.into(), None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -177,19 +195,30 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + let pubdata_tracer = Some(PubdataTracer::new( + self.batch_env.clone(), + VmExecutionMode::Batch, + self.subversion, + Some(pubdata_builder.clone()), + )); + + let result = self.inspect_inner( + &mut TracerDispatcher::default(), + VmExecutionMode::Batch, + pubdata_tracer, + ); let execution_state = self.get_current_execution_state(); - let bootloader_memory = self.bootloader_state.bootloader_memory(); + let bootloader_memory = self + .bootloader_state + .bootloader_memory(pubdata_builder.as_ref()); FinishedL1Batch { block_tip_execution_result: result, final_execution_state: execution_state, final_bootloader_memory: Some(bootloader_memory), pubdata_input: Some( self.bootloader_state - .get_pubdata_information() - .clone() - .build_pubdata(false), + .settlement_layer_pubdata(pubdata_builder.as_ref()), ), state_diffs: Some( self.bootloader_state @@ -218,7 +247,7 @@ impl Vm { batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr, - subversion: MultiVMSubversion, + subversion: MultiVmSubversion, ) -> Self { let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); Self { diff --git a/core/lib/multivm/src/versions/vm_m5/events.rs b/core/lib/multivm/src/versions/vm_m5/events.rs index a444ad37feb5..659b41cc2060 100644 --- a/core/lib/multivm/src/versions/vm_m5/events.rs +++ b/core/lib/multivm/src/versions/vm_m5/events.rs @@ -1,8 +1,7 @@ use zk_evm_1_3_1::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use zksync_types::{h256_to_address, L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use crate::interface::VmEvent; +use crate::{interface::VmEvent, utils::bytecode::be_chunks_to_h256_words}; #[derive(Clone)] pub struct SolidityLikeEvent { @@ -135,7 +134,7 @@ pub fn merge_events(events: Vec) -> Vec { .filter(|e| e.address == EVENT_WRITER_ADDRESS) .map(|event| { // The events writer events where the first topic is the actual address of the event and the rest of the topics are real topics - let address = h256_to_account_address(&H256(event.topics[0])); + let address = h256_to_address(&H256(event.topics[0])); let topics = event.topics.into_iter().skip(1).collect(); SolidityLikeEvent { diff --git a/core/lib/multivm/src/versions/vm_m5/history_recorder.rs b/core/lib/multivm/src/versions/vm_m5/history_recorder.rs index f744be32d0bf..f7923e42b667 100644 --- a/core/lib/multivm/src/versions/vm_m5/history_recorder.rs +++ b/core/lib/multivm/src/versions/vm_m5/history_recorder.rs @@ -9,8 +9,7 @@ use zk_evm_1_3_1::{ vm_state::PrimitiveValue, zkevm_opcode_defs::{self}, }; -use zksync_types::{StorageKey, U256}; -use zksync_utils::{h256_to_u256, u256_to_h256}; +use zksync_types::{h256_to_u256, u256_to_h256, StorageKey, U256}; use crate::vm_m5::storage::{Storage, StoragePtr}; diff --git 
a/core/lib/multivm/src/versions/vm_m5/oracle_tools.rs b/core/lib/multivm/src/versions/vm_m5/oracle_tools.rs index 32930f31cd71..f430ad346387 100644 --- a/core/lib/multivm/src/versions/vm_m5/oracle_tools.rs +++ b/core/lib/multivm/src/versions/vm_m5/oracle_tools.rs @@ -10,7 +10,7 @@ use crate::vm_m5::{ storage::StorageOracle, }, storage::{Storage, StoragePtr}, - vm_instance::MultiVMSubversion, + vm_instance::MultiVmSubversion, }; #[derive(Debug)] @@ -25,7 +25,7 @@ pub struct OracleTools { } impl OracleTools { - pub fn new(storage_pointer: StoragePtr, refund_state: MultiVMSubversion) -> Self { + pub fn new(storage_pointer: StoragePtr, refund_state: MultiVmSubversion) -> Self { Self { storage: StorageOracle::new(storage_pointer.clone(), refund_state), memory: SimpleMemory::default(), diff --git a/core/lib/multivm/src/versions/vm_m5/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_m5/oracles/decommitter.rs index bc43c72966ea..7e2264201e11 100644 --- a/core/lib/multivm/src/versions/vm_m5/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_m5/oracles/decommitter.rs @@ -6,13 +6,15 @@ use zk_evm_1_3_1::{ DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, }, }; -use zksync_types::U256; -use zksync_utils::{bytecode::bytecode_len_in_words, bytes_to_be_words, u256_to_h256}; +use zksync_types::{u256_to_h256, U256}; use super::OracleWithHistory; -use crate::vm_m5::{ - history_recorder::HistoryRecorder, - storage::{Storage, StoragePtr}, +use crate::{ + utils::bytecode::{bytecode_len_in_words, bytes_to_be_words}, + vm_m5::{ + history_recorder::HistoryRecorder, + storage::{Storage, StoragePtr}, + }, }; #[derive(Debug)] @@ -53,7 +55,7 @@ impl DecommitterOracle { .load_factory_dep(u256_to_h256(hash)) .expect("Trying to decode unexisting hash"); - let value = bytes_to_be_words(value); + let value = bytes_to_be_words(&value); self.known_bytecodes.insert(hash, value.clone(), timestamp); value } diff --git a/core/lib/multivm/src/versions/vm_m5/oracles/storage.rs b/core/lib/multivm/src/versions/vm_m5/oracles/storage.rs index 7ccfdf2f30c7..90bd9cfaab69 100644 --- a/core/lib/multivm/src/versions/vm_m5/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_m5/oracles/storage.rs @@ -7,10 +7,9 @@ use zk_evm_1_3_1::{ zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use zksync_types::{ - utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogKind, - BOOTLOADER_ADDRESS, U256, + u256_to_h256, utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, + StorageLogKind, BOOTLOADER_ADDRESS, U256, }; -use zksync_utils::u256_to_h256; use super::OracleWithHistory; use crate::vm_m5::{ @@ -19,7 +18,7 @@ use crate::vm_m5::{ }, storage::{Storage, StoragePtr}, utils::StorageLogQuery, - vm_instance::MultiVMSubversion, + vm_instance::MultiVmSubversion, }; // While the storage does not support different shards, it was decided to write the @@ -46,7 +45,7 @@ pub struct StorageOracle { // to cover this slot. 
pub paid_changes: HistoryRecorder>, - pub refund_state: MultiVMSubversion, + pub refund_state: MultiVmSubversion, } impl OracleWithHistory for StorageOracle { @@ -64,7 +63,7 @@ impl OracleWithHistory for StorageOracle { } impl StorageOracle { - pub fn new(storage: StoragePtr, refund_state: MultiVMSubversion) -> Self { + pub fn new(storage: StoragePtr, refund_state: MultiVmSubversion) -> Self { Self { storage: HistoryRecorder::from_inner(StorageWrapper::new(storage)), frames_stack: Default::default(), @@ -75,10 +74,10 @@ impl StorageOracle { fn is_storage_key_free(&self, key: &StorageKey) -> bool { match self.refund_state { - MultiVMSubversion::V1 => { + MultiVmSubversion::V1 => { key.address() == &zksync_system_constants::SYSTEM_CONTEXT_ADDRESS } - MultiVMSubversion::V2 => { + MultiVmSubversion::V2 => { key.address() == &zksync_system_constants::SYSTEM_CONTEXT_ADDRESS || *key == storage_key_for_eth_balance(&BOOTLOADER_ADDRESS) } diff --git a/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs b/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs index 45f8ed88f834..ea92307d1224 100644 --- a/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs +++ b/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs @@ -16,22 +16,23 @@ use zk_evm_1_3_1::{ }, }; use zksync_types::{ - get_code_key, web3::keccak256, AccountTreeId, Address, StorageKey, - ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, H256, - KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, - L2_BASE_TOKEN_ADDRESS, MSG_VALUE_SIMULATOR_ADDRESS, SYSTEM_CONTEXT_ADDRESS, U256, -}; -use zksync_utils::{ - be_bytes_to_safe_address, h256_to_account_address, u256_to_account_address, u256_to_h256, + get_code_key, h256_to_address, u256_to_address, u256_to_h256, web3::keccak256, AccountTreeId, + Address, StorageKey, ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS, + CONTRACT_DEPLOYER_ADDRESS, H256, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, + L1_MESSENGER_ADDRESS, L2_BASE_TOKEN_ADDRESS, MSG_VALUE_SIMULATOR_ADDRESS, + SYSTEM_CONTEXT_ADDRESS, U256, }; -use crate::vm_m5::{ - errors::VmRevertReasonParsingResult, - memory::SimpleMemory, - storage::{Storage, StoragePtr}, - utils::{aux_heap_page_from_base, heap_page_from_base}, - vm_instance::{get_vm_hook_params, VM_HOOK_POSITION}, - vm_with_bootloader::BOOTLOADER_HEAP_PAGE, +use crate::{ + utils::bytecode::be_bytes_to_safe_address, + vm_m5::{ + errors::VmRevertReasonParsingResult, + memory::SimpleMemory, + storage::{Storage, StoragePtr}, + utils::{aux_heap_page_from_base, heap_page_from_base}, + vm_instance::{get_vm_hook_params, VM_HOOK_POSITION}, + vm_with_bootloader::BOOTLOADER_HEAP_PAGE, + }, }; pub trait ExecutionEndTracer: Tracer { @@ -322,7 +323,7 @@ impl ValidationTracer { // The user is allowed to touch its own slots or slots semantically related to him. 
let valid_users_slot = address == self.user_address - || u256_to_account_address(&key) == self.user_address + || u256_to_address(&key) == self.user_address || self.auxilary_allowed_slots.contains(&u256_to_h256(key)); if valid_users_slot { return true; @@ -383,7 +384,7 @@ impl ValidationTracer { let packed_abi = data.src0_value.value; let call_destination_value = data.src1_value.value; - let called_address = u256_to_account_address(&call_destination_value); + let called_address = u256_to_address(&call_destination_value); let far_call_abi = FarCallABI::from_u256(packed_abi); if called_address == KECCAK256_PRECOMPILE_ADDRESS @@ -450,7 +451,7 @@ impl ValidationTracer { let value = self.storage.borrow_mut().get_value(&storage_key); return Ok(NewTrustedValidationItems { - new_trusted_addresses: vec![h256_to_account_address(&value)], + new_trusted_addresses: vec![h256_to_address(&value)], ..Default::default() }); } diff --git a/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs b/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs index 1fd8c2460930..8eca2ef5cd86 100644 --- a/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs @@ -4,11 +4,11 @@ use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_querie use itertools::Itertools; use zk_evm_1_3_1::aux_structures::{LogQuery, Timestamp}; use zksync_types::{StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; -use zksync_utils::bytecode::bytecode_len_in_bytes; use crate::{ glue::GlueInto, interface::VmEvent, + utils::bytecode::bytecode_len_in_bytes, vm_m5::{ oracles::storage::storage_key_of_log, storage::Storage, utils::collect_storage_log_queries_after_timestamp, vm_instance::VmInstance, @@ -35,9 +35,7 @@ impl VmInstance { let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() - .map(|bytecodehash| { - bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD - }) + .map(|bytecode_hash| bytecode_len_in_bytes(bytecode_hash) + PUBLISH_BYTECODE_OVERHEAD) .sum(); storage_writes_pubdata_published diff --git a/core/lib/multivm/src/versions/vm_m5/refunds.rs b/core/lib/multivm/src/versions/vm_m5/refunds.rs index fd4e2788f035..8b0d3e5d84c4 100644 --- a/core/lib/multivm/src/versions/vm_m5/refunds.rs +++ b/core/lib/multivm/src/versions/vm_m5/refunds.rs @@ -1,6 +1,5 @@ use zk_evm_1_3_1::aux_structures::Timestamp; -use zksync_types::U256; -use zksync_utils::ceil_div_u256; +use zksync_types::{ceil_div_u256, U256}; use crate::vm_m5::{ storage::Storage, diff --git a/core/lib/multivm/src/versions/vm_m5/test_utils.rs b/core/lib/multivm/src/versions/vm_m5/test_utils.rs index d7c0dfb9f6d0..e0e377e85971 100644 --- a/core/lib/multivm/src/versions/vm_m5/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/test_utils.rs @@ -14,13 +14,13 @@ use zk_evm_1_3_1::{ }; use zksync_contracts::deployer_contract; use zksync_types::{ + address_to_h256, + bytecode::BytecodeHash, ethabi::{Address, Token}, + h256_to_address, u256_to_h256, web3::keccak256, Execute, Nonce, StorageKey, StorageValue, CONTRACT_DEPLOYER_ADDRESS, H256, U256, }; -use zksync_utils::{ - address_to_h256, bytecode::hash_bytecode, h256_to_account_address, u256_to_h256, -}; use super::utils::StorageLogQuery; use crate::vm_m5::{ @@ -143,7 +143,7 @@ pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute { let params = [ Token::FixedBytes(vec![0u8; 32]), - Token::FixedBytes(hash_bytecode(code).0.to_vec()), + 
Token::FixedBytes(BytecodeHash::for_bytecode(code).value().0.to_vec()), Token::Bytes(calldata.to_vec()), ]; let calldata = contract_function @@ -172,5 +172,5 @@ pub fn get_create_zksync_address(sender_address: Address, sender_nonce: Nonce) - let hash = keccak256(&digest); - h256_to_account_address(&H256(hash)) + h256_to_address(&H256(hash)) } diff --git a/core/lib/multivm/src/versions/vm_m5/transaction_data.rs b/core/lib/multivm/src/versions/vm_m5/transaction_data.rs index b64e3f770185..236c4c3d4122 100644 --- a/core/lib/multivm/src/versions/vm_m5/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_m5/transaction_data.rs @@ -1,17 +1,21 @@ use zk_evm_1_3_1::zkevm_opcode_defs::system_params::{MAX_PUBDATA_PER_BLOCK, MAX_TX_ERGS_LIMIT}; use zksync_types::{ + address_to_h256, + bytecode::BytecodeHash, + ceil_div_u256, ethabi::{encode, Address, Token}, fee::encoding_len, + h256_to_u256, l2::TransactionType, ExecuteTransactionCommon, Transaction, U256, }; -use zksync_utils::{ - address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, ceil_div_u256, h256_to_u256, -}; use super::vm_with_bootloader::MAX_GAS_PER_PUBDATA_BYTE; -use crate::vm_m5::vm_with_bootloader::{ - BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, MAX_TXS_IN_BLOCK, +use crate::{ + utils::bytecode::bytes_to_be_words, + vm_m5::vm_with_bootloader::{ + BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, MAX_TXS_IN_BLOCK, + }, }; const L1_TX_TYPE: u8 = 255; @@ -165,16 +169,13 @@ impl TransactionData { let factory_deps_hashes = self .factory_deps .iter() - .map(|dep| h256_to_u256(hash_bytecode(dep))) + .map(|dep| BytecodeHash::for_bytecode(dep).value_u256()) .collect(); self.abi_encode_with_custom_factory_deps(factory_deps_hashes) } pub fn into_tokens(self) -> Vec { - let bytes = self.abi_encode(); - assert!(bytes.len() % 32 == 0); - - bytes_to_be_words(bytes) + bytes_to_be_words(&self.abi_encode()) } pub fn overhead_gas(&self) -> u32 { diff --git a/core/lib/multivm/src/versions/vm_m5/utils.rs b/core/lib/multivm/src/versions/vm_m5/utils.rs index a38618395b1f..de8c746bfb80 100644 --- a/core/lib/multivm/src/versions/vm_m5/utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/utils.rs @@ -7,8 +7,7 @@ use zk_evm_1_3_1::{ }; use zksync_contracts::BaseSystemContracts; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; -use zksync_types::{Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; -use zksync_utils::h256_to_u256; +use zksync_types::{h256_to_u256, Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; use crate::{ glue::GlueInto, diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index 5a26506f3463..bd104b868401 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -1,16 +1,18 @@ -use zksync_types::{vm::VmVersion, Transaction}; -use zksync_utils::h256_to_u256; +use std::rc::Rc; + +use zksync_types::{h256_to_u256, vm::VmVersion, Transaction}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_m5::{ storage::Storage, - vm_instance::{MultiVMSubversion, 
VmInstance}, + vm_instance::{MultiVmSubversion, VmInstance}, }, }; @@ -26,7 +28,7 @@ impl Vm { batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr, - vm_sub_version: MultiVMSubversion, + vm_sub_version: MultiVmSubversion, ) -> Self { let oracle_tools = crate::vm_m5::OracleTools::new(storage.clone(), vm_sub_version); let block_properties = zk_evm_1_3_1::block_properties::BlockProperties { @@ -60,21 +62,24 @@ impl VmInterface for Vm { /// Tracers are not supported for here we use `()` as a placeholder type TracerDispatcher = (); - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { crate::vm_m5::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, &tx, self.system_env.execution_mode.glue_into(), - ) + ); + PushTransactionResult { + compressed_bytecodes: (&[]).into(), // bytecode compression isn't supported + } } fn inspect( &mut self, _tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { match execution_mode { - VmExecutionMode::OneTx => match self.system_env.execution_mode { + InspectExecutionMode::OneTx => match self.system_env.execution_mode { TxExecutionMode::VerifyExecute => self.vm.execute_next_tx().glue_into(), TxExecutionMode::EstimateFee | TxExecutionMode::EthCall => self .vm @@ -83,8 +88,7 @@ impl VmInterface for Vm { ) .glue_into(), }, - VmExecutionMode::Batch => self.finish_batch().block_tip_execution_result, - VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), + InspectExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), } } @@ -106,11 +110,11 @@ impl VmInterface for Vm { // Bytecode compression isn't supported ( Ok(vec![].into()), - self.inspect(&mut (), VmExecutionMode::OneTx), + self.inspect(&mut (), InspectExecutionMode::OneTx), ) } - fn finish_batch(&mut self) -> FinishedL1Batch { + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { self.vm .execute_till_block_end( crate::vm_m5::vm_with_bootloader::BootloaderJobType::BlockPostprocessing, @@ -123,8 +127,8 @@ impl VmFactory for Vm { fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { let vm_version: VmVersion = system_env.version.into(); let vm_sub_version = match vm_version { - VmVersion::M5WithoutRefunds => MultiVMSubversion::V1, - VmVersion::M5WithRefunds => MultiVMSubversion::V2, + VmVersion::M5WithoutRefunds => MultiVmSubversion::V1, + VmVersion::M5WithRefunds => MultiVmSubversion::V2, _ => panic!("Unsupported protocol version for vm_m5: {:?}", vm_version), }; Self::new_with_subversion(batch_env, system_env, storage, vm_sub_version) diff --git a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs index 4a96c4a750cc..94b86bce7ea7 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs @@ -81,7 +81,7 @@ pub(crate) fn get_vm_hook_params(memory: &SimpleMemory) -> Vec { /// /// This enum allows to execute blocks with the same VM but different support for refunds. #[derive(Debug, Copy, Clone)] -pub enum MultiVMSubversion { +pub enum MultiVmSubversion { /// Initial VM M5 version, refunds are fully disabled. V1, /// Refunds were enabled. ETH balance for bootloader address was marked as a free slot. @@ -99,7 +99,7 @@ pub struct VmInstance { pub snapshots: Vec, /// MultiVM-specific addition. See enum doc-comment for details. 
- pub(crate) refund_state: MultiVMSubversion, + pub(crate) refund_state: MultiVmSubversion, } /// This structure stores data that accumulates during the VM run. @@ -560,12 +560,12 @@ impl VmInstance { let refund_to_propose; let refund_slot; match self.refund_state { - MultiVMSubversion::V1 => { + MultiVmSubversion::V1 => { refund_to_propose = bootloader_refund; refund_slot = OPERATOR_REFUNDS_OFFSET + self.bootloader_state.tx_to_execute() - 1; } - MultiVMSubversion::V2 => { + MultiVmSubversion::V2 => { let gas_spent_on_pubdata = tracer .gas_spent_on_pubdata(&self.state.local_state) - spent_pubdata_counter_before; diff --git a/core/lib/multivm/src/versions/vm_m5/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_m5/vm_with_bootloader.rs index cd2979db5e57..0a7df48df80f 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm_with_bootloader.rs @@ -14,15 +14,13 @@ use zk_evm_1_3_1::{ use zksync_contracts::BaseSystemContracts; use zksync_system_constants::MAX_L2_TX_GAS_LIMIT; use zksync_types::{ - fee_model::L1PeggedBatchFeeModelInput, Address, Transaction, BOOTLOADER_ADDRESS, - L1_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, -}; -use zksync_utils::{ - address_to_u256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, misc::ceil_div, + address_to_u256, bytecode::BytecodeHash, fee_model::L1PeggedBatchFeeModelInput, h256_to_u256, + Address, Transaction, BOOTLOADER_ADDRESS, L1_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, }; use crate::{ interface::L1BatchEnv, + utils::bytecode::bytes_to_be_words, vm_m5::{ bootloader_state::BootloaderState, oracles::OracleWithHistory, @@ -31,7 +29,7 @@ use crate::{ utils::{ code_page_candidate_from_base, heap_page_from_base, BLOCK_GAS_LIMIT, INITIAL_BASE_PAGE, }, - vm_instance::{MultiVMSubversion, VmInstance, ZkSyncVmState}, + vm_instance::{MultiVmSubversion, VmInstance, ZkSyncVmState}, OracleTools, }, }; @@ -73,8 +71,11 @@ pub(crate) fn eth_price_per_pubdata_byte(l1_gas_price: u64) -> u64 { pub(crate) fn base_fee_to_gas_per_pubdata(l1_gas_price: u64, base_fee: u64) -> u64 { let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price); - - ceil_div(eth_price_per_pubdata_byte, base_fee) + if eth_price_per_pubdata_byte == 0 { + 0 + } else { + eth_price_per_pubdata_byte.div_ceil(base_fee) + } } pub(crate) fn derive_base_fee_and_gas_per_pubdata( @@ -91,7 +92,7 @@ pub(crate) fn derive_base_fee_and_gas_per_pubdata( // publish enough public data while compensating us for it. let base_fee = std::cmp::max( fair_l2_gas_price, - ceil_div(eth_price_per_pubdata_byte, MAX_GAS_PER_PUBDATA_BYTE), + eth_price_per_pubdata_byte.div_ceil(MAX_GAS_PER_PUBDATA_BYTE), ); ( @@ -220,7 +221,7 @@ impl Default for TxExecutionMode { } pub fn init_vm( - refund_state: MultiVMSubversion, + refund_state: MultiVmSubversion, oracle_tools: OracleTools, block_context: BlockContextMode, block_properties: BlockProperties, @@ -239,7 +240,7 @@ pub fn init_vm( } pub fn init_vm_with_gas_limit( - refund_state: MultiVMSubversion, + refund_state: MultiVmSubversion, oracle_tools: OracleTools, block_context: BlockContextMode, block_properties: BlockProperties, @@ -336,7 +337,7 @@ impl BlockContextMode { // This method accepts a custom bootloader code. // It should be used only in tests. 
pub fn init_vm_inner( - refund_state: MultiVMSubversion, + refund_state: MultiVmSubversion, mut oracle_tools: OracleTools, block_context: BlockContextMode, block_properties: BlockProperties, @@ -347,7 +348,7 @@ pub fn init_vm_inner( oracle_tools.decommittment_processor.populate( vec![( h256_to_u256(base_system_contract.default_aa.hash), - base_system_contract.default_aa.code.clone(), + bytes_to_be_words(&base_system_contract.default_aa.code), )], Timestamp(0), ); @@ -355,7 +356,7 @@ pub fn init_vm_inner( oracle_tools.memory.populate( vec![( BOOTLOADER_CODE_PAGE, - base_system_contract.bootloader.code.clone(), + bytes_to_be_words(&base_system_contract.bootloader.code), )], Timestamp(0), ); @@ -583,11 +584,8 @@ fn formal_calldata_abi() -> PrimitiveValue { } pub(crate) fn bytecode_to_factory_dep(bytecode: Vec) -> (U256, Vec) { - let bytecode_hash = hash_bytecode(&bytecode); - let bytecode_hash = U256::from_big_endian(bytecode_hash.as_bytes()); - - let bytecode_words = bytes_to_be_words(bytecode); - + let bytecode_hash = BytecodeHash::for_bytecode(&bytecode).value_u256(); + let bytecode_words = bytes_to_be_words(&bytecode); (bytecode_hash, bytecode_words) } diff --git a/core/lib/multivm/src/versions/vm_m6/events.rs b/core/lib/multivm/src/versions/vm_m6/events.rs index a444ad37feb5..659b41cc2060 100644 --- a/core/lib/multivm/src/versions/vm_m6/events.rs +++ b/core/lib/multivm/src/versions/vm_m6/events.rs @@ -1,8 +1,7 @@ use zk_evm_1_3_1::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use zksync_types::{h256_to_address, L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use crate::interface::VmEvent; +use crate::{interface::VmEvent, utils::bytecode::be_chunks_to_h256_words}; #[derive(Clone)] pub struct SolidityLikeEvent { @@ -135,7 +134,7 @@ pub fn merge_events(events: Vec) -> Vec { .filter(|e| e.address == EVENT_WRITER_ADDRESS) .map(|event| { // The events writer events where the first topic is the actual address of the event and the rest of the topics are real topics - let address = h256_to_account_address(&H256(event.topics[0])); + let address = h256_to_address(&H256(event.topics[0])); let topics = event.topics.into_iter().skip(1).collect(); SolidityLikeEvent { diff --git a/core/lib/multivm/src/versions/vm_m6/history_recorder.rs b/core/lib/multivm/src/versions/vm_m6/history_recorder.rs index 63dc9be4933a..5f7a116c62ac 100644 --- a/core/lib/multivm/src/versions/vm_m6/history_recorder.rs +++ b/core/lib/multivm/src/versions/vm_m6/history_recorder.rs @@ -9,8 +9,7 @@ use zk_evm_1_3_1::{ vm_state::PrimitiveValue, zkevm_opcode_defs::{self}, }; -use zksync_types::{StorageKey, U256}; -use zksync_utils::{h256_to_u256, u256_to_h256}; +use zksync_types::{h256_to_u256, u256_to_h256, StorageKey, U256}; use crate::vm_m6::storage::{Storage, StoragePtr}; diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_m6/oracles/decommitter.rs index fe59580e2ce9..5bd33d6d49c1 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/decommitter.rs @@ -6,13 +6,15 @@ use zk_evm_1_3_1::{ DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, }, }; -use zksync_types::U256; -use zksync_utils::{bytecode::bytecode_len_in_words, bytes_to_be_words, u256_to_h256}; +use zksync_types::{u256_to_h256, U256}; use super::OracleWithHistory; 
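// The `bytecode_to_factory_dep` rewrite above folds two helper calls into the new
// `BytecodeHash` API; a minimal sketch of the pattern (assuming the crate-internal
// `bytes_to_be_words(&[u8]) -> Vec<U256>` helper that this diff introduces):
//
//     use zksync_types::{bytecode::BytecodeHash, U256};
//     use crate::utils::bytecode::bytes_to_be_words;
//
//     fn factory_dep(bytecode: &[u8]) -> (U256, Vec<U256>) {
//         // one call instead of hash_bytecode + U256::from_big_endian
//         let hash = BytecodeHash::for_bytecode(bytecode).value_u256();
//         // bytes_to_be_words now borrows the bytecode rather than consuming it
//         (hash, bytes_to_be_words(bytecode))
//     }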
-use crate::vm_m6::{ - history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory}, - storage::{Storage, StoragePtr}, +use crate::{ + utils::bytecode::{bytecode_len_in_words, bytes_to_be_words}, + vm_m6::{ + history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory}, + storage::{Storage, StoragePtr}, + }, }; /// The main job of the DecommiterOracle is to implement the DecommitmentProcessor trait - that is @@ -59,7 +61,7 @@ impl DecommitterOracle { .load_factory_dep(u256_to_h256(hash)) .expect("Trying to decode unexisting hash"); - let value = bytes_to_be_words(value); + let value = bytes_to_be_words(&value); self.known_bytecodes.insert(hash, value.clone(), timestamp); value } diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/storage.rs b/core/lib/multivm/src/versions/vm_m6/oracles/storage.rs index 5393b9e48169..7a59754140c9 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/storage.rs @@ -6,10 +6,9 @@ use zk_evm_1_3_1::{ zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use zksync_types::{ - utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogKind, - BOOTLOADER_ADDRESS, U256, + u256_to_h256, utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, + StorageLogKind, BOOTLOADER_ADDRESS, U256, }; -use zksync_utils::u256_to_h256; use super::OracleWithHistory; use crate::vm_m6::{ diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs index 4d963d08952d..9b94ec9de84f 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs @@ -9,8 +9,7 @@ use zksync_system_constants::{ ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, SHA256_PRECOMPILE_ADDRESS, }; -use zksync_types::U256; -use zksync_utils::u256_to_h256; +use zksync_types::{u256_to_h256, U256}; use crate::vm_m6::{ history_recorder::HistoryMode, diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/validation.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/validation.rs index f046ba5befe9..e6b040b93f5d 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/validation.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/validation.rs @@ -11,22 +11,25 @@ use zksync_system_constants::{ KECCAK256_PRECOMPILE_ADDRESS, L2_BASE_TOKEN_ADDRESS, MSG_VALUE_SIMULATOR_ADDRESS, SYSTEM_CONTEXT_ADDRESS, }; -use zksync_types::{get_code_key, web3::keccak256, AccountTreeId, Address, StorageKey, H256, U256}; -use zksync_utils::{ - be_bytes_to_safe_address, h256_to_account_address, u256_to_account_address, u256_to_h256, +use zksync_types::{ + get_code_key, h256_to_address, u256_to_address, u256_to_h256, web3::keccak256, AccountTreeId, + Address, StorageKey, H256, U256, }; -use crate::vm_m6::{ - errors::VmRevertReasonParsingResult, - history_recorder::HistoryMode, - memory::SimpleMemory, - oracles::tracer::{ - utils::{ - computational_gas_price, get_calldata_page_via_abi, print_debug_if_needed, VmHook, +use crate::{ + utils::bytecode::be_bytes_to_safe_address, + vm_m6::{ + errors::VmRevertReasonParsingResult, + history_recorder::HistoryMode, + memory::SimpleMemory, + oracles::tracer::{ + utils::{ + computational_gas_price, get_calldata_page_via_abi, print_debug_if_needed, VmHook, + }, + ExecutionEndTracer, PendingRefundTracer, 
PubdataSpentTracer, StorageInvocationTracer, }, - ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, StorageInvocationTracer, + storage::{Storage, StoragePtr}, }, - storage::{Storage, StoragePtr}, }; #[derive(Debug, Clone, Eq, PartialEq, Copy)] @@ -252,7 +255,7 @@ impl ValidationTracer { // The user is allowed to touch its own slots or slots semantically related to him. let valid_users_slot = address == self.user_address - || u256_to_account_address(&key) == self.user_address + || u256_to_address(&key) == self.user_address || self.auxilary_allowed_slots.contains(&u256_to_h256(key)); if valid_users_slot { return true; @@ -319,7 +322,7 @@ impl ValidationTracer { let packed_abi = data.src0_value.value; let call_destination_value = data.src1_value.value; - let called_address = u256_to_account_address(&call_destination_value); + let called_address = u256_to_address(&call_destination_value); let far_call_abi = FarCallABI::from_u256(packed_abi); if called_address == KECCAK256_PRECOMPILE_ADDRESS @@ -386,7 +389,7 @@ impl ValidationTracer { let value = self.storage.borrow_mut().get_value(&storage_key); return Ok(NewTrustedValidationItems { - new_trusted_addresses: vec![h256_to_account_address(&value)], + new_trusted_addresses: vec![h256_to_address(&value)], ..Default::default() }); } diff --git a/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs b/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs index 196883e1c936..97bf290a2162 100644 --- a/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs @@ -4,11 +4,11 @@ use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_querie use itertools::Itertools; use zk_evm_1_3_1::aux_structures::{LogQuery, Timestamp}; use zksync_types::{StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; -use zksync_utils::bytecode::bytecode_len_in_bytes; use crate::{ glue::GlueInto, interface::VmEvent, + utils::bytecode::bytecode_len_in_bytes, vm_m6::{ history_recorder::HistoryMode, oracles::storage::storage_key_of_log, storage::Storage, utils::collect_storage_log_queries_after_timestamp, VmInstance, @@ -35,9 +35,7 @@ impl VmInstance { let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() - .map(|bytecodehash| { - bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD - }) + .map(|bytecode_hash| bytecode_len_in_bytes(bytecode_hash) + PUBLISH_BYTECODE_OVERHEAD) .sum(); storage_writes_pubdata_published diff --git a/core/lib/multivm/src/versions/vm_m6/refunds.rs b/core/lib/multivm/src/versions/vm_m6/refunds.rs index 406bf380a0b2..f98c84409410 100644 --- a/core/lib/multivm/src/versions/vm_m6/refunds.rs +++ b/core/lib/multivm/src/versions/vm_m6/refunds.rs @@ -1,6 +1,5 @@ use zk_evm_1_3_1::aux_structures::Timestamp; -use zksync_types::U256; -use zksync_utils::ceil_div_u256; +use zksync_types::{ceil_div_u256, U256}; use crate::vm_m6::{ history_recorder::HistoryMode, diff --git a/core/lib/multivm/src/versions/vm_m6/test_utils.rs b/core/lib/multivm/src/versions/vm_m6/test_utils.rs index 4bd39bc56dd4..0debd8dea568 100644 --- a/core/lib/multivm/src/versions/vm_m6/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/test_utils.rs @@ -12,13 +12,13 @@ use itertools::Itertools; use zk_evm_1_3_1::{aux_structures::Timestamp, vm_state::VmLocalState}; use zksync_contracts::deployer_contract; use zksync_types::{ + address_to_h256, + bytecode::BytecodeHash, ethabi::{Address, Token}, + h256_to_address, u256_to_h256, web3::keccak256, Execute, 
Nonce, StorageKey, StorageValue, CONTRACT_DEPLOYER_ADDRESS, H256, U256, }; -use zksync_utils::{ - address_to_h256, bytecode::hash_bytecode, h256_to_account_address, u256_to_h256, -}; use super::utils::StorageLogQuery; use crate::vm_m6::{ @@ -143,7 +143,7 @@ pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute { let params = [ Token::FixedBytes(vec![0u8; 32]), - Token::FixedBytes(hash_bytecode(code).0.to_vec()), + Token::FixedBytes(BytecodeHash::for_bytecode(code).value().0.to_vec()), Token::Bytes(calldata.to_vec()), ]; let calldata = contract_function @@ -172,5 +172,5 @@ pub fn get_create_zksync_address(sender_address: Address, sender_nonce: Nonce) - let hash = keccak256(&digest); - h256_to_account_address(&H256(hash)) + h256_to_address(&H256(hash)) } diff --git a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs index a8f80ea3255e..d0835b233009 100644 --- a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs @@ -1,18 +1,22 @@ use zk_evm_1_3_1::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; use zksync_types::{ + address_to_h256, + bytecode::BytecodeHash, + ceil_div_u256, ethabi::{encode, Address, Token}, fee::encoding_len, + h256_to_u256, l1::is_l1_tx_type, l2::TransactionType, ExecuteTransactionCommon, Transaction, MAX_L2_TX_GAS_LIMIT, U256, }; -use zksync_utils::{ - address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, ceil_div_u256, h256_to_u256, -}; use super::vm_with_bootloader::{MAX_GAS_PER_PUBDATA_BYTE, MAX_TXS_IN_BLOCK}; -use crate::vm_m6::vm_with_bootloader::{ - BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, +use crate::{ + utils::bytecode::bytes_to_be_words, + vm_m6::vm_with_bootloader::{ + BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, + }, }; pub(crate) const L1_TX_TYPE: u8 = 255; @@ -192,16 +196,13 @@ impl TransactionData { let factory_deps_hashes = self .factory_deps .iter() - .map(|dep| h256_to_u256(hash_bytecode(dep))) + .map(|dep| BytecodeHash::for_bytecode(dep).value_u256()) .collect(); self.abi_encode_with_custom_factory_deps(factory_deps_hashes) } pub fn into_tokens(self) -> Vec { - let bytes = self.abi_encode(); - assert!(bytes.len() % 32 == 0); - - bytes_to_be_words(bytes) + bytes_to_be_words(&self.abi_encode()) } pub(crate) fn effective_gas_price_per_pubdata(&self, block_gas_price_per_pubdata: u32) -> u32 { diff --git a/core/lib/multivm/src/versions/vm_m6/utils.rs b/core/lib/multivm/src/versions/vm_m6/utils.rs index 912a30a4eafc..a9304f5cd525 100644 --- a/core/lib/multivm/src/versions/vm_m6/utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/utils.rs @@ -7,8 +7,7 @@ use zk_evm_1_3_1::{ }; use zksync_contracts::BaseSystemContracts; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; -use zksync_types::{Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; -use zksync_utils::h256_to_u256; +use zksync_types::{h256_to_u256, Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; use crate::{ glue::GlueInto, diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index 1fdc8ae64f80..ff089ba902dd 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -1,19 +1,19 @@ -use std::collections::HashSet; +use std::{collections::HashSet, rc::Rc}; -use zksync_types::{vm::VmVersion, Transaction}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; +use 
zksync_types::{bytecode::BytecodeHash, h256_to_u256, vm::VmVersion, Transaction}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::StoragePtr, BytecodeCompressionError, BytecodeCompressionResult, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::old::TracerDispatcher, utils::bytecode, - vm_m6::{storage::Storage, vm_instance::MultiVMSubversion, VmInstance}, + vm_m6::{storage::Storage, vm_instance::MultiVmSubversion, VmInstance}, }; #[derive(Debug)] @@ -27,7 +27,7 @@ impl Vm { batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr, - vm_sub_version: MultiVMSubversion, + vm_sub_version: MultiVmSubversion, ) -> Self { let oracle_tools = crate::vm_m6::OracleTools::new(storage.clone(), H::VmM6Mode::default()); let block_properties = zk_evm_1_3_1::block_properties::BlockProperties { @@ -72,19 +72,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn push_transaction(&mut self, tx: Transaction) { - crate::vm_m6::vm_with_bootloader::push_transaction_to_bootloader_memory( - &mut self.vm, - &tx, - self.system_env.execution_mode.glue_into(), - None, - ) + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult { + let compressed_bytecodes = + crate::vm_m6::vm_with_bootloader::push_transaction_to_bootloader_memory( + &mut self.vm, + &tx, + self.system_env.execution_mode.glue_into(), + None, + ); + PushTransactionResult { + compressed_bytecodes: compressed_bytecodes.into(), + } } fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { if let Some(storage_invocations) = tracer.storage_invocations { self.vm @@ -93,7 +97,7 @@ impl VmInterface for Vm { } match execution_mode { - VmExecutionMode::OneTx => match self.system_env.execution_mode { + InspectExecutionMode::OneTx => match self.system_env.execution_mode { TxExecutionMode::VerifyExecute => { let enable_call_tracer = tracer.call_tracer.is_some(); let result = self.vm.execute_next_tx( @@ -112,8 +116,7 @@ impl VmInterface for Vm { ) .glue_into(), }, - VmExecutionMode::Batch => self.finish_batch().block_tip_execution_result, - VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), + InspectExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), } } @@ -139,7 +142,7 @@ impl VmInterface for Vm { let mut deps_hashes = HashSet::with_capacity(deps.len()); let mut bytecode_hashes = vec![]; let filtered_deps = deps.iter().filter_map(|bytecode| { - let bytecode_hash = hash_bytecode(bytecode); + let bytecode_hash = BytecodeHash::for_bytecode(bytecode).value(); let is_known = !deps_hashes.insert(bytecode_hash) || self.vm.is_bytecode_exists(&bytecode_hash); @@ -203,7 +206,7 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { self.vm .execute_till_block_end( crate::vm_m6::vm_with_bootloader::BootloaderJobType::BlockPostprocessing, @@ -216,8 +219,8 @@ impl VmFactory for Vm { fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: StoragePtr) -> Self { let vm_version: VmVersion = system_env.version.into(); let vm_sub_version = match 
vm_version { - VmVersion::M6Initial => MultiVMSubversion::V1, - VmVersion::M6BugWithCompressionFixed => MultiVMSubversion::V2, + VmVersion::M6Initial => MultiVmSubversion::V1, + VmVersion::M6BugWithCompressionFixed => MultiVmSubversion::V2, _ => panic!("Unsupported protocol version for vm_m6: {:?}", vm_version), }; Self::new_with_subversion(batch_env, system_env, storage, vm_sub_version) } diff --git a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs index d6c418da4c20..29ef17aa4bc7 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs @@ -82,7 +82,7 @@ pub(crate) fn get_vm_hook_params(memory: &SimpleMemory) -> Ve /// /// This enum allows executing blocks with the same VM but different support for refunds. #[derive(Debug, Copy, Clone)] -pub enum MultiVMSubversion { +pub enum MultiVmSubversion { /// Initial VM M6 version. V1, /// Bug with code compression was fixed. @@ -98,7 +98,7 @@ pub struct VmInstance { pub(crate) bootloader_state: BootloaderState, pub snapshots: Vec, - pub vm_subversion: MultiVMSubversion, + pub vm_subversion: MultiVmSubversion, } /// This structure stores data that accumulates during the VM run. diff --git a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs index 7a9fbb73fe49..ff83abc45fcf 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs @@ -14,16 +14,13 @@ use zk_evm_1_3_1::{ use zksync_contracts::BaseSystemContracts; use zksync_system_constants::MAX_L2_TX_GAS_LIMIT; use zksync_types::{ - fee_model::L1PeggedBatchFeeModelInput, Address, Transaction, BOOTLOADER_ADDRESS, - L1_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, -}; -use zksync_utils::{ - address_to_u256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, misc::ceil_div, + address_to_u256, bytecode::BytecodeHash, fee_model::L1PeggedBatchFeeModelInput, h256_to_u256, + Address, Transaction, BOOTLOADER_ADDRESS, L1_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, }; use crate::{ interface::{CompressedBytecodeInfo, L1BatchEnv}, - utils::bytecode, + utils::{bytecode, bytecode::bytes_to_be_words}, vm_m6::{ bootloader_state::BootloaderState, history_recorder::HistoryMode, @@ -32,7 +29,7 @@ use crate::{ utils::{ code_page_candidate_from_base, heap_page_from_base, BLOCK_GAS_LIMIT, INITIAL_BASE_PAGE, }, - vm_instance::{MultiVMSubversion, ZkSyncVmState}, + vm_instance::{MultiVmSubversion, ZkSyncVmState}, OracleTools, VmInstance, }, }; @@ -84,8 +81,11 @@ pub(crate) fn eth_price_per_pubdata_byte(l1_gas_price: u64) -> u64 { pub(crate) fn base_fee_to_gas_per_pubdata(l1_gas_price: u64, base_fee: u64) -> u64 { let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price); - - ceil_div(eth_price_per_pubdata_byte, base_fee) + if eth_price_per_pubdata_byte == 0 { + 0 + } else { + eth_price_per_pubdata_byte.div_ceil(base_fee) + } } pub(crate) fn derive_base_fee_and_gas_per_pubdata( @@ -102,7 +102,7 @@ pub(crate) fn derive_base_fee_and_gas_per_pubdata( // publish enough public data while compensating us for it.
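// A worked instance of the `max` below, with made-up numbers: if the ETH price per
// pubdata byte is 170 and MAX_GAS_PER_PUBDATA_BYTE is 20, then
// 170_u64.div_ceil(20) == 9 (8.5 rounded up), so the base fee becomes
// max(fair_l2_gas_price, 9).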
let base_fee = std::cmp::max( fair_l2_gas_price, - ceil_div(eth_price_per_pubdata_byte, MAX_GAS_PER_PUBDATA_BYTE), + eth_price_per_pubdata_byte.div_ceil(MAX_GAS_PER_PUBDATA_BYTE), ); ( @@ -269,7 +269,7 @@ impl Default for TxExecutionMode { } pub fn init_vm( - vm_subversion: MultiVMSubversion, + vm_subversion: MultiVmSubversion, oracle_tools: OracleTools, block_context: BlockContextMode, block_properties: BlockProperties, @@ -288,7 +288,7 @@ pub fn init_vm( } pub fn init_vm_with_gas_limit( - vm_subversion: MultiVMSubversion, + vm_subversion: MultiVmSubversion, oracle_tools: OracleTools, block_context: BlockContextMode, block_properties: BlockProperties, @@ -385,7 +385,7 @@ impl BlockContextMode { // This method accepts a custom bootloader code. // It should be used only in tests. pub fn init_vm_inner( - vm_subversion: MultiVMSubversion, + vm_subversion: MultiVmSubversion, mut oracle_tools: OracleTools, block_context: BlockContextMode, block_properties: BlockProperties, @@ -396,7 +396,7 @@ pub fn init_vm_inner( oracle_tools.decommittment_processor.populate( vec![( h256_to_u256(base_system_contract.default_aa.hash), - base_system_contract.default_aa.code.clone(), + bytes_to_be_words(&base_system_contract.default_aa.code), )], Timestamp(0), ); @@ -404,7 +404,7 @@ pub fn init_vm_inner( oracle_tools.memory.populate( vec![( BOOTLOADER_CODE_PAGE, - base_system_contract.bootloader.code.clone(), + bytes_to_be_words(&base_system_contract.bootloader.code), )], Timestamp(0), ); @@ -433,7 +433,7 @@ fn bootloader_initial_memory(block_properties: &BlockContextMode) -> Vec<(usize, } pub fn get_bootloader_memory( - vm_subversion: MultiVMSubversion, + vm_subversion: MultiVmSubversion, txs: Vec, predefined_refunds: Vec, predefined_compressed_bytecodes: Vec>, @@ -441,14 +441,14 @@ pub fn get_bootloader_memory( block_context: BlockContextMode, ) -> Vec<(usize, U256)> { match vm_subversion { - MultiVMSubversion::V1 => get_bootloader_memory_v1( + MultiVmSubversion::V1 => get_bootloader_memory_v1( txs, predefined_refunds, predefined_compressed_bytecodes, execution_mode, block_context, ), - MultiVMSubversion::V2 => get_bootloader_memory_v2( + MultiVmSubversion::V2 => get_bootloader_memory_v2( txs, predefined_refunds, predefined_compressed_bytecodes, @@ -491,7 +491,7 @@ fn get_bootloader_memory_v1( predefined_refunds[tx_index_in_block], block_gas_price_per_pubdata as u32, previous_compressed, - compressed_bytecodes, + &compressed_bytecodes, ); previous_compressed += total_compressed_len; @@ -536,7 +536,7 @@ fn get_bootloader_memory_v2( predefined_refunds[tx_index_in_block], block_gas_price_per_pubdata as u32, previous_compressed, - compressed_bytecodes, + &compressed_bytecodes, ); previous_compressed += total_compressed_len_words; @@ -554,7 +554,7 @@ pub fn push_transaction_to_bootloader_memory( tx: &Transaction, execution_mode: TxExecutionMode, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { let tx: TransactionData = tx.clone().into(); let block_gas_per_pubdata_byte = vm.block_context.context.block_gas_price_per_pubdata(); let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); @@ -564,7 +564,7 @@ pub fn push_transaction_to_bootloader_memory( execution_mode, overhead, explicit_compressed_bytecodes, - ); + ) } pub fn push_raw_transaction_to_bootloader_memory( @@ -573,16 +573,16 @@ pub fn push_raw_transaction_to_bootloader_memory( execution_mode: TxExecutionMode, predefined_overhead: u32, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { match vm.vm_subversion { - MultiVMSubversion::V1 => 
push_raw_transaction_to_bootloader_memory_v1( + MultiVmSubversion::V1 => push_raw_transaction_to_bootloader_memory_v1( vm, tx, execution_mode, predefined_overhead, explicit_compressed_bytecodes, ), - MultiVMSubversion::V2 => push_raw_transaction_to_bootloader_memory_v2( + MultiVmSubversion::V2 => push_raw_transaction_to_bootloader_memory_v2( vm, tx, execution_mode, @@ -599,7 +599,7 @@ fn push_raw_transaction_to_bootloader_memory_v1( execution_mode: TxExecutionMode, predefined_overhead: u32, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { let tx_index_in_block = vm.bootloader_state.free_tx_index(); let already_included_txs_size = vm.bootloader_state.free_tx_offset(); @@ -619,7 +619,7 @@ fn push_raw_transaction_to_bootloader_memory_v1( tx.factory_deps .iter() .filter_map(|bytecode| { - if vm.is_bytecode_exists(&hash_bytecode(bytecode)) { + if vm.is_bytecode_exists(&BytecodeHash::for_bytecode(bytecode).value()) { return None; } bytecode::compress(bytecode.clone()).ok() @@ -651,7 +651,7 @@ fn push_raw_transaction_to_bootloader_memory_v1( predefined_overhead, trusted_ergs_limit, previous_bytecodes, - compressed_bytecodes, + &compressed_bytecodes, ); vm.state.memory.populate_page( @@ -661,6 +661,7 @@ fn push_raw_transaction_to_bootloader_memory_v1( ); vm.bootloader_state.add_tx_data(encoded_tx_size); vm.bootloader_state.add_compressed_bytecode(compressed_len); + compressed_bytecodes } // Bytecode compression bug fixed @@ -670,7 +671,7 @@ fn push_raw_transaction_to_bootloader_memory_v2( execution_mode: TxExecutionMode, predefined_overhead: u32, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { let tx_index_in_block = vm.bootloader_state.free_tx_index(); let already_included_txs_size = vm.bootloader_state.free_tx_offset(); @@ -690,7 +691,7 @@ fn push_raw_transaction_to_bootloader_memory_v2( tx.factory_deps .iter() .filter_map(|bytecode| { - if vm.is_bytecode_exists(&hash_bytecode(bytecode)) { + if vm.is_bytecode_exists(&BytecodeHash::for_bytecode(bytecode).value()) { return None; } bytecode::compress(bytecode.clone()).ok() @@ -730,7 +731,7 @@ fn push_raw_transaction_to_bootloader_memory_v2( predefined_overhead, trusted_ergs_limit, previous_bytecodes, - compressed_bytecodes, + &compressed_bytecodes, ); vm.state.memory.populate_page( @@ -741,6 +742,7 @@ fn push_raw_transaction_to_bootloader_memory_v2( vm.bootloader_state.add_tx_data(encoded_tx_size); vm.bootloader_state .add_compressed_bytecode(compressed_bytecodes_encoding_len_words); + compressed_bytecodes } #[allow(clippy::too_many_arguments)] @@ -752,7 +754,7 @@ fn get_bootloader_memory_for_tx( predefined_refund: u32, block_gas_per_pubdata: u32, previous_compressed_bytecode_size: usize, - compressed_bytecodes: Vec, + compressed_bytecodes: &[CompressedBytecodeInfo], ) -> Vec<(usize, U256)> { let overhead_gas = tx.overhead_gas(block_gas_per_pubdata); let trusted_gas_limit = tx.trusted_gas_limit(block_gas_per_pubdata); @@ -779,7 +781,7 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( predefined_overhead: u32, trusted_gas_limit: u32, previous_compressed_bytecode_size: usize, - compressed_bytecodes: Vec, + compressed_bytecodes: &[CompressedBytecodeInfo], ) -> Vec<(usize, U256)> { let mut memory: Vec<(usize, U256)> = Vec::default(); let bootloader_description_offset = @@ -815,11 +817,11 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( COMPRESSED_BYTECODES_OFFSET + 1 + previous_compressed_bytecode_size; let memory_addition: Vec<_> = compressed_bytecodes - .into_iter() - .flat_map(|x| bytecode::encode_call(&x)) + .iter() + 
.flat_map(bytecode::encode_call) .collect(); - let memory_addition = bytes_to_be_words(memory_addition); + let memory_addition = bytes_to_be_words(&memory_addition); memory.extend( (compressed_bytecodes_offset..compressed_bytecodes_offset + memory_addition.len()) @@ -902,11 +904,8 @@ fn formal_calldata_abi() -> PrimitiveValue { } pub(crate) fn bytecode_to_factory_dep(bytecode: Vec) -> (U256, Vec) { - let bytecode_hash = hash_bytecode(&bytecode); - let bytecode_hash = U256::from_big_endian(bytecode_hash.as_bytes()); - - let bytecode_words = bytes_to_be_words(bytecode); - + let bytecode_hash = BytecodeHash::for_bytecode(&bytecode).value_u256(); + let bytecode_words = bytes_to_be_words(&bytecode); (bytecode_hash, bytecode_words) } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/l2_block.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/l2_block.rs index e8cabebc9f7c..3bc669105b05 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/l2_block.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/l2_block.rs @@ -1,7 +1,6 @@ use std::cmp::Ordering; -use zksync_types::{L2BlockNumber, H256}; -use zksync_utils::concat_and_hash; +use zksync_types::{web3::keccak256_concat, L2BlockNumber, H256}; use crate::{ interface::{L2Block, L2BlockEnv}, @@ -53,7 +52,7 @@ impl BootloaderL2Block { } fn update_rolling_hash(&mut self, tx_hash: H256) { - self.txs_rolling_hash = concat_and_hash(self.txs_rolling_hash, tx_hash) + self.txs_rolling_hash = keccak256_concat(self.txs_rolling_hash, tx_hash) } pub(crate) fn interim_version(&self) -> BootloaderL2Block { diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs index 14c895d7a0b4..a05dc1ae2430 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs @@ -1,10 +1,9 @@ -use zksync_types::U256; -use zksync_utils::{bytes_to_be_words, h256_to_u256}; +use zksync_types::{h256_to_u256, U256}; use super::tx::BootloaderTx; use crate::{ interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode}, - utils::bytecode, + utils::{bytecode, bytecode::bytes_to_be_words}, vm_refunds_enhancement::{ bootloader_state::l2_block::BootloaderL2Block, constants::{ @@ -23,8 +22,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( .iter() .flat_map(bytecode::encode_call) .collect(); - - bytes_to_be_words(memory_addition) + bytes_to_be_words(&memory_addition) } #[allow(clippy::too_many_arguments)] diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs index f7ab9ae8b517..38cfaa124b16 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs @@ -1,13 +1,12 @@ use itertools::Itertools; -use zksync_types::U256; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; +use zksync_types::{bytecode::BytecodeHash, U256}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, CompressedBytecodeInfo, }, - utils::bytecode, + utils::{bytecode, bytecode::bytes_to_be_words}, vm_refunds_enhancement::Vm, HistoryMode, }; @@ -25,18 +24,15 @@ impl Vm { .storage .get_ptr() .borrow_mut() - 
.is_bytecode_known(&hash_bytecode(&info.original)) + .is_bytecode_known(&BytecodeHash::for_bytecode(&info.original).value()) }) } } /// Converts bytecode to tokens and hashes it. pub(crate) fn bytecode_to_factory_dep(bytecode: Vec) -> (U256, Vec) { - let bytecode_hash = hash_bytecode(&bytecode); - let bytecode_hash = U256::from_big_endian(bytecode_hash.as_bytes()); - - let bytecode_words = bytes_to_be_words(bytecode); - + let bytecode_hash = BytecodeHash::for_bytecode(&bytecode).value_u256(); + let bytecode_words = bytes_to_be_words(&bytecode); (bytecode_hash, bytecode_words) } @@ -49,7 +45,11 @@ pub(crate) fn compress_bytecodes( .enumerate() .sorted_by_key(|(_idx, dep)| *dep) .dedup_by(|x, y| x.1 == y.1) - .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) + .filter(|(_idx, dep)| { + !storage + .borrow_mut() + .is_bytecode_known(&BytecodeHash::for_bytecode(dep).value()) + }) .sorted_by_key(|(idx, _dep)| *idx) .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs index 9462a89be2ab..e8d19dfbba97 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs @@ -1,4 +1,4 @@ -use std::mem; +use std::{collections::HashMap, mem}; use zk_evm_1_3_3::aux_structures::Timestamp; @@ -90,7 +90,7 @@ impl Vm { logs, statistics, refunds, - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), // dynamic bytecode deployment is not supported }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/events.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/events.rs index 52a4ed8a2876..05ec6557e905 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/events.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/events.rs @@ -1,8 +1,7 @@ use zk_evm_1_3_3::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use zksync_types::{h256_to_address, L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use crate::interface::VmEvent; +use crate::{interface::VmEvent, utils::bytecode::be_chunks_to_h256_words}; #[derive(Clone)] pub(crate) struct SolidityLikeEvent { @@ -135,7 +134,7 @@ pub(crate) fn merge_events(events: Vec) -> Vec .filter(|e| e.address == EVENT_WRITER_ADDRESS) .map(|event| { // The events writer events where the first topic is the actual address of the event and the rest of the topics are real topics - let address = h256_to_account_address(&H256(event.topics[0])); + let address = h256_to_address(&H256(event.topics[0])); let topics = event.topics.into_iter().skip(1).collect(); SolidityLikeEvent { diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/history_recorder.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/history_recorder.rs index 8af2c42db957..d25d2a57259d 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/history_recorder.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/history_recorder.rs @@ -5,8 +5,7 @@ use zk_evm_1_3_3::{ vm_state::PrimitiveValue, zkevm_opcode_defs::{self}, }; -use zksync_types::{StorageKey, H256, U256}; -use 
zksync_utils::{h256_to_u256, u256_to_h256}; +use zksync_types::{h256_to_u256, u256_to_h256, StorageKey, H256, U256}; use crate::interface::storage::{StoragePtr, WriteStorage}; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/decommitter.rs index ccc8d9052b7e..b0ce7edbc95d 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/decommitter.rs @@ -6,12 +6,12 @@ use zk_evm_1_3_3::{ DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, }, }; -use zksync_types::U256; -use zksync_utils::{bytecode::bytecode_len_in_words, bytes_to_be_words, u256_to_h256}; +use zksync_types::{u256_to_h256, U256}; use super::OracleWithHistory; use crate::{ interface::storage::{ReadStorage, StoragePtr}, + utils::bytecode::{bytecode_len_in_words, bytes_to_be_words}, vm_refunds_enhancement::old_vm::history_recorder::{ HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory, }, @@ -61,7 +61,7 @@ impl DecommitterOracle { .load_factory_dep(u256_to_h256(hash)) .expect("Trying to decode unexisting hash"); - let value = bytes_to_be_words(value); + let value = bytes_to_be_words(&value); self.known_bytecodes.insert(hash, value.clone(), timestamp); value } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/oracles/storage.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/oracles/storage.rs index a9c5b71e782e..73a5d610bc26 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/oracles/storage.rs @@ -6,10 +6,9 @@ use zk_evm_1_3_3::{ zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use zksync_types::{ - utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogKind, - BOOTLOADER_ADDRESS, U256, + u256_to_h256, utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, + StorageLogKind, BOOTLOADER_ADDRESS, U256, }; -use zksync_utils::u256_to_h256; use crate::{ glue::GlueInto, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs index 0dbf5a3cbf40..777f0d51460f 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs @@ -5,8 +5,7 @@ use zk_evm_1_3_3::{ vm_state::VmLocalState, }; use zksync_system_constants::{PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; -use zksync_types::{l2_to_l1_log::L2ToL1Log, L1BatchNumber, U256}; -use zksync_utils::{bytecode::bytecode_len_in_bytes, ceil_div_u256, u256_to_h256}; +use zksync_types::{ceil_div_u256, l2_to_l1_log::L2ToL1Log, u256_to_h256, L1BatchNumber, U256}; use crate::{ interface::{ @@ -15,6 +14,7 @@ use crate::{ L1BatchEnv, Refunds, VmEvent, }, tracers::dynamic::vm_1_3_3::DynTracer, + utils::bytecode::bytecode_len_in_bytes, vm_refunds_enhancement::{ bootloader_state::BootloaderState, constants::{BOOTLOADER_HEAP_PAGE, OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET}, @@ -335,7 +335,7 @@ pub(crate) fn pubdata_published( let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() - .map(|bytecodehash| bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD) + .map(|bytecode_hash| 
bytecode_len_in_bytes(bytecode_hash) + PUBLISH_BYTECODE_OVERHEAD) .sum(); storage_writes_pubdata_published diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs index 1d3e9a272764..d744261e4f48 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs @@ -9,8 +9,7 @@ use zksync_system_constants::{ ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, SHA256_PRECOMPILE_ADDRESS, }; -use zksync_types::U256; -use zksync_utils::u256_to_h256; +use zksync_types::{u256_to_h256, U256}; use crate::vm_refunds_enhancement::{ constants::{ diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs index 22ab09296c91..64802d74c878 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs @@ -1,19 +1,24 @@ use std::convert::TryInto; use zksync_types::{ + address_to_h256, + bytecode::BytecodeHash, ethabi::{encode, Address, Token}, fee::{encoding_len, Fee}, + h256_to_u256, l1::is_l1_tx_type, l2::{L2Tx, TransactionType}, transaction_request::{PaymasterParams, TransactionRequest}, web3::Bytes, Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, Nonce, Transaction, H256, U256, }; -use zksync_utils::{address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; -use crate::vm_refunds_enhancement::{ - constants::MAX_GAS_PER_PUBDATA_BYTE, - utils::overhead::{get_amortized_overhead, OverheadCoefficients}, +use crate::{ + utils::bytecode::bytes_to_be_words, + vm_refunds_enhancement::{ + constants::MAX_GAS_PER_PUBDATA_BYTE, + utils::overhead::{get_amortized_overhead, OverheadCoefficients}, + }, }; /// This structure represents the data that is used by @@ -190,16 +195,13 @@ impl TransactionData { let factory_deps_hashes = self .factory_deps .iter() - .map(|dep| h256_to_u256(hash_bytecode(dep))) + .map(|dep| BytecodeHash::for_bytecode(dep).value_u256()) .collect(); self.abi_encode_with_custom_factory_deps(factory_deps_hashes) } pub(crate) fn into_tokens(self) -> Vec { - let bytes = self.abi_encode(); - assert!(bytes.len() % 32 == 0); - - bytes_to_be_words(bytes) + bytes_to_be_words(&self.abi_encode()) } pub(crate) fn effective_gas_price_per_pubdata(&self, block_gas_price_per_pubdata: u32) -> u32 { diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/vm_state.rs index 22f92891e40a..6776bc37c9d5 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/vm_state.rs @@ -11,14 +11,14 @@ use zk_evm_1_3_3::{ }, }; use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::{block::L2BlockHasher, Address, L2BlockNumber}; -use zksync_utils::h256_to_u256; +use zksync_types::{block::L2BlockHasher, h256_to_u256, Address, L2BlockNumber}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, L1BatchEnv, L2Block, SystemEnv, }, + utils::bytecode::bytes_to_be_words, vm_refunds_enhancement::{ bootloader_state::BootloaderState, 
constants::BOOTLOADER_HEAP_PAGE, @@ -89,11 +89,7 @@ pub(crate) fn new_vm_state( decommittment_processor.populate( vec![( h256_to_u256(system_env.base_system_smart_contracts.default_aa.hash), - system_env - .base_system_smart_contracts - .default_aa - .code - .clone(), + bytes_to_be_words(&system_env.base_system_smart_contracts.default_aa.code), )], Timestamp(0), ); @@ -101,11 +97,7 @@ pub(crate) fn new_vm_state( memory.populate( vec![( BOOTLOADER_CODE_PAGE, - system_env - .base_system_smart_contracts - .bootloader - .code - .clone(), + bytes_to_be_words(&system_env.base_system_smart_contracts.bootloader.code), )], Timestamp(0), ); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/l1_batch.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/l1_batch.rs index b449165be348..58419acbe60a 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/l1_batch.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/l1_batch.rs @@ -1,5 +1,4 @@ -use zksync_types::U256; -use zksync_utils::{address_to_u256, h256_to_u256}; +use zksync_types::{address_to_u256, h256_to_u256, U256}; use crate::{interface::L1BatchEnv, vm_refunds_enhancement::utils::fee::get_batch_base_fee}; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/fee.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/fee.rs index f7203b57b4c4..8bd06c7faa6b 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/fee.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/fee.rs @@ -1,6 +1,5 @@ //! Utility functions for vm use zksync_types::fee_model::L1PeggedBatchFeeModelInput; -use zksync_utils::ceil_div; use crate::{ interface::L1BatchEnv, @@ -12,8 +11,11 @@ use crate::{ /// Calculates the amount of gas required to publish one byte of pubdata pub(crate) fn base_fee_to_gas_per_pubdata(l1_gas_price: u64, base_fee: u64) -> u64 { let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price); - - ceil_div(eth_price_per_pubdata_byte, base_fee) + if eth_price_per_pubdata_byte == 0 { + 0 + } else { + eth_price_per_pubdata_byte.div_ceil(base_fee) + } } /// Calculates the base fee and gas per pubdata for the given L1 gas price. @@ -30,7 +32,7 @@ pub(crate) fn derive_base_fee_and_gas_per_pubdata( // publish enough public data while compensating us for it. 
let base_fee = std::cmp::max( fair_l2_gas_price, - ceil_div(eth_price_per_pubdata_byte, MAX_GAS_PER_PUBDATA_BYTE), + eth_price_per_pubdata_byte.div_ceil(MAX_GAS_PER_PUBDATA_BYTE), ); ( diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/l2_blocks.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/l2_blocks.rs index ff5536ae0b97..1095abd82db1 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/l2_blocks.rs @@ -4,9 +4,9 @@ use zksync_system_constants::{ SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES, }; use zksync_types::{ - block::unpack_block_info, web3::keccak256, AccountTreeId, L2BlockNumber, StorageKey, H256, U256, + block::unpack_block_info, h256_to_u256, u256_to_h256, web3::keccak256, AccountTreeId, + L2BlockNumber, StorageKey, H256, U256, }; -use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::interface::{ storage::{ReadStorage, StoragePtr}, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs index af25c4b4d7c4..efcee968db40 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs @@ -1,7 +1,6 @@ use zk_evm_1_3_3::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; use zksync_system_constants::MAX_L2_TX_GAS_LIMIT; -use zksync_types::{l1::is_l1_tx_type, U256}; -use zksync_utils::ceil_div_u256; +use zksync_types::{ceil_div_u256, l1::is_l1_tx_type, U256}; use crate::vm_refunds_enhancement::constants::{ BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, MAX_TXS_IN_BLOCK, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index d87fd4d104da..81b0c52cce5e 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -1,12 +1,15 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, vm_latest::HistoryEnabled, @@ -74,18 +77,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { - self.push_transaction_with_compression(tx, true) + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. 
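// Note: `InspectExecutionMode` below covers only `OneTx` and `Bootloader`; the old
// `VmExecutionMode::Batch` path moved into `finish_batch`, which now receives the
// `PubdataBuilder` and runs `inspect_inner` with `VmExecutionMode::Batch`.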
fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(dispatcher, execution_mode) + self.inspect_inner(dispatcher, execution_mode.into()) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -100,7 +108,7 @@ impl VmInterface for Vm { with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); - let result = self.inspect(dispatcher, VmExecutionMode::OneTx); + let result = self.inspect(dispatcher, InspectExecutionMode::OneTx); if self.has_unpublished_bytecodes() { ( Err(BytecodeCompressionError::BytecodeCompressionFailed), @@ -117,8 +125,8 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/l2_block.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/l2_block.rs index 197ecbff5896..d100b17c7c08 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/l2_block.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/l2_block.rs @@ -1,7 +1,6 @@ use std::cmp::Ordering; -use zksync_types::{L2BlockNumber, H256}; -use zksync_utils::concat_and_hash; +use zksync_types::{web3::keccak256_concat, L2BlockNumber, H256}; use crate::{ interface::{L2Block, L2BlockEnv}, @@ -53,7 +52,7 @@ impl BootloaderL2Block { } fn update_rolling_hash(&mut self, tx_hash: H256) { - self.txs_rolling_hash = concat_and_hash(self.txs_rolling_hash, tx_hash) + self.txs_rolling_hash = keccak256_concat(self.txs_rolling_hash, tx_hash) } pub(crate) fn interim_version(&self) -> BootloaderL2Block { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs index 3e2474835fa4..4c33aeb6e147 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs @@ -1,5 +1,4 @@ -use zksync_types::U256; -use zksync_utils::{bytes_to_be_words, h256_to_u256}; +use zksync_types::{h256_to_u256, U256}; use super::tx::BootloaderTx; use crate::{ @@ -23,8 +22,7 @@ pub(super) fn get_memory_for_compressed_bytecodes( .iter() .flat_map(bytecode::encode_call) .collect(); - - bytes_to_be_words(memory_addition) + bytecode::bytes_to_be_words(&memory_addition) } #[allow(clippy::too_many_arguments)] diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs index d5f2b50b83fc..828b1c961708 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs @@ -1,13 +1,12 @@ use itertools::Itertools; -use zksync_types::U256; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; +use zksync_types::{bytecode::BytecodeHash, U256}; use crate::{ interface::{ 
storage::{StoragePtr, WriteStorage}, CompressedBytecodeInfo, }, - utils::bytecode, + utils::{bytecode, bytecode::bytes_to_be_words}, vm_virtual_blocks::Vm, HistoryMode, }; @@ -25,18 +24,15 @@ impl Vm { .storage .get_ptr() .borrow_mut() - .is_bytecode_known(&hash_bytecode(&info.original)) + .is_bytecode_known(&BytecodeHash::for_bytecode(&info.original).value()) }) } } /// Converts bytecode to tokens and hashes it. pub(crate) fn bytecode_to_factory_dep(bytecode: Vec) -> (U256, Vec) { - let bytecode_hash = hash_bytecode(&bytecode); - let bytecode_hash = U256::from_big_endian(bytecode_hash.as_bytes()); - - let bytecode_words = bytes_to_be_words(bytecode); - + let bytecode_hash = BytecodeHash::for_bytecode(&bytecode).value_u256(); + let bytecode_words = bytes_to_be_words(&bytecode); (bytecode_hash, bytecode_words) } @@ -49,7 +45,11 @@ pub(crate) fn compress_bytecodes( .enumerate() .sorted_by_key(|(_idx, dep)| *dep) .dedup_by(|x, y| x.1 == y.1) - .filter(|(_idx, dep)| !storage.borrow_mut().is_bytecode_known(&hash_bytecode(dep))) + .filter(|(_idx, dep)| { + !storage + .borrow_mut() + .is_bytecode_known(&BytecodeHash::for_bytecode(dep).value()) + }) .sorted_by_key(|(idx, _dep)| *idx) .filter_map(|(_idx, dep)| bytecode::compress(dep.clone()).ok()) .collect() diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs index b1ad4d257b77..d3d511ed5398 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs @@ -1,4 +1,4 @@ -use std::mem; +use std::{collections::HashMap, mem}; use zk_evm_1_3_3::aux_structures::Timestamp; @@ -88,7 +88,7 @@ impl Vm { .refund_tracer .map(|r| r.get_refunds()) .unwrap_or_default(), - new_known_factory_deps: None, + dynamic_factory_deps: HashMap::new(), // dynamic bytecode deployment is not supported }; tx_tracer.dispatcher.save_results(&mut result); diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/events.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/events.rs index 52a4ed8a2876..05ec6557e905 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/events.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/events.rs @@ -1,8 +1,7 @@ use zk_evm_1_3_3::{ethereum_types::Address, reference_impls::event_sink::EventMessage}; -use zksync_types::{L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use zksync_utils::{be_chunks_to_h256_words, h256_to_account_address}; +use zksync_types::{h256_to_address, L1BatchNumber, EVENT_WRITER_ADDRESS, H256}; -use crate::interface::VmEvent; +use crate::{interface::VmEvent, utils::bytecode::be_chunks_to_h256_words}; #[derive(Clone)] pub(crate) struct SolidityLikeEvent { @@ -135,7 +134,7 @@ pub(crate) fn merge_events(events: Vec) -> Vec .filter(|e| e.address == EVENT_WRITER_ADDRESS) .map(|event| { // The events writer events where the first topic is the actual address of the event and the rest of the topics are real topics - let address = h256_to_account_address(&H256(event.topics[0])); + let address = h256_to_address(&H256(event.topics[0])); let topics = event.topics.into_iter().skip(1).collect(); SolidityLikeEvent { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/history_recorder.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/history_recorder.rs index cbd4dc0ed738..111a337bf449 100644 --- 
a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/history_recorder.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/history_recorder.rs @@ -5,8 +5,7 @@ use zk_evm_1_3_3::{ vm_state::PrimitiveValue, zkevm_opcode_defs::{self}, }; -use zksync_types::{StorageKey, H256, U256}; -use zksync_utils::{h256_to_u256, u256_to_h256}; +use zksync_types::{h256_to_u256, u256_to_h256, StorageKey, H256, U256}; use crate::interface::storage::{StoragePtr, WriteStorage}; diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/decommitter.rs index 3c8d72b0b33a..a432e782f658 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/decommitter.rs @@ -6,12 +6,12 @@ use zk_evm_1_3_3::{ DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, }, }; -use zksync_types::U256; -use zksync_utils::{bytecode::bytecode_len_in_words, bytes_to_be_words, u256_to_h256}; +use zksync_types::{u256_to_h256, U256}; use super::OracleWithHistory; use crate::{ interface::storage::{ReadStorage, StoragePtr}, + utils::bytecode::{bytecode_len_in_words, bytes_to_be_words}, vm_virtual_blocks::old_vm::history_recorder::{ HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory, }, @@ -61,7 +61,7 @@ impl DecommitterOracle { .load_factory_dep(u256_to_h256(hash)) .expect("Trying to decode unexisting hash"); - let value = bytes_to_be_words(value); + let value = bytes_to_be_words(&value); self.known_bytecodes.insert(hash, value.clone(), timestamp); value } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/storage.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/storage.rs index defbad70f1a9..0b3a590d8d18 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/storage.rs @@ -6,10 +6,9 @@ use zk_evm_1_3_3::{ zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use zksync_types::{ - utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogKind, - BOOTLOADER_ADDRESS, U256, + u256_to_h256, utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, + StorageLogKind, BOOTLOADER_ADDRESS, U256, }; -use zksync_utils::u256_to_h256; use super::OracleWithHistory; use crate::{ diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs index a2ca08a7ef96..59aa837cd8fb 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs @@ -8,8 +8,9 @@ use zk_evm_1_3_3::{ vm_state::VmLocalState, }; use zksync_system_constants::{PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; -use zksync_types::{l2_to_l1_log::L2ToL1Log, L1BatchNumber, StorageKey, U256}; -use zksync_utils::{bytecode::bytecode_len_in_bytes, ceil_div_u256, u256_to_h256}; +use zksync_types::{ + ceil_div_u256, l2_to_l1_log::L2ToL1Log, u256_to_h256, L1BatchNumber, StorageKey, U256, +}; use crate::{ interface::{ @@ -17,6 +18,7 @@ use crate::{ L1BatchEnv, Refunds, VmEvent, VmExecutionResultAndLogs, }, tracers::dynamic::vm_1_3_3::DynTracer, + utils::bytecode::bytecode_len_in_bytes, vm_virtual_blocks::{ bootloader_state::BootloaderState, constants::{BOOTLOADER_HEAP_PAGE, 
OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET}, @@ -327,7 +329,7 @@ pub(crate) fn pubdata_published( let published_bytecode_bytes: u32 = VmEvent::extract_published_bytecodes(&events) .iter() - .map(|bytecodehash| bytecode_len_in_bytes(*bytecodehash) as u32 + PUBLISH_BYTECODE_OVERHEAD) + .map(|bytecode_hash| bytecode_len_in_bytes(bytecode_hash) + PUBLISH_BYTECODE_OVERHEAD) .sum(); storage_writes_pubdata_published diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs index ef8219ec2b4d..6db2bac819df 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs @@ -9,8 +9,7 @@ use zksync_system_constants::{ ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, SHA256_PRECOMPILE_ADDRESS, }; -use zksync_types::U256; -use zksync_utils::u256_to_h256; +use zksync_types::{u256_to_h256, U256}; use crate::vm_virtual_blocks::{ constants::{ diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs index c96004163a65..d13304c93285 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs @@ -1,19 +1,24 @@ use std::convert::TryInto; use zksync_types::{ + address_to_h256, + bytecode::BytecodeHash, ethabi::{encode, Address, Token}, fee::{encoding_len, Fee}, + h256_to_u256, l1::is_l1_tx_type, l2::{L2Tx, TransactionType}, transaction_request::{PaymasterParams, TransactionRequest}, web3::Bytes, Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, Nonce, Transaction, H256, U256, }; -use zksync_utils::{address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; -use crate::vm_virtual_blocks::{ - constants::MAX_GAS_PER_PUBDATA_BYTE, - utils::overhead::{get_amortized_overhead, OverheadCoefficients}, +use crate::{ + utils::bytecode::bytes_to_be_words, + vm_virtual_blocks::{ + constants::MAX_GAS_PER_PUBDATA_BYTE, + utils::overhead::{get_amortized_overhead, OverheadCoefficients}, + }, }; /// This structure represents the data that is used by @@ -190,16 +195,13 @@ impl TransactionData { let factory_deps_hashes = self .factory_deps .iter() - .map(|dep| h256_to_u256(hash_bytecode(dep))) + .map(|dep| BytecodeHash::for_bytecode(dep).value_u256()) .collect(); self.abi_encode_with_custom_factory_deps(factory_deps_hashes) } pub(crate) fn into_tokens(self) -> Vec { - let bytes = self.abi_encode(); - assert!(bytes.len() % 32 == 0); - - bytes_to_be_words(bytes) + bytes_to_be_words(&self.abi_encode()) } pub(crate) fn effective_gas_price_per_pubdata(&self, block_gas_price_per_pubdata: u32) -> u32 { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/vm_state.rs index d26acc4e9301..d1509bd016d8 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/vm_state.rs @@ -11,14 +11,14 @@ use zk_evm_1_3_3::{ }, }; use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::{block::L2BlockHasher, Address, L2BlockNumber}; -use zksync_utils::h256_to_u256; +use zksync_types::{block::L2BlockHasher, h256_to_u256, Address, 
L2BlockNumber}; use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, L1BatchEnv, L2Block, SystemEnv, }, + utils::bytecode::bytes_to_be_words, vm_virtual_blocks::{ bootloader_state::BootloaderState, constants::BOOTLOADER_HEAP_PAGE, @@ -89,11 +89,7 @@ pub(crate) fn new_vm_state( decommittment_processor.populate( vec![( h256_to_u256(system_env.base_system_smart_contracts.default_aa.hash), - system_env - .base_system_smart_contracts - .default_aa - .code - .clone(), + bytes_to_be_words(&system_env.base_system_smart_contracts.default_aa.code), )], Timestamp(0), ); @@ -101,11 +97,7 @@ pub(crate) fn new_vm_state( memory.populate( vec![( BOOTLOADER_CODE_PAGE, - system_env - .base_system_smart_contracts - .bootloader - .code - .clone(), + bytes_to_be_words(&system_env.base_system_smart_contracts.bootloader.code), )], Timestamp(0), ); diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/types/l1_batch_env.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/types/l1_batch_env.rs index f86d8749c9ed..08fe00741189 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/types/l1_batch_env.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/types/l1_batch_env.rs @@ -1,5 +1,4 @@ -use zksync_types::U256; -use zksync_utils::{address_to_u256, h256_to_u256}; +use zksync_types::{address_to_u256, h256_to_u256, U256}; use crate::{interface::L1BatchEnv, vm_virtual_blocks::utils::fee::get_batch_base_fee}; diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/fee.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/fee.rs index a53951a851e1..e9d46570983d 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/fee.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/fee.rs @@ -1,6 +1,5 @@ //! Utility functions for vm use zksync_types::fee_model::L1PeggedBatchFeeModelInput; -use zksync_utils::ceil_div; use crate::{ interface::L1BatchEnv, @@ -12,8 +11,11 @@ use crate::{ /// Calculates the amount of gas required to publish one byte of pubdata pub(crate) fn base_fee_to_gas_per_pubdata(l1_gas_price: u64, base_fee: u64) -> u64 { let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price); - - ceil_div(eth_price_per_pubdata_byte, base_fee) + if eth_price_per_pubdata_byte == 0 { + 0 + } else { + eth_price_per_pubdata_byte.div_ceil(base_fee) + } } /// Calculates the base fee and gas per pubdata for the given L1 gas price. @@ -31,7 +33,7 @@ pub(crate) fn derive_base_fee_and_gas_per_pubdata( // publish enough public data while compensating us for it. 
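
For readers tracking the fee hunk above: `zksync_utils::ceil_div` is replaced by the standard library's `u64::div_ceil`, with an explicit zero-dividend guard (presumably preserving the old helper's behavior of returning 0 for a zero numerator without ever dividing). A minimal standalone sketch of the same rounding, using illustrative numbers rather than real chain parameters:

```rust
/// Sketch of the pubdata fee rounding introduced above (assumed values).
fn gas_per_pubdata(eth_price_per_pubdata_byte: u64, base_fee: u64) -> u64 {
    if eth_price_per_pubdata_byte == 0 {
        // Guard first: `div_ceil` would panic on a zero `base_fee`,
        // while a zero dividend should simply yield zero gas.
        0
    } else {
        eth_price_per_pubdata_byte.div_ceil(base_fee)
    }
}

fn main() {
    assert_eq!(gas_per_pubdata(0, 0), 0); // guarded case
    assert_eq!(gas_per_pubdata(17, 4), 5); // ceil(17 / 4)
}
```

The diff resumes with the second call site of the same helper below.
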
let base_fee = std::cmp::max( fair_l2_gas_price, - ceil_div(eth_price_per_pubdata_byte, MAX_GAS_PER_PUBDATA_BYTE), + eth_price_per_pubdata_byte.div_ceil(MAX_GAS_PER_PUBDATA_BYTE), ); ( diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/l2_blocks.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/l2_blocks.rs index ff5536ae0b97..1095abd82db1 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/l2_blocks.rs @@ -4,9 +4,9 @@ use zksync_system_constants::{ SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES, }; use zksync_types::{ - block::unpack_block_info, web3::keccak256, AccountTreeId, L2BlockNumber, StorageKey, H256, U256, + block::unpack_block_info, h256_to_u256, u256_to_h256, web3::keccak256, AccountTreeId, + L2BlockNumber, StorageKey, H256, U256, }; -use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::interface::{ storage::{ReadStorage, StoragePtr}, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs index cba4700002bb..6c79c05bc5b2 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs @@ -1,7 +1,6 @@ use zk_evm_1_3_3::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; use zksync_system_constants::MAX_L2_TX_GAS_LIMIT; -use zksync_types::{l1::is_l1_tx_type, U256}; -use zksync_utils::ceil_div_u256; +use zksync_types::{ceil_div_u256, l1::is_l1_tx_type, U256}; use crate::vm_virtual_blocks::constants::{ BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, MAX_TXS_IN_BLOCK, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 28c09590f2ad..a2d18e10de44 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -1,12 +1,15 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, vm_latest::HistoryEnabled, @@ -74,18 +77,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { - self.push_transaction_with_compression(tx, true) + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. 
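
The `VmInterface` change above makes `push_transaction` return a `PushTransactionResult` carrying the compressed bytecodes of the pushed transaction instead of `()`. A self-contained mock of the calling pattern; the types below are simplified stand-ins, not the real zksync ones:

```rust
use std::borrow::Cow;

// Stand-in for the real `CompressedBytecodeInfo`.
#[derive(Debug, Clone)]
struct CompressedBytecodeInfo {
    original: Vec<u8>,
    compressed: Vec<u8>,
}

// Stand-in for `PushTransactionResult<'_>`: borrows from the VM so callers
// can inspect compression results without cloning eagerly.
struct PushTransactionResult<'a> {
    compressed_bytecodes: Cow<'a, [CompressedBytecodeInfo]>,
}

struct MockVm {
    bootloader_compressed: Vec<CompressedBytecodeInfo>,
}

impl MockVm {
    fn push_transaction(&mut self, _tx: &str) -> PushTransactionResult<'_> {
        // A real VM would compress the tx's factory deps here.
        PushTransactionResult {
            compressed_bytecodes: Cow::Borrowed(&self.bootloader_compressed),
        }
    }
}

fn main() {
    let mut vm = MockVm {
        bootloader_compressed: vec![CompressedBytecodeInfo {
            original: vec![0xfe; 64],
            compressed: vec![0xfe, 64],
        }],
    };
    let result = vm.push_transaction("transfer");
    for info in result.compressed_bytecodes.iter() {
        println!("{} -> {} bytes", info.original.len(), info.compressed.len());
    }
}
```
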
fn inspect( &mut self, tracer: &mut TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode) + self.inspect_inner(tracer, execution_mode.into()) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -117,8 +125,8 @@ impl VmInterface for Vm { } } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 897070345232..9de99a7eb116 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -1,18 +1,20 @@ -use std::mem; +use std::{mem, rc::Rc}; use zksync_types::{vm::VmVersion, ProtocolVersionId, Transaction}; use zksync_vm2::interface::Tracer; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::history_mode::HistoryMode, interface::{ storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, utils::ShadowVm, - BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, - VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, + SystemEnv, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, tracers::TracerDispatcher, + vm_fast::FastVmVersion, vm_latest::HistoryEnabled, }; @@ -55,8 +57,7 @@ macro_rules! 
dispatch_legacy_vm { impl VmInterface for LegacyVmInstance { type TracerDispatcher = TracerDispatcher, H>; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { dispatch_legacy_vm!(self.push_transaction(tx)) } @@ -64,7 +65,7 @@ impl VmInterface for LegacyVmInstance { fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { dispatch_legacy_vm!(self.inspect(&mut mem::take(dispatcher).into(), execution_mode)) } @@ -88,8 +89,8 @@ impl VmInterface for LegacyVmInstance { } /// Return the results of execution of all batch - fn finish_batch(&mut self) -> FinishedL1Batch { - dispatch_legacy_vm!(self.finish_batch()) + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + dispatch_legacy_vm!(self.finish_batch(pubdata_builder)) } } @@ -132,7 +133,7 @@ impl LegacyVmInstance { l1_batch_env, system_env, storage_view, - crate::vm_m5::vm_instance::MultiVMSubversion::V1, + crate::vm_m5::vm_instance::MultiVmSubversion::V1, ); Self::VmM5(vm) } @@ -141,7 +142,7 @@ impl LegacyVmInstance { l1_batch_env, system_env, storage_view, - crate::vm_m5::vm_instance::MultiVMSubversion::V2, + crate::vm_m5::vm_instance::MultiVmSubversion::V2, ); Self::VmM5(vm) } @@ -150,7 +151,7 @@ impl LegacyVmInstance { l1_batch_env, system_env, storage_view, - crate::vm_m6::vm_instance::MultiVMSubversion::V1, + crate::vm_m6::vm_instance::MultiVmSubversion::V1, ); Self::VmM6(vm) } @@ -159,7 +160,7 @@ impl LegacyVmInstance { l1_batch_env, system_env, storage_view, - crate::vm_m6::vm_instance::MultiVMSubversion::V2, + crate::vm_m6::vm_instance::MultiVmSubversion::V2, ); Self::VmM6(vm) } @@ -194,7 +195,7 @@ impl LegacyVmInstance { l1_batch_env, system_env, storage_view, - crate::vm_latest::MultiVMSubversion::SmallBootloaderMemory, + crate::vm_latest::MultiVmSubversion::SmallBootloaderMemory, ); Self::Vm1_5_0(vm) } @@ -203,7 +204,16 @@ impl LegacyVmInstance { l1_batch_env, system_env, storage_view, - crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory, + crate::vm_latest::MultiVmSubversion::IncreasedBootloaderMemory, + ); + Self::Vm1_5_0(vm) + } + VmVersion::VmGateway => { + let vm = crate::vm_latest::Vm::new_with_subversion( + l1_batch_env, + system_env, + storage_view, + crate::vm_latest::MultiVmSubversion::Gateway, ); Self::Vm1_5_0(vm) } @@ -225,7 +235,7 @@ pub type ShadowedFastVm = ShadowVm< /// Fast VM variants. #[derive(Debug)] -pub enum FastVmInstance { +pub enum FastVmInstance { /// Fast VM running in isolation. Fast(crate::vm_fast::Vm, Tr>), /// Fast VM shadowed by the latest legacy VM. 
@@ -247,14 +257,14 @@ impl VmInterface for FastVmInsta Tr, ); - fn push_transaction(&mut self, tx: Transaction) { - dispatch_fast_vm!(self.push_transaction(tx)); + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + dispatch_fast_vm!(self.push_transaction(tx)) } fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { match self { Self::Fast(vm) => vm.inspect(&mut tracer.1, execution_mode), @@ -284,8 +294,8 @@ impl VmInterface for FastVmInsta } } - fn finish_batch(&mut self) -> FinishedL1Batch { - dispatch_fast_vm!(self.finish_batch()) + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + dispatch_fast_vm!(self.finish_batch(pubdata_builder)) } } @@ -331,8 +341,5 @@ impl FastVmInstance { /// Checks whether the protocol version is supported by the fast VM. pub fn is_supported_by_fast_vm(protocol_version: ProtocolVersionId) -> bool { - matches!( - protocol_version.into(), - VmVersion::Vm1_5_0IncreasedBootloaderMemory - ) + FastVmVersion::try_from(VmVersion::from(protocol_version)).is_ok() } diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs index 308cd65427fb..3484f2dad347 100644 --- a/core/lib/object_store/src/file.rs +++ b/core/lib/object_store/src/file.rs @@ -42,7 +42,6 @@ impl FileBackedObjectStore { Bucket::SchedulerWitnessJobsFri, Bucket::ProofsFri, Bucket::StorageSnapshot, - Bucket::TeeVerifierInput, Bucket::VmDumps, ] { let bucket_path = format!("{base_dir}/{bucket}"); diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs index 740e8d76e246..0859d58d04be 100644 --- a/core/lib/object_store/src/raw.rs +++ b/core/lib/object_store/src/raw.rs @@ -19,7 +19,6 @@ pub enum Bucket { ProofsTee, StorageSnapshot, DataAvailability, - TeeVerifierInput, VmDumps, } @@ -39,7 +38,6 @@ impl Bucket { Self::ProofsTee => "proofs_tee", Self::StorageSnapshot => "storage_logs_snapshots", Self::DataAvailability => "data_availability", - Self::TeeVerifierInput => "tee_verifier_inputs", Self::VmDumps => "vm_dumps", } } diff --git a/core/lib/protobuf_config/Cargo.toml b/core/lib/protobuf_config/Cargo.toml index 87a0a63567ba..92d9bd53978c 100644 --- a/core/lib/protobuf_config/Cargo.toml +++ b/core/lib/protobuf_config/Cargo.toml @@ -26,7 +26,6 @@ rand.workspace = true hex.workspace = true secrecy.workspace = true tracing.workspace = true -time.workspace = true [build-dependencies] zksync_protobuf_build.workspace = true diff --git a/core/lib/protobuf_config/src/contract_verifier.rs b/core/lib/protobuf_config/src/contract_verifier.rs index e0b0517ea0f6..3fb7cfe0bdf7 100644 --- a/core/lib/protobuf_config/src/contract_verifier.rs +++ b/core/lib/protobuf_config/src/contract_verifier.rs @@ -10,29 +10,19 @@ impl ProtoRepr for proto::ContractVerifier { Ok(Self::Type { compilation_timeout: *required(&self.compilation_timeout) .context("compilation_timeout")?, - polling_interval: self.polling_interval, prometheus_port: required(&self.prometheus_port) .and_then(|x| Ok((*x).try_into()?)) .context("prometheus_port")?, - url: required(&self.url).cloned().context("url")?, port: required(&self.port) .and_then(|x| (*x).try_into().context("overflow")) .context("port")?, - threads_per_server: self - .threads_per_server - .map(|a| a.try_into()) - .transpose() - .context("threads_per_server")?, }) } fn build(this: &Self::Type) -> Self { Self { port: Some(this.port as u32), - url: Some(this.url.clone()), compilation_timeout: 
Some(this.compilation_timeout), - polling_interval: this.polling_interval, - threads_per_server: this.threads_per_server.map(|a| a as u32), prometheus_port: Some(this.prometheus_port.into()), } } diff --git a/core/lib/protobuf_config/src/contracts.rs b/core/lib/protobuf_config/src/contracts.rs index 84c404367503..660246928edb 100644 --- a/core/lib/protobuf_config/src/contracts.rs +++ b/core/lib/protobuf_config/src/contracts.rs @@ -76,6 +76,12 @@ impl ProtoRepr for proto::Contracts { .map(|x| parse_h160(x)) .transpose() .context("l2_shared_bridge_addr")?, + l2_legacy_shared_bridge_addr: l2 + .legacy_shared_bridge_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("l2_legacy_shared_bridge_addr")?, l1_weth_bridge_proxy_addr: weth_bridge .as_ref() .and_then(|bridge| bridge.l1_address.as_ref().map(|x| parse_h160(x))) @@ -92,6 +98,12 @@ impl ProtoRepr for proto::Contracts { .map(|x| parse_h160(x)) .transpose() .context("l2_testnet_paymaster_addr")?, + l2_timestamp_asserter_addr: l2 + .timestamp_asserter_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("l2_timestamp_asserter_addr")?, l1_multicall3_addr: required(&l1.multicall3_addr) .and_then(|x| parse_h160(x)) .context("l1_multicall3_addr")?, @@ -107,6 +119,12 @@ impl ProtoRepr for proto::Contracts { .map(|x| parse_h160(x)) .transpose() .context("chain_admin_addr")?, + l2_da_validator_addr: l2 + .da_validator_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("l2_da_validator_addr")?, }) } @@ -142,6 +160,13 @@ impl ProtoRepr for proto::Contracts { }), l2: Some(proto::L2 { testnet_paymaster_addr: this.l2_testnet_paymaster_addr.map(|a| format!("{:?}", a)), + da_validator_addr: this.l2_da_validator_addr.map(|a| format!("{:?}", a)), + legacy_shared_bridge_addr: this + .l2_legacy_shared_bridge_addr + .map(|a| format!("{:?}", a)), + timestamp_asserter_addr: this + .l2_timestamp_asserter_addr + .map(|a| format!("{:?}", a)), }), bridges: Some(proto::Bridges { shared: Some(proto::Bridge { diff --git a/core/lib/protobuf_config/src/da_client.rs b/core/lib/protobuf_config/src/da_client.rs index 1499e88efb4c..341a6a9e4f43 100644 --- a/core/lib/protobuf_config/src/da_client.rs +++ b/core/lib/protobuf_config/src/da_client.rs @@ -1,10 +1,12 @@ use anyhow::Context; -use zksync_config::{ - configs::{ - da_client::DAClientConfig::{Avail, ObjectStore}, - {self}, +use zksync_config::configs::{ + self, + da_client::{ + avail::{AvailClientConfig, AvailConfig, AvailDefaultConfig, AvailGasRelayConfig}, + celestia::CelestiaConfig, + eigen::EigenConfig, + DAClientConfig::{Avail, Celestia, Eigen, ObjectStore}, }, - AvailConfig, }; use zksync_protobuf::{required, ProtoRepr}; @@ -18,15 +20,44 @@ impl ProtoRepr for proto::DataAvailabilityClient { let client = match config { proto::data_availability_client::Config::Avail(conf) => Avail(AvailConfig { - api_node_url: required(&conf.api_node_url) - .context("api_node_url")? - .clone(), bridge_api_url: required(&conf.bridge_api_url) .context("bridge_api_url")? .clone(), - app_id: *required(&conf.app_id).context("app_id")?, - timeout: *required(&conf.timeout).context("timeout")? as usize, - max_retries: *required(&conf.max_retries).context("max_retries")? as usize, + timeout_ms: *required(&conf.timeout_ms).context("timeout_ms")? 
as usize, + config: match conf.config.as_ref() { + Some(proto::avail_config::Config::FullClient(full_client_conf)) => { + AvailClientConfig::FullClient(AvailDefaultConfig { + api_node_url: required(&full_client_conf.api_node_url) + .context("api_node_url")? + .clone(), + app_id: *required(&full_client_conf.app_id).context("app_id")?, + }) + } + Some(proto::avail_config::Config::GasRelay(gas_relay_conf)) => { + AvailClientConfig::GasRelay(AvailGasRelayConfig { + gas_relay_api_url: required(&gas_relay_conf.gas_relay_api_url) + .context("gas_relay_api_url")? + .clone(), + max_retries: *required(&gas_relay_conf.max_retries) + .context("max_retries")? + as usize, + }) + } + None => return Err(anyhow::anyhow!("Invalid Avail DA configuration")), + }, + }), + proto::data_availability_client::Config::Celestia(conf) => Celestia(CelestiaConfig { + api_node_url: required(&conf.api_node_url).context("namespace")?.clone(), + namespace: required(&conf.namespace).context("namespace")?.clone(), + chain_id: required(&conf.chain_id).context("chain_id")?.clone(), + timeout_ms: *required(&conf.timeout_ms).context("timeout_ms")?, + }), + proto::data_availability_client::Config::Eigen(conf) => Eigen(EigenConfig { + rpc_node_url: required(&conf.rpc_node_url) + .context("rpc_node_url")? + .clone(), + inclusion_polling_interval_ms: *required(&conf.inclusion_polling_interval_ms) + .context("inclusion_polling_interval_ms")?, }), proto::data_availability_client::Config::ObjectStore(conf) => { ObjectStore(object_store_proto::ObjectStore::read(conf)?) @@ -37,23 +68,44 @@ impl ProtoRepr for proto::DataAvailabilityClient { } fn build(this: &Self::Type) -> Self { - match &this { - Avail(config) => Self { - config: Some(proto::data_availability_client::Config::Avail( - proto::AvailConfig { - api_node_url: Some(config.api_node_url.clone()), - bridge_api_url: Some(config.bridge_api_url.clone()), - app_id: Some(config.app_id), - timeout: Some(config.timeout as u64), - max_retries: Some(config.max_retries as u64), - }, - )), - }, - ObjectStore(config) => Self { - config: Some(proto::data_availability_client::Config::ObjectStore( - object_store_proto::ObjectStore::build(config), - )), - }, + let config = match &this { + Avail(config) => proto::data_availability_client::Config::Avail(proto::AvailConfig { + bridge_api_url: Some(config.bridge_api_url.clone()), + timeout_ms: Some(config.timeout_ms as u64), + config: match &config.config { + AvailClientConfig::FullClient(conf) => Some( + proto::avail_config::Config::FullClient(proto::AvailClientConfig { + api_node_url: Some(conf.api_node_url.clone()), + app_id: Some(conf.app_id), + }), + ), + AvailClientConfig::GasRelay(conf) => Some( + proto::avail_config::Config::GasRelay(proto::AvailGasRelayConfig { + gas_relay_api_url: Some(conf.gas_relay_api_url.clone()), + max_retries: Some(conf.max_retries as u64), + }), + ), + }, + }), + Celestia(config) => { + proto::data_availability_client::Config::Celestia(proto::CelestiaConfig { + api_node_url: Some(config.api_node_url.clone()), + namespace: Some(config.namespace.clone()), + chain_id: Some(config.chain_id.clone()), + timeout_ms: Some(config.timeout_ms), + }) + } + Eigen(config) => proto::data_availability_client::Config::Eigen(proto::EigenConfig { + rpc_node_url: Some(config.rpc_node_url.clone()), + inclusion_polling_interval_ms: Some(config.inclusion_polling_interval_ms), + }), + ObjectStore(config) => proto::data_availability_client::Config::ObjectStore( + object_store_proto::ObjectStore::build(config), + ), + }; + + Self { + config: 
Some(config), } } } diff --git a/core/lib/protobuf_config/src/eth.rs b/core/lib/protobuf_config/src/eth.rs index c1d95bd30d2b..d4ea1d9f2697 100644 --- a/core/lib/protobuf_config/src/eth.rs +++ b/core/lib/protobuf_config/src/eth.rs @@ -1,6 +1,7 @@ use anyhow::Context as _; use zksync_config::configs::{self}; use zksync_protobuf::{required, ProtoRepr}; +use zksync_types::pubdata_da::PubdataSendingMode; use crate::{proto::eth as proto, read_optional_repr}; @@ -25,23 +26,21 @@ impl proto::ProofSendingMode { } impl proto::PubdataSendingMode { - fn new(x: &configs::eth_sender::PubdataSendingMode) -> Self { - use configs::eth_sender::PubdataSendingMode as From; + fn new(x: &PubdataSendingMode) -> Self { match x { - From::Calldata => Self::Calldata, - From::Blobs => Self::Blobs, - From::Custom => Self::Custom, - From::RelayedL2Calldata => Self::RelayedL2Calldata, + PubdataSendingMode::Calldata => Self::Calldata, + PubdataSendingMode::Blobs => Self::Blobs, + PubdataSendingMode::Custom => Self::Custom, + PubdataSendingMode::RelayedL2Calldata => Self::RelayedL2Calldata, } } - fn parse(&self) -> configs::eth_sender::PubdataSendingMode { - use configs::eth_sender::PubdataSendingMode as To; + fn parse(&self) -> PubdataSendingMode { match self { - Self::Calldata => To::Calldata, - Self::Blobs => To::Blobs, - Self::Custom => To::Custom, - Self::RelayedL2Calldata => To::RelayedL2Calldata, + Self::Calldata => PubdataSendingMode::Calldata, + Self::Blobs => PubdataSendingMode::Blobs, + Self::Custom => PubdataSendingMode::Custom, + Self::RelayedL2Calldata => PubdataSendingMode::RelayedL2Calldata, } } } diff --git a/core/lib/protobuf_config/src/experimental.rs b/core/lib/protobuf_config/src/experimental.rs index 63fa0ca51eb5..8dfbf413d5a1 100644 --- a/core/lib/protobuf_config/src/experimental.rs +++ b/core/lib/protobuf_config/src/experimental.rs @@ -7,6 +7,14 @@ use zksync_protobuf::{repr::ProtoRepr, required}; use crate::{proto::experimental as proto, read_optional_repr}; +fn parse_vm_mode(raw: Option) -> anyhow::Result { + Ok(raw + .map(proto::FastVmMode::try_from) + .transpose() + .context("fast_vm_mode")? + .map_or_else(FastVmMode::default, |mode| mode.parse())) +} + impl ProtoRepr for proto::Db { type Type = configs::ExperimentalDBConfig; @@ -22,13 +30,12 @@ impl ProtoRepr for proto::Db { .map(|count| NonZeroU32::new(count).context("cannot be 0")) .transpose() .context("state_keeper_db_max_open_files")?, - protective_reads_persistence_enabled: self - .reads_persistence_enabled - .unwrap_or_default(), + protective_reads_persistence_enabled: self.reads_persistence_enabled.unwrap_or(false), processing_delay_ms: self.processing_delay_ms.unwrap_or_default(), include_indices_and_filters_in_block_cache: self .include_indices_and_filters_in_block_cache - .unwrap_or_default(), + .unwrap_or(false), + merkle_tree_repair_stale_keys: self.merkle_tree_repair_stale_keys.unwrap_or(false), }) } @@ -47,6 +54,7 @@ impl ProtoRepr for proto::Db { include_indices_and_filters_in_block_cache: Some( this.include_indices_and_filters_in_block_cache, ), + merkle_tree_repair_stale_keys: Some(this.merkle_tree_repair_stale_keys), } } } @@ -105,12 +113,8 @@ impl ProtoRepr for proto::Vm { fn read(&self) -> anyhow::Result { Ok(Self::Type { playground: read_optional_repr(&self.playground).unwrap_or_default(), - state_keeper_fast_vm_mode: self - .state_keeper_fast_vm_mode - .map(proto::FastVmMode::try_from) - .transpose() - .context("fast_vm_mode")? 
- .map_or_else(FastVmMode::default, |mode| mode.parse()), + state_keeper_fast_vm_mode: parse_vm_mode(self.state_keeper_fast_vm_mode)?, + api_fast_vm_mode: parse_vm_mode(self.api_fast_vm_mode)?, }) } @@ -120,6 +124,7 @@ impl ProtoRepr for proto::Vm { state_keeper_fast_vm_mode: Some( proto::FastVmMode::new(this.state_keeper_fast_vm_mode).into(), ), + api_fast_vm_mode: Some(proto::FastVmMode::new(this.api_fast_vm_mode).into()), } } } diff --git a/core/lib/protobuf_config/src/external_price_api_client.rs b/core/lib/protobuf_config/src/external_price_api_client.rs index e5ed809a1284..dbc341c1865a 100644 --- a/core/lib/protobuf_config/src/external_price_api_client.rs +++ b/core/lib/protobuf_config/src/external_price_api_client.rs @@ -17,6 +17,9 @@ impl ProtoRepr for proto::ExternalPriceApiClient { numerator: self.forced_numerator, denominator: self.forced_denominator, fluctuation: self.forced_fluctuation, + next_value_fluctuation: self.forced_next_value_fluctuation.unwrap_or( + configs::external_price_api_client::DEFAULT_FORCED_NEXT_VALUE_FLUCTUATION, + ), }), }, ) @@ -26,6 +29,7 @@ impl ProtoRepr for proto::ExternalPriceApiClient { let numerator = this.forced.as_ref().and_then(|x| x.numerator); let denominator = this.forced.as_ref().and_then(|x| x.denominator); let fluctuation = this.forced.as_ref().and_then(|x| x.fluctuation); + let next_value_fluctuation = this.forced.as_ref().map(|x| x.next_value_fluctuation); Self { source: Some(this.source.clone()), @@ -35,6 +39,7 @@ impl ProtoRepr for proto::ExternalPriceApiClient { forced_numerator: numerator, forced_denominator: denominator, forced_fluctuation: fluctuation, + forced_next_value_fluctuation: next_value_fluctuation, } } } diff --git a/core/lib/protobuf_config/src/general.rs b/core/lib/protobuf_config/src/general.rs index b73539a0897f..83b4c84f20bf 100644 --- a/core/lib/protobuf_config/src/general.rs +++ b/core/lib/protobuf_config/src/general.rs @@ -46,6 +46,7 @@ impl ProtoRepr for proto::GeneralConfig { ), experimental_vm_config: read_optional_repr(&self.experimental_vm), prover_job_monitor_config: read_optional_repr(&self.prover_job_monitor), + timestamp_asserter_config: read_optional_repr(&self.timestamp_asserter), }) } @@ -106,6 +107,10 @@ impl ProtoRepr for proto::GeneralConfig { .prover_job_monitor_config .as_ref() .map(ProtoRepr::build), + timestamp_asserter: this + .timestamp_asserter_config + .as_ref() + .map(ProtoRepr::build), } } } diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index 68f7f699de20..90c0ba071c0b 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -28,7 +28,6 @@ mod observability; mod proof_data_handler; pub mod proto; mod prover; -mod prover_autoscaler; mod prover_job_monitor; mod pruning; mod secrets; @@ -36,6 +35,7 @@ mod snapshot_recovery; mod snapshots_creator; #[cfg(test)] mod tests; +mod timestamp_asserter; mod utils; mod vm_runner; mod wallets; diff --git a/core/lib/protobuf_config/src/proof_data_handler.rs b/core/lib/protobuf_config/src/proof_data_handler.rs index 4b7bd2fd7c32..c01e163bd771 100644 --- a/core/lib/protobuf_config/src/proof_data_handler.rs +++ b/core/lib/protobuf_config/src/proof_data_handler.rs @@ -1,6 +1,7 @@ use anyhow::Context as _; use zksync_config::configs; use zksync_protobuf::{repr::ProtoRepr, required}; +use zksync_types::L1BatchNumber; use crate::proto::prover as proto; @@ -14,9 +15,21 @@ impl ProtoRepr for proto::ProofDataHandler { proof_generation_timeout_in_secs: 
required(&self.proof_generation_timeout_in_secs) .and_then(|x| Ok((*x).try_into()?)) .context("proof_generation_timeout_in_secs")?, - tee_support: required(&self.tee_support) - .copied() - .context("tee_support")?, + tee_config: configs::TeeConfig { + tee_support: self + .tee_support + .unwrap_or_else(configs::TeeConfig::default_tee_support), + first_tee_processed_batch: self + .first_tee_processed_batch + .map(|x| L1BatchNumber(x as u32)) + .unwrap_or_else(configs::TeeConfig::default_first_tee_processed_batch), + tee_proof_generation_timeout_in_secs: self + .tee_proof_generation_timeout_in_secs + .map(|x| x as u16) + .unwrap_or_else( + configs::TeeConfig::default_tee_proof_generation_timeout_in_secs, + ), + }, }) } @@ -24,7 +37,11 @@ impl ProtoRepr for proto::ProofDataHandler { Self { http_port: Some(this.http_port.into()), proof_generation_timeout_in_secs: Some(this.proof_generation_timeout_in_secs.into()), - tee_support: Some(this.tee_support), + tee_support: Some(this.tee_config.tee_support), + first_tee_processed_batch: Some(this.tee_config.first_tee_processed_batch.0 as u64), + tee_proof_generation_timeout_in_secs: Some( + this.tee_config.tee_proof_generation_timeout_in_secs.into(), + ), } } } diff --git a/core/lib/protobuf_config/src/proto/config/contract_verifier.proto b/core/lib/protobuf_config/src/proto/config/contract_verifier.proto index 31b1d3ed2ec4..8493274c6911 100644 --- a/core/lib/protobuf_config/src/proto/config/contract_verifier.proto +++ b/core/lib/protobuf_config/src/proto/config/contract_verifier.proto @@ -4,9 +4,10 @@ package zksync.config.contract_verifier; message ContractVerifier{ optional uint32 port = 1; // required; u16 - optional string url = 2; // required optional uint64 compilation_timeout = 3; - optional uint64 polling_interval = 4; - optional uint32 threads_per_server = 5; optional uint32 prometheus_port = 6; + + reserved 2; reserved "url"; + reserved 4; reserved "polling_interval"; + reserved 5; reserved "threads_per_server"; } diff --git a/core/lib/protobuf_config/src/proto/config/contracts.proto b/core/lib/protobuf_config/src/proto/config/contracts.proto index f4488c7901a1..4ae0ee1614f8 100644 --- a/core/lib/protobuf_config/src/proto/config/contracts.proto +++ b/core/lib/protobuf_config/src/proto/config/contracts.proto @@ -21,6 +21,9 @@ message L1 { message L2 { optional string testnet_paymaster_addr = 1; // optional; H160 + optional string da_validator_addr = 2; // optional; H160 + optional string legacy_shared_bridge_addr = 3; // optional; H160 + optional string timestamp_asserter_addr = 4; // optional; H160 } message Bridge { diff --git a/core/lib/protobuf_config/src/proto/config/da_client.proto b/core/lib/protobuf_config/src/proto/config/da_client.proto index d01bda2c8470..0a302120d775 100644 --- a/core/lib/protobuf_config/src/proto/config/da_client.proto +++ b/core/lib/protobuf_config/src/proto/config/da_client.proto @@ -5,12 +5,40 @@ package zksync.config.da_client; import "zksync/config/object_store.proto"; message AvailConfig { - optional string api_node_url = 1; optional string bridge_api_url = 2; - optional uint32 app_id = 4; - optional uint64 timeout = 5; - optional uint64 max_retries = 6; + oneof config { + AvailClientConfig full_client = 7; + AvailGasRelayConfig gas_relay = 8; + } + optional uint64 timeout_ms = 9; + + reserved 1; reserved "api_node_url"; reserved 3; reserved "seed"; + reserved 4; reserved "app_id"; + reserved 5; reserved "timeout"; + reserved 6; reserved "max_retries"; +} + +message AvailClientConfig { + optional string 
api_node_url = 1; + optional uint32 app_id = 2; +} + +message AvailGasRelayConfig { + optional string gas_relay_api_url = 1; + optional uint64 max_retries = 2; +} + +message CelestiaConfig { + optional string api_node_url = 1; + optional string namespace = 2; + optional string chain_id = 3; + optional uint64 timeout_ms = 4; +} + +message EigenConfig { + optional string rpc_node_url = 1; + optional uint64 inclusion_polling_interval_ms = 2; } message DataAvailabilityClient { @@ -18,5 +46,7 @@ message DataAvailabilityClient { oneof config { AvailConfig avail = 1; object_store.ObjectStore object_store = 2; + CelestiaConfig celestia = 3; + EigenConfig eigen = 4; } } diff --git a/core/lib/protobuf_config/src/proto/config/experimental.proto b/core/lib/protobuf_config/src/proto/config/experimental.proto index 5e1d045ca670..22de076ece27 100644 --- a/core/lib/protobuf_config/src/proto/config/experimental.proto +++ b/core/lib/protobuf_config/src/proto/config/experimental.proto @@ -10,7 +10,8 @@ message DB { optional uint32 state_keeper_db_max_open_files = 2; // optional optional bool reads_persistence_enabled = 3; optional uint64 processing_delay_ms = 4; - optional bool include_indices_and_filters_in_block_cache = 5; + optional bool include_indices_and_filters_in_block_cache = 5; // optional; defaults to false + optional bool merkle_tree_repair_stale_keys = 6; // optional; defaults to false } // Experimental part of the Snapshot recovery configuration. @@ -37,4 +38,5 @@ message VmPlayground { message Vm { optional VmPlayground playground = 1; // optional optional FastVmMode state_keeper_fast_vm_mode = 2; // optional; if not set, fast VM is not used + optional FastVmMode api_fast_vm_mode = 3; // optional; if not set, fast VM is not used } diff --git a/core/lib/protobuf_config/src/proto/config/external_price_api_client.proto b/core/lib/protobuf_config/src/proto/config/external_price_api_client.proto index 646bcfbd7647..63f3233c575f 100644 --- a/core/lib/protobuf_config/src/proto/config/external_price_api_client.proto +++ b/core/lib/protobuf_config/src/proto/config/external_price_api_client.proto @@ -10,4 +10,5 @@ message ExternalPriceApiClient { optional uint64 forced_numerator = 5; optional uint64 forced_denominator = 6; optional uint32 forced_fluctuation = 7; + optional uint32 forced_next_value_fluctuation = 8; } diff --git a/core/lib/protobuf_config/src/proto/config/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto index ee70b61b18b3..216272f3f9ad 100644 --- a/core/lib/protobuf_config/src/proto/config/general.proto +++ b/core/lib/protobuf_config/src/proto/config/general.proto @@ -26,6 +26,7 @@ import "zksync/config/external_proof_integration_api.proto"; import "zksync/core/consensus.proto"; import "zksync/config/prover_job_monitor.proto"; import "zksync/config/da_client.proto"; +import "zksync/config/timestamp_asserter.proto"; message GeneralConfig { optional database.Postgres postgres = 1; @@ -62,4 +63,5 @@ message GeneralConfig { optional experimental.Vm experimental_vm = 44; optional prover_job_monitor.ProverJobMonitor prover_job_monitor = 45; optional da_client.DataAvailabilityClient da_client = 46; + optional timestamp_asserter.TimestampAsserter timestamp_asserter = 47; } diff --git a/core/lib/protobuf_config/src/proto/config/prover.proto b/core/lib/protobuf_config/src/proto/config/prover.proto index 4fe3861183bf..392834d25f3d 100644 --- a/core/lib/protobuf_config/src/proto/config/prover.proto +++ b/core/lib/protobuf_config/src/proto/config/prover.proto @@ -107,5 +107,7 @@ 
message WitnessVectorGenerator { message ProofDataHandler { optional uint32 http_port = 1; // required; u16 optional uint32 proof_generation_timeout_in_secs = 2; // required; s - optional bool tee_support = 3; // required + optional bool tee_support = 3; // optional + optional uint64 first_tee_processed_batch = 4; // optional + optional uint32 tee_proof_generation_timeout_in_secs = 5; // optional } diff --git a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto deleted file mode 100644 index e1d11b94d8f1..000000000000 --- a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package zksync.config.prover_autoscaler; - -import "zksync/std.proto"; -import "zksync/config/observability.proto"; - -message ProverAutoscalerConfig { - optional std.Duration graceful_shutdown_timeout = 1; // optional - optional ProverAutoscalerAgentConfig agent_config = 2; // optional - optional ProverAutoscalerScalerConfig scaler_config = 3; // optional - optional observability.Observability observability = 4; // optional -} - -message ProverAutoscalerAgentConfig { - optional uint32 prometheus_port = 1; // required - optional uint32 http_port = 2; // required - repeated string namespaces = 3; // optional - optional string cluster_name = 4; // optional -} - -message ProtocolVersion { - optional string namespace = 1; // required - optional string protocol_version = 2; // required -} - -message ClusterPriority { - optional string cluster = 1; // required - optional uint32 priority = 2; // required -} - -message ProverSpeed { - optional string gpu = 1; // required - optional uint32 speed = 2; // required -} - -message ProverAutoscalerScalerConfig { - optional uint32 prometheus_port = 1; // required - optional std.Duration scaler_run_interval = 2; // optional - optional string prover_job_monitor_url = 3; // required - repeated string agents = 4; // required at least one - repeated ProtocolVersion protocol_versions = 5; // repeated at least one - repeated ClusterPriority cluster_priorities = 6; // optional - repeated ProverSpeed prover_speed = 7; // optional - optional uint32 long_pending_duration_s = 8; // optional -} diff --git a/core/lib/protobuf_config/src/proto/config/secrets.proto b/core/lib/protobuf_config/src/proto/config/secrets.proto index 17b915b3f087..7c9d0f928237 100644 --- a/core/lib/protobuf_config/src/proto/config/secrets.proto +++ b/core/lib/protobuf_config/src/proto/config/secrets.proto @@ -21,11 +21,22 @@ message ConsensusSecrets { message AvailSecret { optional string seed_phrase = 1; + optional string gas_relay_api_key = 2; +} + +message CelestiaSecret { + optional string private_key = 1; +} + +message EigenSecret { + optional string private_key = 1; } message DataAvailabilitySecrets { oneof da_secrets { AvailSecret avail = 1; + CelestiaSecret celestia = 2; + EigenSecret eigen = 3; } } diff --git a/core/lib/protobuf_config/src/proto/config/timestamp_asserter.proto b/core/lib/protobuf_config/src/proto/config/timestamp_asserter.proto new file mode 100644 index 000000000000..c8d0b9d1fec7 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/timestamp_asserter.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package zksync.config.timestamp_asserter; + +message TimestampAsserter { + optional uint32 min_time_till_end_sec = 1; // required; u32 +} diff --git a/core/lib/protobuf_config/src/prover_autoscaler.rs 
b/core/lib/protobuf_config/src/prover_autoscaler.rs deleted file mode 100644 index f7da099cb829..000000000000 --- a/core/lib/protobuf_config/src/prover_autoscaler.rs +++ /dev/null @@ -1,172 +0,0 @@ -use anyhow::Context as _; -use time::Duration; -use zksync_config::configs::{self, prover_autoscaler::Gpu}; -use zksync_protobuf::{read_optional, repr::ProtoRepr, required, ProtoFmt}; - -use crate::{proto::prover_autoscaler as proto, read_optional_repr}; - -impl ProtoRepr for proto::ProverAutoscalerConfig { - type Type = configs::prover_autoscaler::ProverAutoscalerConfig; - fn read(&self) -> anyhow::Result { - Ok(Self::Type { - graceful_shutdown_timeout: read_optional(&self.graceful_shutdown_timeout) - .context("graceful_shutdown_timeout")? - .unwrap_or(Self::Type::default_graceful_shutdown_timeout()), - agent_config: read_optional_repr(&self.agent_config), - scaler_config: read_optional_repr(&self.scaler_config), - observability: read_optional_repr(&self.observability), - }) - } - - fn build(this: &Self::Type) -> Self { - Self { - graceful_shutdown_timeout: Some(ProtoFmt::build(&this.graceful_shutdown_timeout)), - agent_config: this.agent_config.as_ref().map(ProtoRepr::build), - scaler_config: this.scaler_config.as_ref().map(ProtoRepr::build), - observability: this.observability.as_ref().map(ProtoRepr::build), - } - } -} - -impl ProtoRepr for proto::ProverAutoscalerAgentConfig { - type Type = configs::prover_autoscaler::ProverAutoscalerAgentConfig; - fn read(&self) -> anyhow::Result { - Ok(Self::Type { - prometheus_port: required(&self.prometheus_port) - .and_then(|x| Ok((*x).try_into()?)) - .context("prometheus_port")?, - http_port: required(&self.http_port) - .and_then(|x| Ok((*x).try_into()?)) - .context("http_port")?, - namespaces: self.namespaces.to_vec(), - cluster_name: Some("".to_string()), - }) - } - - fn build(this: &Self::Type) -> Self { - Self { - prometheus_port: Some(this.prometheus_port.into()), - http_port: Some(this.http_port.into()), - namespaces: this.namespaces.clone(), - cluster_name: this.cluster_name.clone(), - } - } -} - -impl ProtoRepr for proto::ProverAutoscalerScalerConfig { - type Type = configs::prover_autoscaler::ProverAutoscalerScalerConfig; - fn read(&self) -> anyhow::Result { - Ok(Self::Type { - prometheus_port: required(&self.prometheus_port) - .and_then(|x| Ok((*x).try_into()?)) - .context("prometheus_port")?, - scaler_run_interval: read_optional(&self.scaler_run_interval) - .context("scaler_run_interval")? - .unwrap_or(Self::Type::default_scaler_run_interval()), - prover_job_monitor_url: required(&self.prover_job_monitor_url) - .context("prover_job_monitor_url")? 
- .clone(), - agents: self.agents.to_vec(), - protocol_versions: self - .protocol_versions - .iter() - .enumerate() - .map(|(i, e)| e.read().context(i)) - .collect::>() - .context("protocol_versions")?, - cluster_priorities: self - .cluster_priorities - .iter() - .enumerate() - .map(|(i, e)| e.read().context(i)) - .collect::>() - .context("cluster_priorities")?, - prover_speed: self - .prover_speed - .iter() - .enumerate() - .map(|(i, e)| e.read().context(i)) - .collect::>() - .context("prover_speed")?, - long_pending_duration: match self.long_pending_duration_s { - Some(s) => Duration::seconds(s.into()), - None => Self::Type::default_long_pending_duration(), - }, - }) - } - - fn build(this: &Self::Type) -> Self { - Self { - prometheus_port: Some(this.prometheus_port.into()), - scaler_run_interval: Some(ProtoFmt::build(&this.scaler_run_interval)), - prover_job_monitor_url: Some(this.prover_job_monitor_url.clone()), - agents: this.agents.clone(), - protocol_versions: this - .protocol_versions - .iter() - .map(|(k, v)| proto::ProtocolVersion::build(&(k.clone(), v.clone()))) - .collect(), - cluster_priorities: this - .cluster_priorities - .iter() - .map(|(k, v)| proto::ClusterPriority::build(&(k.clone(), *v))) - .collect(), - prover_speed: this - .prover_speed - .iter() - .map(|(k, v)| proto::ProverSpeed::build(&(*k, *v))) - .collect(), - long_pending_duration_s: Some(this.long_pending_duration.whole_seconds() as u32), - } - } -} - -impl ProtoRepr for proto::ProtocolVersion { - type Type = (String, String); - fn read(&self) -> anyhow::Result { - Ok(( - required(&self.namespace).context("namespace")?.clone(), - required(&self.protocol_version) - .context("protocol_version")? - .clone(), - )) - } - fn build(this: &Self::Type) -> Self { - Self { - namespace: Some(this.0.clone()), - protocol_version: Some(this.1.clone()), - } - } -} - -impl ProtoRepr for proto::ClusterPriority { - type Type = (String, u32); - fn read(&self) -> anyhow::Result { - Ok(( - required(&self.cluster).context("cluster")?.clone(), - *required(&self.priority).context("priority")?, - )) - } - fn build(this: &Self::Type) -> Self { - Self { - cluster: Some(this.0.clone()), - priority: Some(this.1), - } - } -} - -impl ProtoRepr for proto::ProverSpeed { - type Type = (Gpu, u32); - fn read(&self) -> anyhow::Result { - Ok(( - required(&self.gpu).context("gpu")?.parse()?, - *required(&self.speed).context("speed")?, - )) - } - fn build(this: &Self::Type) -> Self { - Self { - gpu: Some(this.0.to_string()), - speed: Some(this.1), - } - } -} diff --git a/core/lib/protobuf_config/src/secrets.rs b/core/lib/protobuf_config/src/secrets.rs index 587351480078..f5bc10a3e340 100644 --- a/core/lib/protobuf_config/src/secrets.rs +++ b/core/lib/protobuf_config/src/secrets.rs @@ -2,20 +2,20 @@ use std::str::FromStr; use anyhow::Context; use secrecy::ExposeSecret; -use zksync_basic_types::{seed_phrase::SeedPhrase, url::SensitiveUrl}; +use zksync_basic_types::{ + secrets::{APIKey, PrivateKey, SeedPhrase}, + url::SensitiveUrl, +}; use zksync_config::configs::{ consensus::{AttesterSecretKey, ConsensusSecrets, NodeSecretKey, ValidatorSecretKey}, - da_client::avail::AvailSecrets, + da_client::{avail::AvailSecrets, celestia::CelestiaSecrets, eigen::EigenSecrets}, secrets::{DataAvailabilitySecrets, Secrets}, DatabaseSecrets, L1Secrets, }; use zksync_protobuf::{required, ProtoRepr}; use crate::{ - proto::{ - secrets as proto, - secrets::{data_availability_secrets::DaSecrets, AvailSecret}, - }, + proto::{secrets as proto, 
secrets::data_availability_secrets::DaSecrets}, read_optional_repr, }; @@ -103,13 +103,40 @@ impl ProtoRepr for proto::DataAvailabilitySecrets { let secrets = required(&self.da_secrets).context("config")?; let client = match secrets { - DaSecrets::Avail(avail_secret) => DataAvailabilitySecrets::Avail(AvailSecrets { - seed_phrase: Some( - SeedPhrase::from_str( - required(&avail_secret.seed_phrase).context("seed_phrase")?, - ) - .unwrap(), - ), + DaSecrets::Avail(avail_secret) => { + let seed_phrase = match avail_secret.seed_phrase.as_ref() { + Some(seed) => match SeedPhrase::from_str(seed) { + Ok(seed) => Some(seed), + Err(_) => None, + }, + None => None, + }; + let gas_relay_api_key = match avail_secret.gas_relay_api_key.as_ref() { + Some(api_key) => match APIKey::from_str(api_key) { + Ok(api_key) => Some(api_key), + Err(_) => None, + }, + None => None, + }; + if seed_phrase.is_none() && gas_relay_api_key.is_none() { + return Err(anyhow::anyhow!( + "At least one of seed_phrase or gas_relay_api_key must be provided" + )); + } + DataAvailabilitySecrets::Avail(AvailSecrets { + seed_phrase, + gas_relay_api_key, + }) + } + DaSecrets::Celestia(celestia) => DataAvailabilitySecrets::Celestia(CelestiaSecrets { + private_key: PrivateKey::from_str( + required(&celestia.private_key).context("private_key")?, + )?, + }), + DaSecrets::Eigen(eigen) => DataAvailabilitySecrets::Eigen(EigenSecrets { + private_key: PrivateKey::from_str( + required(&eigen.private_key).context("private_key")?, + )?, }), }; @@ -133,8 +160,33 @@ impl ProtoRepr for proto::DataAvailabilitySecrets { None }; - Some(DaSecrets::Avail(AvailSecret { seed_phrase })) + let gas_relay_api_key = if config.gas_relay_api_key.is_some() { + Some( + config + .clone() + .gas_relay_api_key + .unwrap() + .0 + .expose_secret() + .to_string(), + ) + } else { + None + }; + + Some(DaSecrets::Avail(proto::AvailSecret { + seed_phrase, + gas_relay_api_key, + })) + } + DataAvailabilitySecrets::Celestia(config) => { + Some(DaSecrets::Celestia(proto::CelestiaSecret { + private_key: Some(config.private_key.0.expose_secret().to_string()), + })) } + DataAvailabilitySecrets::Eigen(config) => Some(DaSecrets::Eigen(proto::EigenSecret { + private_key: Some(config.private_key.0.expose_secret().to_string()), + })), }; Self { diff --git a/core/lib/protobuf_config/src/timestamp_asserter.rs b/core/lib/protobuf_config/src/timestamp_asserter.rs new file mode 100644 index 000000000000..5984caff8c6f --- /dev/null +++ b/core/lib/protobuf_config/src/timestamp_asserter.rs @@ -0,0 +1,19 @@ +use anyhow::Context; +use zksync_config::configs::chain::TimestampAsserterConfig; +use zksync_protobuf::{required, ProtoRepr}; + +impl ProtoRepr for crate::proto::config::timestamp_asserter::TimestampAsserter { + type Type = TimestampAsserterConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + min_time_till_end_sec: *required(&self.min_time_till_end_sec) + .context("timestamp_asserter_min_time_till_end_sec")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + min_time_till_end_sec: Some(this.min_time_till_end_sec), + } + } +} diff --git a/core/lib/prover_interface/Cargo.toml b/core/lib/prover_interface/Cargo.toml index 889b80b4fbee..50671fb3acb4 100644 --- a/core/lib/prover_interface/Cargo.toml +++ b/core/lib/prover_interface/Cargo.toml @@ -11,7 +11,7 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_multivm.workspace = true +zksync_vm_interface.workspace = true zksync_object_store.workspace = true zksync_types.workspace = true diff --git 
a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs index 776cd3141cbe..acf104cc4c61 100644 --- a/core/lib/prover_interface/src/api.rs +++ b/core/lib/prover_interface/src/api.rs @@ -31,7 +31,7 @@ pub enum ProofGenerationDataResponse { } #[derive(Debug, Serialize, Deserialize)] -pub struct TeeProofGenerationDataResponse(pub Option<Box<TeeVerifierInput>>); +pub struct TeeProofGenerationDataResponse(pub Box<TeeVerifierInput>); #[derive(Debug, Serialize, Deserialize)] pub enum SubmitProofResponse { diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 28bc1998312b..48a839dc9217 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -2,12 +2,12 @@ use std::{collections::HashMap, convert::TryInto, fmt::Debug}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, Bytes}; -use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; use zksync_object_store::{_reexports::BoxedError, serialize_using_bincode, Bucket, StoredObject}; use zksync_types::{ basic_fri_types::Eip4844Blobs, block::L2BlockExecutionData, commitment::PubdataParams, witness_block_state::WitnessStorageState, L1BatchNumber, ProtocolVersionId, H256, U256, }; +use zksync_vm_interface::{L1BatchEnv, SystemEnv}; const HASH_LEN: usize = H256::len_bytes(); @@ -136,7 +136,7 @@ impl WitnessInputMerklePaths { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct VMRunWitnessInputData { pub l1_batch_number: L1BatchNumber, pub used_bytecodes: HashMap<U256, Vec<u8>>, @@ -205,7 +205,7 @@ impl StoredObject for VMRunWitnessInputData { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct WitnessInputData { pub vm_run_data: VMRunWitnessInputData, pub merkle_paths: WitnessInputMerklePaths, @@ -254,7 +254,7 @@ impl StoredObject for WitnessInputData { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct L1BatchMetadataHashes { pub root_hash: H256, pub meta_hash: H256, @@ -264,27 +264,30 @@ pub struct L1BatchMetadataHashes { /// Version 1 of the data used as input for the TEE verifier.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct V1TeeVerifierInput { - pub witness_input_merkle_paths: WitnessInputMerklePaths, + pub vm_run_data: VMRunWitnessInputData, + pub merkle_paths: WitnessInputMerklePaths, pub l2_blocks_execution_data: Vec<L2BlockExecutionData>, pub l1_batch_env: L1BatchEnv, pub system_env: SystemEnv, - pub used_contracts: Vec<(H256, Vec<u8>)>, + pub pubdata_params: PubdataParams, } impl V1TeeVerifierInput { pub fn new( - witness_input_merkle_paths: WitnessInputMerklePaths, + vm_run_data: VMRunWitnessInputData, + merkle_paths: WitnessInputMerklePaths, l2_blocks_execution_data: Vec<L2BlockExecutionData>, l1_batch_env: L1BatchEnv, system_env: SystemEnv, - used_contracts: Vec<(H256, Vec<u8>)>, + pubdata_params: PubdataParams, ) -> Self { V1TeeVerifierInput { - witness_input_merkle_paths, + vm_run_data, + merkle_paths, l2_blocks_execution_data, l1_batch_env, system_env, - used_contracts, + pubdata_params, } } } @@ -305,17 +308,6 @@ impl TeeVerifierInput { } } -impl StoredObject for TeeVerifierInput { - const BUCKET: Bucket = Bucket::TeeVerifierInput; - type Key<'a> = L1BatchNumber; - - fn encode_key(key: Self::Key<'_>) -> String { - format!("tee_verifier_input_for_l1_batch_{key}.bin") - } - - serialize_using_bincode!(); -} - #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/snapshots_applier/Cargo.toml b/core/lib/snapshots_applier/Cargo.toml index 4ab0c86843ef..d107aac6d4c6 100644 --- a/core/lib/snapshots_applier/Cargo.toml +++ b/core/lib/snapshots_applier/Cargo.toml @@ -17,7 +17,6 @@ zksync_health_check.workspace = true zksync_types.workspace = true zksync_object_store.workspace = true zksync_web3_decl.workspace = true -zksync_utils.workspace = true vise.workspace = true diff --git a/core/lib/snapshots_applier/src/lib.rs b/core/lib/snapshots_applier/src/lib.rs index b4d24a0b1851..2c68b56ca5c6 100644 --- a/core/lib/snapshots_applier/src/lib.rs +++ b/core/lib/snapshots_applier/src/lib.rs @@ -13,6 +13,7 @@ use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthChe use zksync_object_store::{ObjectStore, ObjectStoreError}; use zksync_types::{ api, + bytecode::BytecodeHash, snapshots::{ SnapshotFactoryDependencies, SnapshotHeader, SnapshotRecoveryStatus, SnapshotStorageLog, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey, SnapshotVersion, @@ -20,7 +21,6 @@ use zksync_types::{ tokens::TokenInfo, L1BatchNumber, L2BlockNumber, StorageKey, H256, }; -use zksync_utils::bytecode::hash_bytecode; use zksync_web3_decl::{ client::{DynClient, L2}, error::{ClientRpcContext, EnrichedClientError, EnrichedClientResult}, @@ -800,9 +800,15 @@ impl<'a> SnapshotsApplier<'a> { // in underlying query, see `https://www.postgresql.org/docs/current/limits.html` // there were around 100 thousand contracts on mainnet, where this issue first manifested for chunk in factory_deps.factory_deps.chunks(1000) { + // TODO: bytecode hashing is ambiguous with EVM bytecodes let chunk_deps_hashmap: HashMap<H256, Vec<u8>> = chunk .iter() - .map(|dep| (hash_bytecode(&dep.bytecode.0), dep.bytecode.0.clone())) + .map(|dep| { + ( + BytecodeHash::for_bytecode(&dep.bytecode.0).value(), + dep.bytecode.0.clone(), + ) + }) .collect(); storage .factory_deps_dal() diff --git a/core/lib/snapshots_applier/src/tests/utils.rs b/core/lib/snapshots_applier/src/tests/utils.rs index 2c9b1440af2a..cf68d2e181a6 100644 --- a/core/lib/snapshots_applier/src/tests/utils.rs +++ b/core/lib/snapshots_applier/src/tests/utils.rs @@ -182,6 +182,7 @@ pub(super) fn mock_l2_block_header(l2_block_number: L2BlockNumber) -> L2BlockHea
0, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), } } diff --git a/core/lib/state/Cargo.toml b/core/lib/state/Cargo.toml index dd56368f3d2e..ced06de1a8e8 100644 --- a/core/lib/state/Cargo.toml +++ b/core/lib/state/Cargo.toml @@ -14,7 +14,6 @@ categories.workspace = true vise.workspace = true zksync_dal.workspace = true zksync_types.workspace = true -zksync_utils.workspace = true zksync_shared_metrics.workspace = true zksync_storage.workspace = true zksync_vm_interface.workspace = true diff --git a/core/lib/state/src/storage_factory/mod.rs b/core/lib/state/src/storage_factory/mod.rs index 0b514f8f9644..be7e20c5f83d 100644 --- a/core/lib/state/src/storage_factory/mod.rs +++ b/core/lib/state/src/storage_factory/mod.rs @@ -5,8 +5,7 @@ use async_trait::async_trait; use tokio::{runtime::Handle, sync::watch}; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_storage::RocksDB; -use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256}; -use zksync_utils::u256_to_h256; +use zksync_types::{u256_to_h256, L1BatchNumber, StorageKey, StorageValue, H256}; use zksync_vm_interface::storage::{ReadStorage, StorageSnapshot}; use self::metrics::{SnapshotStage, SNAPSHOT_METRICS}; @@ -201,10 +200,7 @@ impl CommonStorage<'static> { let factory_deps = bytecodes .into_iter() - .map(|(hash_u256, words)| { - let bytes: Vec = words.into_iter().flatten().collect(); - (u256_to_h256(hash_u256), bytes) - }) + .map(|(hash_u256, bytes)| (u256_to_h256(hash_u256), bytes)) .collect(); let storage = previous_values.into_iter().map(|(key, prev_value)| { diff --git a/core/lib/state/src/test_utils.rs b/core/lib/state/src/test_utils.rs index decb2a0f403d..a12508f615f0 100644 --- a/core/lib/state/src/test_utils.rs +++ b/core/lib/state/src/test_utils.rs @@ -88,6 +88,7 @@ pub(crate) async fn create_l2_block( virtual_blocks: 0, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; conn.blocks_dal() diff --git a/core/lib/tee_verifier/Cargo.toml b/core/lib/tee_verifier/Cargo.toml index 6828eeef8b10..289803fb5a89 100644 --- a/core/lib/tee_verifier/Cargo.toml +++ b/core/lib/tee_verifier/Cargo.toml @@ -11,18 +11,20 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_multivm.workspace = true zksync_config.workspace = true zksync_crypto_primitives.workspace = true zksync_merkle_tree.workspace = true -zksync_object_store.workspace = true +zksync_multivm.workspace = true zksync_prover_interface.workspace = true zksync_types.workspace = true -zksync_utils.workspace = true anyhow.workspace = true +once_cell.workspace = true serde.workspace = true tracing.workspace = true [dev-dependencies] zksync_contracts.workspace = true +zksync_prover_interface.workspace = true + +bincode.workspace = true diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index 86b563f823e8..8e8362b57f4b 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -4,27 +4,28 @@ //! executing the VM and verifying all the accessed memory slots by their //! merkle path. 
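For orientation, here is a minimal sketch of how a consumer might drive this module after the change. It is hedged: it assumes the `Verify` trait defined below, a `TeeVerifierInput` enum from `zksync_prover_interface` with a `V1` variant, and that inputs now arrive as plain `bincode` blobs (the `StoredObject` impl is removed in this diff):

```rust
use anyhow::Context as _;
use zksync_prover_interface::inputs::TeeVerifierInput;
use zksync_tee_verifier::Verify;

fn verify_batch_blob(blob: &[u8]) -> anyhow::Result<()> {
    let input: TeeVerifierInput =
        bincode::deserialize(blob).context("failed to deserialize TeeVerifierInput")?;
    match input {
        // Replays the batch in the VM over a storage snapshot and checks
        // every accessed slot against its Merkle path.
        TeeVerifierInput::V1(input) => input.verify().map(drop),
        _ => anyhow::bail!("unsupported TeeVerifierInput version"),
    }
}
```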
-use std::{cell::RefCell, rc::Rc}; - -use anyhow::Context; +use anyhow::{bail, Context, Result}; use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ BlockOutputWithProofs, TreeInstruction, TreeLogEntry, TreeLogEntryWithProof, ValueHash, }; use zksync_multivm::{ interface::{ - storage::{InMemoryStorage, ReadStorage, StorageView}, + storage::{ReadStorage, StorageSnapshot, StorageView}, FinishedL1Batch, L2BlockEnv, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, + pubdata_builders::pubdata_params_to_builder, vm_latest::HistoryEnabled, LegacyVmInstance, }; use zksync_prover_interface::inputs::{ StorageLogMetadata, V1TeeVerifierInput, WitnessInputMerklePaths, }; -use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, StorageLog, Transaction, H256}; -use zksync_utils::bytecode::hash_bytecode; +use zksync_types::{ + block::L2BlockExecutionData, commitment::PubdataParams, u256_to_h256, L1BatchNumber, + StorageLog, StorageValue, Transaction, H256, +}; /// A structure to hold the result of verification. pub struct VerificationResult { @@ -50,29 +51,47 @@ impl Verify for V1TeeVerifierInput { /// not actionable. fn verify(self) -> anyhow::Result { let old_root_hash = self.l1_batch_env.previous_batch_hash.unwrap(); - let l2_chain_id = self.system_env.chain_id; - let enumeration_index = self.witness_input_merkle_paths.next_enumeration_index(); + let enumeration_index = self.merkle_paths.next_enumeration_index(); + let batch_number = self.l1_batch_env.number; - let mut raw_storage = InMemoryStorage::with_custom_system_contracts_and_chain_id( - l2_chain_id, - hash_bytecode, - Vec::with_capacity(0), - ); + let read_storage_ops = self + .vm_run_data + .witness_block_state + .read_storage_key + .into_iter(); - for (hash, bytes) in self.used_contracts.into_iter() { - tracing::trace!("raw_storage.store_factory_dep({hash}, bytes)"); - raw_storage.store_factory_dep(hash, bytes) - } + let initial_writes_ops = self + .vm_run_data + .witness_block_state + .is_write_initial + .into_iter(); - let block_output_with_proofs = - get_bowp_and_set_initial_values(self.witness_input_merkle_paths, &mut raw_storage); + // We need to define storage slots read during batch execution, and their initial state; + // hence, the use of both read_storage_ops and initial_writes_ops. + // StorageSnapshot also requires providing enumeration indices, + // but they only matter at the end of execution when creating pubdata for the batch, + // which is irrelevant in this case. Thus, enumeration indices are set to dummy values. 
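As a standalone illustration of the comment above (the real mapping follows in the diff; this sketch assumes `StorageSnapshot::new(slots, factory_deps)` as used there):

```rust
use std::collections::HashMap;

use zksync_multivm::interface::storage::StorageSnapshot;
use zksync_types::H256;

fn example_snapshot() -> StorageSnapshot {
    let read_slot = H256::from_low_u64_be(1);
    let fresh_slot = H256::from_low_u64_be(2);
    let slots = HashMap::from([
        // Slot read during the batch: known value + dummy enumeration index.
        (read_slot, Some((H256::from_low_u64_be(42), 1_u64))),
        // Initial write: the slot did not exist before this batch.
        (fresh_slot, None),
    ]);
    StorageSnapshot::new(slots, HashMap::new())
}
```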
+ let storage = read_storage_ops + .enumerate() + .map(|(i, (hash, bytes))| (hash.hashed_key(), Some((bytes, i as u64 + 1u64)))) + .chain(initial_writes_ops.filter_map(|(key, initial_write)| { + initial_write.then_some((key.hashed_key(), None)) + })) + .collect(); - let storage_view = Rc::new(RefCell::new(StorageView::new(&raw_storage))); + let factory_deps = self + .vm_run_data + .used_bytecodes + .into_iter() + .map(|(hash, bytes)| (u256_to_h256(hash), bytes.into_flattened())) + .collect(); - let batch_number = self.l1_batch_env.number; + let storage_snapshot = StorageSnapshot::new(storage, factory_deps); + let storage_view = StorageView::new(storage_snapshot).to_rc_ptr(); let vm = LegacyVmInstance::new(self.l1_batch_env, self.system_env, storage_view); + let vm_out = execute_vm(self.l2_blocks_execution_data, vm, self.pubdata_params)?; - let vm_out = execute_vm(self.l2_blocks_execution_data, vm)?; + let block_output_with_proofs = get_bowp(self.merkle_paths)?; let instructions: Vec = generate_tree_instructions(enumeration_index, &block_output_with_proofs, vm_out)?; @@ -89,11 +108,8 @@ impl Verify for V1TeeVerifierInput { } /// Sets the initial storage values and returns `BlockOutputWithProofs` -fn get_bowp_and_set_initial_values( - witness_input_merkle_paths: WitnessInputMerklePaths, - raw_storage: &mut InMemoryStorage, -) -> BlockOutputWithProofs { - let logs = witness_input_merkle_paths +fn get_bowp(witness_input_merkle_paths: WitnessInputMerklePaths) -> Result { + let logs_result: Result<_, _> = witness_input_merkle_paths .into_merkle_paths() .map( |StorageLogMetadata { @@ -110,29 +126,31 @@ fn get_bowp_and_set_initial_values( let merkle_path = merkle_paths.into_iter().map(|x| x.into()).collect(); let base: TreeLogEntry = match (is_write, first_write, leaf_enumeration_index) { (false, _, 0) => TreeLogEntry::ReadMissingKey, - (false, _, _) => { + (false, false, _) => { // This is a special U256 here, which needs `to_little_endian` let mut hashed_key = [0_u8; 32]; leaf_storage_key.to_little_endian(&mut hashed_key); - raw_storage.set_value_hashed_enum( - hashed_key.into(), - leaf_enumeration_index, - value_read.into(), + tracing::trace!( + "TreeLogEntry::Read {leaf_storage_key:x} = {:x}", + StorageValue::from(value_read) ); TreeLogEntry::Read { leaf_index: leaf_enumeration_index, value: value_read.into(), } } + (false, true, _) => { + tracing::error!("get_bowp is_write = false, first_write = true"); + bail!("get_bowp is_write = false, first_write = true"); + } (true, true, _) => TreeLogEntry::Inserted, (true, false, _) => { // This is a special U256 here, which needs `to_little_endian` let mut hashed_key = [0_u8; 32]; leaf_storage_key.to_little_endian(&mut hashed_key); - raw_storage.set_value_hashed_enum( - hashed_key.into(), - leaf_enumeration_index, - value_read.into(), + tracing::trace!( + "TreeLogEntry::Updated {leaf_storage_key:x} = {:x}", + StorageValue::from(value_read) ); TreeLogEntry::Updated { leaf_index: leaf_enumeration_index, @@ -140,25 +158,28 @@ fn get_bowp_and_set_initial_values( } } }; - TreeLogEntryWithProof { + Ok(TreeLogEntryWithProof { base, merkle_path, root_hash, - } + }) }, ) .collect(); - BlockOutputWithProofs { + let logs: Vec = logs_result?; + + Ok(BlockOutputWithProofs { logs, leaf_count: 0, - } + }) } /// Executes the VM and returns `FinishedL1Batch` on success. 
fn execute_vm( l2_blocks_execution_data: Vec, mut vm: LegacyVmInstance, + pubdata_params: PubdataParams, ) -> anyhow::Result { let next_l2_blocks_data = l2_blocks_execution_data.iter().skip(1); @@ -176,12 +197,18 @@ fn execute_vm( .context("failed to execute transaction in TeeVerifierInputProducer")?; tracing::trace!("Finished execution of tx: {tx:?}"); } + + tracing::trace!("finished l2_block {l2_block_data:?}"); + tracing::trace!("about to vm.start_new_l2_block {next_l2_block_data:?}"); + vm.start_new_l2_block(L2BlockEnv::from_l2_block_data(next_l2_block_data)); tracing::trace!("Finished execution of l2_block: {:?}", l2_block_data.number); } - Ok(vm.finish_batch()) + tracing::trace!("about to vm.finish_batch()"); + + Ok(vm.finish_batch(pubdata_params_to_builder(pubdata_params))) } /// Map `LogQuery` and `TreeLogEntry` to a `TreeInstruction` @@ -191,7 +218,7 @@ fn map_log_tree( idx: &mut u64, ) -> anyhow::Result { let key = storage_log.key.hashed_key_u256(); - Ok(match (storage_log.is_write(), *tree_log_entry) { + let tree_instruction = match (storage_log.is_write(), *tree_log_entry) { (true, TreeLogEntry::Updated { leaf_index, .. }) => { TreeInstruction::write(key, leaf_index, H256(storage_log.value.into())) } @@ -203,24 +230,31 @@ fn map_log_tree( (false, TreeLogEntry::Read { value, .. }) => { if storage_log.value != value { tracing::error!( - "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", - storage_log.value, - value - ); - anyhow::bail!( - "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", + ?storage_log, + ?tree_log_entry, + "Failed to map LogQuery to TreeInstruction: read value {:#?} != {:#?}", storage_log.value, value ); + anyhow::bail!("Failed to map LogQuery to TreeInstruction"); } TreeInstruction::Read(key) } (false, TreeLogEntry::ReadMissingKey { .. }) => TreeInstruction::Read(key), - _ => { - tracing::error!("Failed to map LogQuery to TreeInstruction"); + (true, TreeLogEntry::Read { .. }) + | (true, TreeLogEntry::ReadMissingKey) + | (false, TreeLogEntry::Inserted) + | (false, TreeLogEntry::Updated { .. }) => { + tracing::error!( + ?storage_log, + ?tree_log_entry, + "Failed to map LogQuery to TreeInstruction" + ); anyhow::bail!("Failed to map LogQuery to TreeInstruction"); } - }) + }; + + Ok(tree_instruction) } /// Generates the `TreeInstruction`s from the VM executions. 
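A note on the rewritten `map_log_tree` match above: spelling out every invalid `(is_write, TreeLogEntry)` combination instead of using a catch-all `_` arm turns a future `TreeLogEntry` variant into a compile error rather than a silent runtime failure. A toy sketch of the pattern (illustrative types, not the real ones):

```rust
enum LogEntry {
    Read,
    Inserted,
    Updated,
}

fn map_entry(is_write: bool, entry: &LogEntry) -> anyhow::Result<&'static str> {
    Ok(match (is_write, entry) {
        (true, LogEntry::Inserted) => "write to a fresh key",
        (true, LogEntry::Updated) => "write to an existing key",
        (false, LogEntry::Read) => "read of an existing key",
        // Enumerated explicitly so that new variants must be handled.
        (true, LogEntry::Read) | (false, LogEntry::Inserted) | (false, LogEntry::Updated) => {
            anyhow::bail!("storage log / tree entry mismatch")
        }
    })
}
```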
@@ -269,15 +303,25 @@ fn execute_tx( mod tests { use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv, TxExecutionMode}; - use zksync_object_store::StoredObject; - use zksync_prover_interface::inputs::TeeVerifierInput; - use zksync_types::U256; + use zksync_prover_interface::inputs::{TeeVerifierInput, VMRunWitnessInputData}; use super::*; #[test] fn test_v1_serialization() { let tvi = V1TeeVerifierInput::new( + VMRunWitnessInputData { + l1_batch_number: Default::default(), + used_bytecodes: Default::default(), + initial_heap_content: vec![], + protocol_version: Default::default(), + bootloader_code: vec![], + default_account_code_hash: Default::default(), + evm_emulator_code_hash: Some(Default::default()), + storage_refunds: vec![], + pubdata_costs: vec![], + witness_block_state: Default::default(), + }, WitnessInputMerklePaths::new(0), vec![], L1BatchEnv { @@ -299,11 +343,11 @@ mod tests { version: Default::default(), base_system_smart_contracts: BaseSystemContracts { bootloader: SystemContractCode { - code: vec![U256([1; 4])], + code: vec![1; 32], hash: H256([1; 32]), }, default_aa: SystemContractCode { - code: vec![U256([1; 4])], + code: vec![1; 32], hash: H256([1; 32]), }, evm_emulator: None, @@ -313,14 +357,12 @@ mod tests { default_validation_computational_gas_limit: 0, chain_id: Default::default(), }, - vec![(H256([1; 32]), vec![0, 1, 2, 3, 4])], + Default::default(), ); let tvi = TeeVerifierInput::new(tvi); - let serialized = <TeeVerifierInput as StoredObject>::serialize(&tvi) - .expect("Failed to serialize TeeVerifierInput."); + let serialized = bincode::serialize(&tvi).expect("Failed to serialize TeeVerifierInput."); let deserialized: TeeVerifierInput = - <TeeVerifierInput as StoredObject>::deserialize(serialized) - .expect("Failed to deserialize TeeVerifierInput."); + bincode::deserialize(&serialized).expect("Failed to deserialize TeeVerifierInput."); assert_eq!(tvi, deserialized); } diff --git a/core/tests/test_account/Cargo.toml b/core/lib/test_contracts/Cargo.toml similarity index 61% rename from core/tests/test_account/Cargo.toml rename to core/lib/test_contracts/Cargo.toml index 0dda4f8ac777..d9df995b7fa8 100644 --- a/core/tests/test_account/Cargo.toml +++ b/core/lib/test_contracts/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "zksync_test_account" -description = "ZKsync test account for writing unit tests" +name = "zksync_test_contracts" +description = "ZKsync test contracts for writing unit tests" version.workspace = true edition.workspace = true authors.workspace = true @@ -13,10 +13,15 @@ categories.workspace = true [dependencies] zksync_types.workspace = true zksync_system_constants.workspace = true -zksync_utils.workspace = true zksync_eth_signer.workspace = true -zksync_contracts.workspace = true hex.workspace = true +once_cell.workspace = true ethabi.workspace = true rand.workspace = true +serde.workspace = true +serde_json.workspace = true + +[build-dependencies] +serde_json.workspace = true +foundry-compilers.workspace = true diff --git a/core/lib/test_contracts/README.md b/core/lib/test_contracts/README.md new file mode 100644 index 000000000000..2c5515269d42 --- /dev/null +++ b/core/lib/test_contracts/README.md @@ -0,0 +1,16 @@ +# ZKsync Era Test Contracts + +This library exposes contracts used in the ZKsync Era codebase for unit testing.
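As a quick usage sketch (illustrative only, based on the `TestContract` accessors defined in `src/contracts.rs` below):

```rust
use zksync_test_contracts::TestContract;

fn main() {
    // Look up the counter test contract and ABI-encode a call to `get()`.
    let counter = TestContract::counter();
    assert!(!counter.bytecode.is_empty());
    let calldata = counter.function("get").encode_input(&[]).unwrap();
    println!("calldata: 0x{}", hex::encode(calldata));
}
```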
+ +## Contents + +Some of the commonly used contracts included in this crate are: + +- [`LoadnextContract`](contracts/loadnext/loadnext_contract.sol): Emulates various kinds of load (storage reads / writes, hashing, emitting events + deploying contracts etc.). Used in load testing. +- [`Counter`](contracts/counter/counter.sol): Simple stateful counter. Can be used to test "cheap" transactions and reverts. + +## Building + +Building the library relies on `foundry-compilers`; it doesn't require any external tools. If there are any issues during the build, it may be useful +to inspect build artifacts, which are located in one of `target/{debug,release}/build/zksync_test_contracts-$random_numbers` directories. diff --git a/core/lib/test_contracts/build.rs b/core/lib/test_contracts/build.rs new file mode 100644 index 000000000000..64825e18d404 --- /dev/null +++ b/core/lib/test_contracts/build.rs @@ -0,0 +1,143 @@ +use std::{ + collections::{HashMap, HashSet}, + env, + fs::File, + io::{BufWriter, Write}, + path::{Path, PathBuf}, +}; + +use foundry_compilers::{ + artifacts::{ + zksolc::output_selection::{FileOutputSelection, OutputSelection, OutputSelectionFlag}, + Remapping, + }, + solc, + zksolc::{ + settings::{Optimizer, ZkSolcError, ZkSolcWarning}, + ZkSettings, ZkSolcCompiler, ZkSolcSettings, + }, + zksync, + zksync::artifact_output::zk::{ZkArtifactOutput, ZkContractArtifact}, + ArtifactId, ProjectBuilder, ProjectPathsConfig, +}; + +#[derive(Debug)] +struct ContractEntry { + abi: String, + bytecode: Vec<u8>, +} + +impl ContractEntry { + fn new(artifact: ZkContractArtifact) -> Option<Self> { + let abi = artifact.abi.expect("no ABI"); + let abi = serde_json::to_string(&abi).expect("cannot serialize ABI to string"); + let bytecode = artifact.bytecode?; // Bytecode is `None` for interfaces + let bytecode = bytecode + .object + .into_bytes() + .expect("bytecode is not fully compiled") + .into(); + Some(Self { abi, bytecode }) + } +} + +fn save_artifacts( + output: &mut impl Write, + artifacts: impl Iterator<Item = (ArtifactId, ZkContractArtifact)>, +) { + let source_dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("contracts"); + let mut modules = HashMap::<_, HashMap<_, _>>::new(); + + for (id, artifact) in artifacts { + let Ok(path_in_sources) = id.source.strip_prefix(&source_dir) else { + continue; // The artifact doesn't correspond to a source contract + }; + let contract_dir = path_in_sources.iter().next().expect("no dir"); + let module_name = contract_dir + .to_str() + .expect("contract dir is not UTF-8") + .replace('-', "_"); + if let Some(entry) = ContractEntry::new(artifact) { + modules + .entry(module_name) + .or_default() + .insert(id.name, entry); + } + } + + for (module_name, module_entries) in modules { + writeln!(output, "pub(crate) mod {module_name} {{").unwrap(); + for (contract_name, entry) in module_entries { + writeln!( + output, + " pub(crate) const {contract_name}: crate::contracts::RawContract = crate::contracts::RawContract {{" + ) + .unwrap(); + writeln!(output, " abi: r#\"{}\"#,", entry.abi).unwrap(); // ABI shouldn't include '"#' combinations for this to work + writeln!(output, " bytecode: &{:?},", entry.bytecode).unwrap(); + writeln!(output, " }};").unwrap(); + } + writeln!(output, "}}").unwrap(); + } +} + +/// `zksolc` compiler settings.
+fn compiler_settings() -> ZkSolcSettings { + ZkSolcSettings { + cli_settings: solc::CliSettings::default(), + settings: ZkSettings { + // Optimizer must be enabled; otherwise, system calls work incorrectly for whatever reason + optimizer: Optimizer { + enabled: Some(true), + ..Optimizer::default() + }, + // Required by optimizer + via_ir: Some(true), + output_selection: OutputSelection { + all: FileOutputSelection { + per_file: HashSet::from([OutputSelectionFlag::ABI]), + per_contract: HashSet::from([OutputSelectionFlag::ABI]), + }, + }, + enable_eravm_extensions: true, + suppressed_errors: HashSet::from([ZkSolcError::SendTransfer]), + suppressed_warnings: HashSet::from([ZkSolcWarning::TxOrigin]), + ..ZkSettings::default() + }, + } +} + +fn main() { + let settings = compiler_settings(); + let temp_dir = PathBuf::from(env::var("OUT_DIR").expect("no `OUT_DIR` provided")); + let paths = ProjectPathsConfig::builder() + .sources(Path::new(env!("CARGO_MANIFEST_DIR")).join("contracts")) + .remapping(Remapping { + context: None, + name: "@openzeppelin/contracts".into(), + path: format!( + "{}/contract-libs/openzeppelin-contracts-v4/contracts", + env!("CARGO_MANIFEST_DIR") + ), + }) + .artifacts(temp_dir.join("artifacts")) + .cache(temp_dir.join("cache")) + .build() + .unwrap(); + + let project = ProjectBuilder::::new(ZkArtifactOutput::default()) + .paths(paths) + .settings(settings) + .build(ZkSolcCompiler::default()) + .unwrap(); + let output = zksync::project_compile(&project).unwrap(); + output.assert_success(); + + let module_path = temp_dir.join("raw_contracts.rs"); + let module = File::create(&module_path).expect("failed creating output Rust module"); + let mut module = BufWriter::new(module); + save_artifacts(&mut module, output.into_artifacts()); + + // Tell Cargo that if a source file changes, to rerun this build script. 
+ project.rerun_if_sources_changed(); +} diff --git a/core/lib/test_contracts/contract-libs/openzeppelin-contracts-v4 b/core/lib/test_contracts/contract-libs/openzeppelin-contracts-v4 new file mode 120000 index 000000000000..ec18125715f9 --- /dev/null +++ b/core/lib/test_contracts/contract-libs/openzeppelin-contracts-v4 @@ -0,0 +1 @@ +../../../../contracts/l1-contracts/lib/openzeppelin-contracts-v4 \ No newline at end of file diff --git a/etc/contracts-test-data/contracts/complex-upgrade/complex-upgrade.sol b/core/lib/test_contracts/contracts/complex-upgrade/complex-upgrade.sol similarity index 100% rename from etc/contracts-test-data/contracts/complex-upgrade/complex-upgrade.sol rename to core/lib/test_contracts/contracts/complex-upgrade/complex-upgrade.sol diff --git a/etc/contracts-test-data/contracts/complex-upgrade/msg-sender.sol b/core/lib/test_contracts/contracts/complex-upgrade/msg-sender.sol similarity index 100% rename from etc/contracts-test-data/contracts/complex-upgrade/msg-sender.sol rename to core/lib/test_contracts/contracts/complex-upgrade/msg-sender.sol diff --git a/etc/contracts-test-data/contracts/context/context.sol b/core/lib/test_contracts/contracts/context/context.sol similarity index 100% rename from etc/contracts-test-data/contracts/context/context.sol rename to core/lib/test_contracts/contracts/context/context.sol diff --git a/etc/contracts-test-data/contracts/counter/counter.sol b/core/lib/test_contracts/contracts/counter/counter.sol similarity index 100% rename from etc/contracts-test-data/contracts/counter/counter.sol rename to core/lib/test_contracts/contracts/counter/counter.sol diff --git a/etc/contracts-test-data/contracts/counter/proxy_counter.sol b/core/lib/test_contracts/contracts/counter/proxy_counter.sol similarity index 100% rename from etc/contracts-test-data/contracts/counter/proxy_counter.sol rename to core/lib/test_contracts/contracts/counter/proxy_counter.sol diff --git a/etc/contracts-test-data/contracts/custom-account/Constants.sol b/core/lib/test_contracts/contracts/custom-account/Constants.sol similarity index 100% rename from etc/contracts-test-data/contracts/custom-account/Constants.sol rename to core/lib/test_contracts/contracts/custom-account/Constants.sol diff --git a/etc/contracts-test-data/contracts/custom-account/RLPEncoder.sol b/core/lib/test_contracts/contracts/custom-account/RLPEncoder.sol similarity index 100% rename from etc/contracts-test-data/contracts/custom-account/RLPEncoder.sol rename to core/lib/test_contracts/contracts/custom-account/RLPEncoder.sol diff --git a/etc/contracts-test-data/contracts/custom-account/SystemContext.sol b/core/lib/test_contracts/contracts/custom-account/SystemContext.sol similarity index 100% rename from etc/contracts-test-data/contracts/custom-account/SystemContext.sol rename to core/lib/test_contracts/contracts/custom-account/SystemContext.sol diff --git a/etc/contracts-test-data/contracts/custom-account/SystemContractsCaller.sol b/core/lib/test_contracts/contracts/custom-account/SystemContractsCaller.sol similarity index 100% rename from etc/contracts-test-data/contracts/custom-account/SystemContractsCaller.sol rename to core/lib/test_contracts/contracts/custom-account/SystemContractsCaller.sol diff --git a/etc/contracts-test-data/contracts/custom-account/TransactionHelper.sol b/core/lib/test_contracts/contracts/custom-account/TransactionHelper.sol similarity index 100% rename from etc/contracts-test-data/contracts/custom-account/TransactionHelper.sol rename to 
core/lib/test_contracts/contracts/custom-account/TransactionHelper.sol diff --git a/etc/contracts-test-data/contracts/custom-account/Utils.sol b/core/lib/test_contracts/contracts/custom-account/Utils.sol similarity index 100% rename from etc/contracts-test-data/contracts/custom-account/Utils.sol rename to core/lib/test_contracts/contracts/custom-account/Utils.sol diff --git a/etc/contracts-test-data/contracts/custom-account/custom-account.sol b/core/lib/test_contracts/contracts/custom-account/custom-account.sol similarity index 100% rename from etc/contracts-test-data/contracts/custom-account/custom-account.sol rename to core/lib/test_contracts/contracts/custom-account/custom-account.sol diff --git a/etc/contracts-test-data/contracts/custom-account/custom-paymaster.sol b/core/lib/test_contracts/contracts/custom-account/custom-paymaster.sol similarity index 100% rename from etc/contracts-test-data/contracts/custom-account/custom-paymaster.sol rename to core/lib/test_contracts/contracts/custom-account/custom-paymaster.sol diff --git a/etc/contracts-test-data/contracts/custom-account/interfaces/IAccount.sol b/core/lib/test_contracts/contracts/custom-account/interfaces/IAccount.sol similarity index 100% rename from etc/contracts-test-data/contracts/custom-account/interfaces/IAccount.sol rename to core/lib/test_contracts/contracts/custom-account/interfaces/IAccount.sol diff --git a/etc/contracts-test-data/contracts/custom-account/interfaces/IContractDeployer.sol b/core/lib/test_contracts/contracts/custom-account/interfaces/IContractDeployer.sol similarity index 100% rename from etc/contracts-test-data/contracts/custom-account/interfaces/IContractDeployer.sol rename to core/lib/test_contracts/contracts/custom-account/interfaces/IContractDeployer.sol diff --git a/etc/contracts-test-data/contracts/custom-account/interfaces/INonceHolder.sol b/core/lib/test_contracts/contracts/custom-account/interfaces/INonceHolder.sol similarity index 100% rename from etc/contracts-test-data/contracts/custom-account/interfaces/INonceHolder.sol rename to core/lib/test_contracts/contracts/custom-account/interfaces/INonceHolder.sol diff --git a/etc/contracts-test-data/contracts/custom-account/interfaces/IPaymaster.sol b/core/lib/test_contracts/contracts/custom-account/interfaces/IPaymaster.sol similarity index 100% rename from etc/contracts-test-data/contracts/custom-account/interfaces/IPaymaster.sol rename to core/lib/test_contracts/contracts/custom-account/interfaces/IPaymaster.sol diff --git a/etc/contracts-test-data/contracts/custom-account/interfaces/IPaymasterFlow.sol b/core/lib/test_contracts/contracts/custom-account/interfaces/IPaymasterFlow.sol similarity index 100% rename from etc/contracts-test-data/contracts/custom-account/interfaces/IPaymasterFlow.sol rename to core/lib/test_contracts/contracts/custom-account/interfaces/IPaymasterFlow.sol diff --git a/etc/contracts-test-data/contracts/custom-account/many-owners-custom-account.sol b/core/lib/test_contracts/contracts/custom-account/many-owners-custom-account.sol similarity index 100% rename from etc/contracts-test-data/contracts/custom-account/many-owners-custom-account.sol rename to core/lib/test_contracts/contracts/custom-account/many-owners-custom-account.sol diff --git a/etc/contracts-test-data/contracts/custom-account/nonce-holder-test.sol b/core/lib/test_contracts/contracts/custom-account/nonce-holder-test.sol similarity index 100% rename from etc/contracts-test-data/contracts/custom-account/nonce-holder-test.sol rename to 
core/lib/test_contracts/contracts/custom-account/nonce-holder-test.sol diff --git a/etc/contracts-test-data/contracts/error/error.sol b/core/lib/test_contracts/contracts/error/error.sol similarity index 100% rename from etc/contracts-test-data/contracts/error/error.sol rename to core/lib/test_contracts/contracts/error/error.sol diff --git a/etc/contracts-test-data/contracts/expensive/expensive.sol b/core/lib/test_contracts/contracts/expensive/expensive.sol similarity index 100% rename from etc/contracts-test-data/contracts/expensive/expensive.sol rename to core/lib/test_contracts/contracts/expensive/expensive.sol diff --git a/etc/contracts-test-data/contracts/failed-call/failed_call.sol b/core/lib/test_contracts/contracts/failed-call/failed_call.sol similarity index 100% rename from etc/contracts-test-data/contracts/failed-call/failed_call.sol rename to core/lib/test_contracts/contracts/failed-call/failed_call.sol diff --git a/etc/contracts-test-data/contracts/infinite/infinite.sol b/core/lib/test_contracts/contracts/infinite/infinite.sol similarity index 100% rename from etc/contracts-test-data/contracts/infinite/infinite.sol rename to core/lib/test_contracts/contracts/infinite/infinite.sol diff --git a/core/lib/test_contracts/contracts/loadnext/README.md b/core/lib/test_contracts/contracts/loadnext/README.md new file mode 100644 index 000000000000..5918c4f2308a --- /dev/null +++ b/core/lib/test_contracts/contracts/loadnext/README.md @@ -0,0 +1,15 @@ +# Calculating loadtest profiles + +Use the SQL scripts in this directory to calculate the characteristics of transactions within a miniblock range. + +Calculate `CONTRACT_EXECUTION_PARAMS` as follows: + +- `light`: all zeroes. +- `realistic`: median (50th percentile). +- `heavy`: generally use 2.5× the values in the 99th percentile. However, some operations are even less frequent than that (e.g. contract deployments). At the time of writing, the number of contract deployments is set to 5. + +Metrics may be averaged across different block ranges to calculate a more holistic "characteristic." + +## Compensating for implicit activity + +The mere act of executing a transaction entails some ancillary activity on the network. For example, some events are emitted when tokens are transferred for gas payments. The loadtest contract does not compensate for this activity, so it should be kept in mind when evaluating loadtest activity. diff --git a/core/lib/test_contracts/contracts/loadnext/loadnext_contract.sol b/core/lib/test_contracts/contracts/loadnext/loadnext_contract.sol new file mode 100644 index 000000000000..9186ff6180a2 --- /dev/null +++ b/core/lib/test_contracts/contracts/loadnext/loadnext_contract.sol @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.0; +pragma abicoder v2; + +contract LoadnextContract { + event Event(uint val); + uint[] readArray; + uint[] writeArray; + + constructor(uint reads) { + for (uint i = 0; i < reads; i++) { + readArray.push(i); + } + } + + function execute( + uint reads, + uint initialWrites, + uint repeatedWrites, + uint hashes, + uint events, + uint maxRecursion, + uint deploys + ) external returns (uint) { + if (maxRecursion > 0) { + return + this.execute( + reads, + initialWrites, + repeatedWrites, + hashes, + events, + maxRecursion - 1, + deploys + ); + } + + require(repeatedWrites <= readArray.length); + uint sum = 0; + + // Use the results of the storage reads below so that the compiler does not optimize them away.
+ for (uint i = 0; i < repeatedWrites; i++) { + uint value = readArray[i]; + sum += value; + readArray[i] = value + 1; + } + for (uint i = repeatedWrites; i < reads; i++) { + sum += readArray[i]; + } + + for (uint i = 0; i < initialWrites; i++) { + writeArray.push(i); + } + + for (uint i = 0; i < events; i++) { + emit Event(i); + } + + // Use the keccak results so that the compiler does not optimize the hashing away. + for (uint i = 0; i < hashes; i++) { + sum += uint8( + keccak256(abi.encodePacked("Message for encoding"))[0] + ); + } + + for (uint i = 0; i < deploys; i++) { + Foo foo = new Foo(); + } + return sum; + } + + function burnGas(uint256 gasToBurn) external { + uint256 initialGas = gasleft(); + while (initialGas - gasleft() < gasToBurn) {} + } +} + +contract Foo { + string public name = "Foo"; +} diff --git a/core/lib/test_contracts/contracts/loadnext/query_event_metrics.sql b/core/lib/test_contracts/contracts/loadnext/query_event_metrics.sql new file mode 100644 index 000000000000..1a5d87d6fcfb --- /dev/null +++ b/core/lib/test_contracts/contracts/loadnext/query_event_metrics.sql @@ -0,0 +1,19 @@ +-- calculate distribution of event emissions per transaction + +\set start_from_miniblock_number 40000000 +\set miniblock_range 10000 + +select stddev_samp(metric) as stddev, + avg(metric) as avg, + sum(metric) as sum, + min(metric) as min, + percentile_cont(0.01) within group (order by metric) as pct_01, + percentile_cont(0.50) within group (order by metric) as pct_50, + percentile_cont(0.99) within group (order by metric) as pct_99, + max(metric) as max +from (select tx.hash, count(ev.*) as metric + from transactions tx + left join events ev on ev.tx_hash = tx.hash + where ev.miniblock_number >= :start_from_miniblock_number + and ev.miniblock_number < :start_from_miniblock_number + :miniblock_range + group by tx.hash) s; diff --git a/core/lib/test_contracts/contracts/loadnext/query_execution_info_metrics.sql b/core/lib/test_contracts/contracts/loadnext/query_execution_info_metrics.sql new file mode 100644 index 000000000000..bf9faba4b6dc --- /dev/null +++ b/core/lib/test_contracts/contracts/loadnext/query_execution_info_metrics.sql @@ -0,0 +1,20 @@ +-- calculate distribution of execution_info fields per transaction + +-- execution_info fields: gas_used, vm_events, cycles_used, storage_logs, l2_to_l1_logs, contracts_used, pubdata_published, total_log_queries, contracts_deployed, l2_l1_long_messages, computational_gas_used, published_bytecode_bytes +\set execution_info_field 'storage_logs' +\set start_from_miniblock_number 40000000 +\set miniblock_range 10000 + +select stddev_samp(metric) as stddev, + avg(metric) as avg, + sum(metric) as sum, + min(metric) as min, + percentile_cont(0.01) within group (order by metric) as pct_01, + percentile_cont(0.50) within group (order by metric) as pct_50, + percentile_cont(0.99) within group (order by metric) as pct_99, + max(metric) as max +from (select tx.miniblock_number, + (execution_info ->> :execution_info_field)::bigint as metric + from transactions tx) cd +where cd.miniblock_number >= :start_from_miniblock_number + and cd.miniblock_number < :start_from_miniblock_number + :miniblock_range; diff --git a/core/lib/test_contracts/contracts/loadnext/query_max_transactions_in_window.sql b/core/lib/test_contracts/contracts/loadnext/query_max_transactions_in_window.sql new file mode 100644 index 000000000000..91dd4bd47a7c --- /dev/null +++ b/core/lib/test_contracts/contracts/loadnext/query_max_transactions_in_window.sql @@ -0,0 +1,23 @@ +-- not a
metrics-collecting query, but may be useful to find an interesting range of transactions + +\set miniblock_number_range_start 36700000 +\set miniblock_number_range_end 36850000 +\set window_size 10000 +\set maximize_column l2_tx_count + +select miniblock_number_start, + miniblock_number_start + :window_size as miniblock_number_end, + metric_total +from (select mb.number as miniblock_number_start, + sum(mb.:maximize_column) + over lookahead + as metric_total + from miniblocks mb + where mb.number >= :miniblock_number_range_start + and mb.number < :miniblock_number_range_end + window lookahead as ( + order by mb.number + rows between current row and :window_size following + )) _s +order by metric_total desc +limit 10; diff --git a/core/lib/test_contracts/contracts/loadnext/query_read_metrics_basic.sql b/core/lib/test_contracts/contracts/loadnext/query_read_metrics_basic.sql new file mode 100644 index 000000000000..62195016f10e --- /dev/null +++ b/core/lib/test_contracts/contracts/loadnext/query_read_metrics_basic.sql @@ -0,0 +1,39 @@ +-- calculate distribution of storage reads per transaction +-- does not calculate hot/cold reads + +\set start_from_miniblock_number 40000000 +\set miniblock_range 10000 + +with mb as (select * + from miniblocks mb + where mb.number >= :start_from_miniblock_number + order by mb.number + limit :miniblock_range) +select stddev_samp(metric) as stddev, + avg(metric) as avg, + sum(metric) as sum, + min(metric) as min, + percentile_cont(0.01) within group (order by metric) as pct_01, + percentile_cont(0.50) within group (order by metric) as pct_50, + percentile_cont(0.99) within group (order by metric) as pct_99, + max(metric) as max +from (select miniblock_number, + (sum(read_write_logs) - sum(write_logs)) / sum(transaction_count) as metric, + sum(transaction_count) as transaction_count + from (select mb.number as miniblock_number, + (tx.execution_info ->> 'storage_logs')::bigint as read_write_logs, + null as write_logs, + 1 as transaction_count + from transactions tx, + mb + where tx.miniblock_number = mb.number + union + select mb.number as miniblock_number, + null as read_write_logs, + count(sl.*) as write_logs, + 0 as transaction_count + from storage_logs sl, + mb + where sl.miniblock_number = mb.number + group by mb.number) s + group by s.miniblock_number) t, generate_series(1, t.transaction_count); diff --git a/core/lib/test_contracts/contracts/loadnext/query_write_metrics.sql b/core/lib/test_contracts/contracts/loadnext/query_write_metrics.sql new file mode 100644 index 000000000000..f142347f9801 --- /dev/null +++ b/core/lib/test_contracts/contracts/loadnext/query_write_metrics.sql @@ -0,0 +1,50 @@ +-- calculate distribution of initial and repeated writes per transaction + +\set start_from_miniblock_number 40000000 +\set miniblock_range 10000 + +select + -- initial writes + stddev_samp(initial_writes_per_tx) as initial_writes_stddev, + avg(initial_writes_per_tx) as initial_writes_avg, + min(initial_writes_per_tx) as initial_writes_min, + percentile_cont(0.01) within group (order by initial_writes_per_tx) as initial_writes_pct_01, + percentile_cont(0.50) within group (order by initial_writes_per_tx) as initial_writes_pct_50, + percentile_cont(0.99) within group (order by initial_writes_per_tx) as initial_writes_pct_99, + max(initial_writes_per_tx) as initial_writes_max, + + -- repeated writes + stddev_samp(repeated_writes_per_tx) as repeated_writes_stddev, + avg(repeated_writes_per_tx) as repeated_writes_avg, + min(repeated_writes_per_tx) as
repeated_writes_min, + percentile_cont(0.01) within group (order by repeated_writes_per_tx) as repeated_writes_pct_01, + percentile_cont(0.50) within group (order by repeated_writes_per_tx) as repeated_writes_pct_50, + percentile_cont(0.99) within group (order by repeated_writes_per_tx) as repeated_writes_pct_99, + max(repeated_writes_per_tx) as repeated_writes_max +from (select initial_writes::real / l2_tx_count::real as initial_writes_per_tx, + (total_writes - initial_writes)::real / l2_tx_count::real as repeated_writes_per_tx + from (select mb.number as miniblock_number, + count(sl.hashed_key) as total_writes, + count(distinct sl.hashed_key) filter ( + where + iw.hashed_key is not null + ) as initial_writes, + mb.l2_tx_count as l2_tx_count + from miniblocks mb + join l1_batches l1b on l1b.number = mb.l1_batch_number + join storage_logs sl on sl.miniblock_number = mb.number + left join initial_writes iw on iw.hashed_key = sl.hashed_key + and iw.l1_batch_number = mb.l1_batch_number + and mb.number = ( + -- initial writes are only tracked by l1 batch number, so find the first miniblock in that batch that contains a write to that key + select miniblock_number + from storage_logs + where hashed_key = sl.hashed_key + order by miniblock_number + limit 1) + where mb.l2_tx_count <> 0 -- avoid div0 + and mb.number >= :start_from_miniblock_number + group by mb.number + order by mb.number desc + limit :miniblock_range) s, generate_series(1, s.l2_tx_count) -- scale by # of tx + ) t; diff --git a/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol b/core/lib/test_contracts/contracts/mock-evm/mock-evm.sol similarity index 70% rename from etc/contracts-test-data/contracts/mock-evm/mock-evm.sol rename to core/lib/test_contracts/contracts/mock-evm/mock-evm.sol index baa0d37b7530..3a7ee40db228 100644 --- a/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol +++ b/core/lib/test_contracts/contracts/mock-evm/mock-evm.sol @@ -90,6 +90,25 @@ contract MockContractDeployer { ACCOUNT_CODE_STORAGE_CONTRACT.storeAccountConstructedCodeHash(newAddress, _salt); return newAddress; } + + bytes32 constant CREATE2_PREFIX = keccak256("zksyncCreate2"); + + /// Mocks `create2` with real counterpart semantics, other than bytecode passed in `_input`. + /// @param _input bytecode to publish + function create2( + bytes32 _salt, + bytes32 _bytecodeHash, + bytes calldata _input + ) external payable returns (address newAddress) { + KNOWN_CODE_STORAGE_CONTRACT.setEVMBytecodeHash(_bytecodeHash); + KNOWN_CODE_STORAGE_CONTRACT.publishEVMBytecode(_input); + + bytes32 hash = keccak256( + bytes.concat(CREATE2_PREFIX, bytes32(uint256(uint160(msg.sender))), _salt, _bytecodeHash) + ); + newAddress = address(uint160(uint256(hash))); + ACCOUNT_CODE_STORAGE_CONTRACT.storeAccountConstructedCodeHash(newAddress, _bytecodeHash); + } } interface IAccountCodeStorage { @@ -101,6 +120,16 @@ interface IRecursiveContract { function recurse(uint _depth) external returns (uint); } +interface IRecursiveDeployment { + struct EvmDeployment { + bytes32 bytecodeHash; + /// Has fixed length to enable array slicing. + bytes32 bytecode; + } + + function testRecursiveDeployment(EvmDeployment[] calldata _deployments) external; +} + /// Native incrementing library. Not actually a library to simplify deployment. contract IncrementingContract { // Should not collide with other storage slots @@ -154,7 +183,7 @@ uint constant EVM_EMULATOR_STIPEND = 1 << 30; /** * Mock EVM emulator used in low-level tests. 
*/ -contract MockEvmEmulator is IRecursiveContract, IncrementingContract { +contract MockEvmEmulator is IRecursiveContract, IRecursiveDeployment, IncrementingContract { IAccountCodeStorage constant ACCOUNT_CODE_STORAGE_CONTRACT = IAccountCodeStorage(address(0x8002)); /// Set to `true` for testing logic sanity. @@ -210,7 +239,11 @@ contract MockEvmEmulator is IRecursiveContract, IncrementingContract { MockContractDeployer constant CONTRACT_DEPLOYER_CONTRACT = MockContractDeployer(address(0x8006)); /// Emulates EVM contract deployment and a subsequent call to it in a single transaction. - function testDeploymentAndCall(bytes32 _evmBytecodeHash, bytes calldata _evmBytecode) external validEvmEntry { + function testDeploymentAndCall( + bytes32 _evmBytecodeHash, + bytes calldata _evmBytecode, + bool _revert + ) external validEvmEntry { IRecursiveContract newContract = IRecursiveContract(CONTRACT_DEPLOYER_CONTRACT.create( _evmBytecodeHash, _evmBytecodeHash, @@ -222,6 +255,69 @@ contract MockEvmEmulator is IRecursiveContract, IncrementingContract { uint gasToSend = gasleft() - EVM_EMULATOR_STIPEND; require(newContract.recurse{gas: gasToSend}(5) == 120, "unexpected recursive result"); + require(!_revert, "requested revert"); + } + + function testCallToPreviousDeployment() external validEvmEntry { + IRecursiveContract newContract = IRecursiveContract(address(uint160(address(this)) + 1)); + require(address(newContract).code.length > 0, "contract code length"); + require(address(newContract).codehash != bytes32(0), "contract code hash"); + + uint gasToSend = gasleft() - EVM_EMULATOR_STIPEND; + require(newContract.recurse{gas: gasToSend}(5) == 120, "unexpected recursive result"); + } + + function testRecursiveDeployment(EvmDeployment[] calldata _deployments) external override validEvmEntry { + if (_deployments.length == 0) { + return; + } + + IRecursiveDeployment newContract = IRecursiveDeployment(CONTRACT_DEPLOYER_CONTRACT.create( + _deployments[0].bytecodeHash, + _deployments[0].bytecodeHash, + bytes.concat(_deployments[0].bytecode) + )); + uint gasToSend = gasleft() - EVM_EMULATOR_STIPEND; + newContract.testRecursiveDeployment{gas: gasToSend}(_deployments[1:]); + } + + function testDeploymentWithPartialRevert( + EvmDeployment[] calldata _deployments, + bool[] calldata _shouldRevert + ) external validEvmEntry { + require(_deployments.length == _shouldRevert.length, "length mismatch"); + + for (uint i = 0; i < _deployments.length; i++) { + uint gasToSend = gasleft() - EVM_EMULATOR_STIPEND; + try this.deployThenRevert{gas: gasToSend}( + _deployments[i], + bytes32(i), + _shouldRevert[i] + ) returns(address newAddress) { + require(!_shouldRevert[i], "unexpected deploy success"); + require(newAddress.code.length > 0, "contract code length"); + require(newAddress.codehash != bytes32(0), "contract code hash"); + } catch Error(string memory reason) { + require(_shouldRevert[i], "unexpected revert"); + require(keccak256(bytes(reason)) == keccak256("requested revert"), "unexpected error"); + } + } + } + + function deployThenRevert( + EvmDeployment calldata _deployment, + bytes32 _salt, + bool _shouldRevert + ) external validEvmEntry returns (address newAddress) { + newAddress = CONTRACT_DEPLOYER_CONTRACT.create2( + _salt, + _deployment.bytecodeHash, + bytes.concat(_deployment.bytecode) + ); + require(newAddress.code.length > 0, "contract code length"); + require(newAddress.codehash != bytes32(0), "contract code hash"); + + require(!_shouldRevert, "requested revert"); } fallback() external validEvmEntry { diff 
--git a/etc/contracts-test-data/contracts/precompiles/precompiles.sol b/core/lib/test_contracts/contracts/precompiles/precompiles.sol similarity index 100% rename from etc/contracts-test-data/contracts/precompiles/precompiles.sol rename to core/lib/test_contracts/contracts/precompiles/precompiles.sol diff --git a/etc/contracts-test-data/contracts/simple-transfer/simple-transfer.sol b/core/lib/test_contracts/contracts/simple-transfer/simple-transfer.sol similarity index 90% rename from etc/contracts-test-data/contracts/simple-transfer/simple-transfer.sol rename to core/lib/test_contracts/contracts/simple-transfer/simple-transfer.sol index 591e97cc1ae9..8ab5bf330e02 100644 --- a/etc/contracts-test-data/contracts/simple-transfer/simple-transfer.sol +++ b/core/lib/test_contracts/contracts/simple-transfer/simple-transfer.sol @@ -19,7 +19,8 @@ contract SimpleTransfer { // Function to withdraw Ether to the owner's address function withdraw(uint _amount) public onlyOwner { require(address(this).balance >= _amount, "Insufficient balance in contract"); - payable(owner).transfer(_amount); + (bool success, ) = owner.call{value: _amount}(""); + require(success, "transfer reverted"); } // Function to transfer Ether from this contract to any address diff --git a/etc/contracts-test-data/contracts/storage/storage.sol b/core/lib/test_contracts/contracts/storage/storage.sol similarity index 96% rename from etc/contracts-test-data/contracts/storage/storage.sol rename to core/lib/test_contracts/contracts/storage/storage.sol index 2f386f5c732b..f1c629aeb2c6 100644 --- a/etc/contracts-test-data/contracts/storage/storage.sol +++ b/core/lib/test_contracts/contracts/storage/storage.sol @@ -37,7 +37,7 @@ contract StorageTester { } // This test aims to check that the tstore/sstore are writing into separate spaces. - function testTrasientAndNonTransientStore() external { + function testTransientAndNonTransientStore() external { value = 100; uint256 x; @@ -95,7 +95,7 @@ contract StorageTester { } function testTransientStore() external { - this.testTrasientAndNonTransientStore(); + this.testTransientAndNonTransientStore(); this.testTstoreRollback(); } diff --git a/etc/contracts-test-data/contracts/transfer/transfer.sol b/core/lib/test_contracts/contracts/transfer/transfer.sol similarity index 97% rename from etc/contracts-test-data/contracts/transfer/transfer.sol rename to core/lib/test_contracts/contracts/transfer/transfer.sol index 4c63a2e9c7d1..964fb3b01667 100644 --- a/etc/contracts-test-data/contracts/transfer/transfer.sol +++ b/core/lib/test_contracts/contracts/transfer/transfer.sol @@ -9,12 +9,11 @@ contract TransferTest { function send(address payable to, uint256 amount) public payable { bool success = to.send(amount); - require(success, "Transaction failed"); } receive() external payable { - + // Do nothing } } diff --git a/core/lib/test_contracts/src/contracts.rs b/core/lib/test_contracts/src/contracts.rs new file mode 100644 index 000000000000..09a0535824df --- /dev/null +++ b/core/lib/test_contracts/src/contracts.rs @@ -0,0 +1,314 @@ +//! Test contracts. + +use ethabi::Token; +use once_cell::sync::Lazy; +use serde::{Deserialize, Serialize}; +use zksync_types::{Execute, H256, U256}; + +/// The structure of produced modules is as follows: +/// +/// - Each dir in `/contracts` translates into a module with the same name (just with `-` chars replaced with `_`). +/// - Each contract in all files in this dir produces a `RawContract` constant with the same name as the contract. 
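To make the module layout above concrete: for a hypothetical `counter` directory containing a `Counter` contract, the generated `raw_contracts.rs` would look roughly like this (placeholder ABI and bytecode, not real build output):

```rust
pub(crate) mod counter {
    pub(crate) const Counter: crate::contracts::RawContract = crate::contracts::RawContract {
        abi: r#"[{"inputs":[],"name":"get","outputs":[],"type":"function"}]"#, // abridged ABI JSON
        bytecode: &[0, 0, 0, 1], // EraVM bytecode bytes (placeholder)
    };
}
```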
+mod raw { + #![allow(unused, non_upper_case_globals)] + include!(concat!(env!("OUT_DIR"), "/raw_contracts.rs")); +} + +/// Raw contracts produced by the build script. +#[derive(Debug, Clone, Copy)] +pub(crate) struct RawContract { + pub abi: &'static str, + pub bytecode: &'static [u8], +} + +/// Test contract consisting of deployable EraVM bytecode and Web3 ABI. +#[derive(Debug, Clone)] +#[non_exhaustive] +pub struct TestContract { + /// Web3 ABI of this contract. + pub abi: ethabi::Contract, + /// EraVM bytecode of this contract. + pub bytecode: &'static [u8], + /// Contract dependencies (i.e., potential factory deps to be included in the contract deployment / transactions). + pub dependencies: Vec, +} + +impl TestContract { + fn new(raw: RawContract) -> Self { + let abi = serde_json::from_str(raw.abi).expect("failed parsing contract ABI"); + Self { + abi, + bytecode: raw.bytecode, + dependencies: vec![], + } + } + + /// Returns a contract used to test complex system contract upgrades. + pub fn complex_upgrade() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::complex_upgrade::ComplexUpgrade)); + &CONTRACT + } + + /// Returns a contract used to test context methods. + pub fn context_test() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::context::Context)); + &CONTRACT + } + + /// Returns a simple counter contract. + pub fn counter() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::counter::Counter)); + &CONTRACT + } + + /// Returns a contract used in load testing that emulates various kinds of expensive operations + /// (storage reads / writes, hashing, recursion via far calls etc.). + pub fn load_test() -> &'static Self { + static CONTRACT: Lazy = Lazy::new(|| { + let mut contract = TestContract::new(raw::loadnext::LoadnextContract); + contract.dependencies = vec![TestContract::new(raw::loadnext::Foo)]; + contract + }); + &CONTRACT + } + + /// Returns a contract with expensive storage operations. + pub fn expensive() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::expensive::Expensive)); + &CONTRACT + } + + pub fn failed_call() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::failed_call::FailedCall)); + &CONTRACT + } + + /// Returns a contract with an infinite loop (useful for testing out-of-gas reverts). + pub fn infinite_loop() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::infinite::InfiniteLoop)); + &CONTRACT + } + + /// Returns a custom account with multiple owners. + pub fn many_owners() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::custom_account::ManyOwnersCustomAccount)); + &CONTRACT + } + + /// Returns a contract testing `msg.sender` value. + pub fn msg_sender_test() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::complex_upgrade::MsgSenderTest)); + &CONTRACT + } + + pub fn nonce_holder() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::custom_account::NonceHolderTest)); + &CONTRACT + } + + /// Returns a contract testing precompiles. + pub fn precompiles_test() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::precompiles::Precompiles)); + &CONTRACT + } + + /// Returns a contract proxying calls to a [counter](Self::counter()). 
+ pub fn proxy_counter() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::counter::ProxyCounter)); + &CONTRACT + } + + /// Returns a reentrant recipient for transfers. + pub fn reentrant_recipient() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::transfer::ReentrantRecipient)); + &CONTRACT + } + + /// Returns a contract testing reverts. + pub fn reverts_test() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::error::SimpleRequire)); + &CONTRACT + } + + /// Returns a simple fungible token contract. + pub fn simple_transfer() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::simple_transfer::SimpleTransfer)); + &CONTRACT + } + + /// Returns a contract testing storage operations. + pub fn storage_test() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::storage::StorageTester)); + &CONTRACT + } + + /// Returns a contract for testing base token transfers. + pub fn transfer_test() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::transfer::TransferTest)); + &CONTRACT + } + + /// Returns a test recipient for the [transfer test](Self::transfer_test()) contract. + pub fn transfer_recipient() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::transfer::Recipient)); + &CONTRACT + } + + /// Returns a mock version of `ContractDeployer`. + pub fn mock_deployer() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::mock_evm::MockContractDeployer)); + &CONTRACT + } + + /// Returns a mock version of `KnownCodeStorage`. + pub fn mock_known_code_storage() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::mock_evm::MockKnownCodeStorage)); + &CONTRACT + } + + /// Returns a mock EVM emulator. + pub fn mock_evm_emulator() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::mock_evm::MockEvmEmulator)); + &CONTRACT + } + + /// Contract testing recursive calls. + pub fn recursive_test() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::mock_evm::NativeRecursiveContract)); + &CONTRACT + } + + /// Contract implementing incrementing operations. Used to test static / delegate calls. + pub fn increment_test() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::mock_evm::IncrementingContract)); + &CONTRACT + } + + /// Returns all factory deps for this contract deployment (including its own bytecode). + pub fn factory_deps(&self) -> Vec> { + let mut deps = vec![]; + self.insert_factory_deps(&mut deps); + deps + } + + fn insert_factory_deps(&self, dest: &mut Vec>) { + for deployed in &self.dependencies { + dest.push(deployed.bytecode.to_vec()); + deployed.insert_factory_deps(dest); + } + } + + /// Generates the `Execute` payload for deploying this contract with zero salt. + pub fn deploy_payload(&self, args: &[Token]) -> Execute { + self.deploy_payload_with_salt(H256::zero(), args) + } + + /// Generates the `Execute` payload for deploying this contract with custom salt. + pub fn deploy_payload_with_salt(&self, salt: H256, args: &[Token]) -> Execute { + let mut execute = Execute::for_deploy(salt, self.bytecode.to_vec(), args); + execute.factory_deps.extend(self.factory_deps()); + execute + } + + /// Shortcut for accessing a function that panics if a function doesn't exist. 
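Before the `function` accessor below, a usage sketch for the deployment helpers above (hedged: the exact contents of `factory_deps` depend on `Execute::for_deploy`, which is defined elsewhere in this diff):

```rust
use zksync_test_contracts::TestContract;

fn main() {
    // Deployment payload for the load-test contract with 100 pre-populated
    // reads; the dependent `Foo` bytecode is pulled in as a factory dep.
    let contract = TestContract::load_test();
    let execute = contract.deploy_payload(&[ethabi::Token::Uint(100.into())]);
    assert!(!execute.factory_deps.is_empty());
}
```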
+ pub fn function(&self, name: &str) -> &ethabi::Function { + self.abi + .function(name) + .unwrap_or_else(|err| panic!("cannot access function `{name}`: {err}")) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct LoadnextContractExecutionParams { + pub reads: usize, + pub initial_writes: usize, + pub repeated_writes: usize, + pub events: usize, + pub hashes: usize, + pub recursive_calls: usize, + pub deploys: usize, +} + +impl LoadnextContractExecutionParams { + pub fn empty() -> Self { + Self { + reads: 0, + initial_writes: 0, + repeated_writes: 0, + events: 0, + hashes: 0, + recursive_calls: 0, + deploys: 0, + } + } +} + +impl Default for LoadnextContractExecutionParams { + fn default() -> Self { + Self { + reads: 10, + initial_writes: 10, + repeated_writes: 10, + events: 10, + hashes: 10, + recursive_calls: 1, + deploys: 1, + } + } +} + +impl LoadnextContractExecutionParams { + pub fn to_bytes(&self) -> Vec<u8> { + let contract_function = TestContract::load_test().abi.function("execute").unwrap(); + + let params = vec![ + Token::Uint(U256::from(self.reads)), + Token::Uint(U256::from(self.initial_writes)), + Token::Uint(U256::from(self.repeated_writes)), + Token::Uint(U256::from(self.hashes)), + Token::Uint(U256::from(self.events)), + Token::Uint(U256::from(self.recursive_calls)), + Token::Uint(U256::from(self.deploys)), + ]; + + contract_function + .encode_input(&params) + .expect("failed to encode parameters") + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn contracts_are_initialized_correctly() { + TestContract::counter().abi.function("get").unwrap(); + TestContract::context_test() + .abi + .function("getBlockNumber") + .unwrap(); + } +} diff --git a/core/tests/test_account/src/lib.rs b/core/lib/test_contracts/src/lib.rs similarity index 80% rename from core/tests/test_account/src/lib.rs rename to core/lib/test_contracts/src/lib.rs index 39a366945263..223bd92a651a 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/lib/test_contracts/src/lib.rs @@ -1,17 +1,17 @@ use ethabi::Token; -use zksync_contracts::{ - deployer_contract, load_contract, test_contracts::LoadnextContractExecutionParams, -}; use zksync_eth_signer::{PrivateKeySigner, TransactionParameters}; use zksync_system_constants::{ - CONTRACT_DEPLOYER_ADDRESS, DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, + DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, }; use zksync_types::{ - abi, fee::Fee, l2::L2Tx, utils::deployed_address_create, Address, Execute, K256PrivateKey, - L2ChainId, Nonce, Transaction, H256, PRIORITY_OPERATION_L2_TX_TYPE, U256, + abi, address_to_u256, bytecode::BytecodeHash, fee::Fee, l2::L2Tx, + utils::deployed_address_create, Address, Execute, K256PrivateKey, L2ChainId, Nonce, + Transaction, H256, PRIORITY_OPERATION_L2_TX_TYPE, U256, }; -use zksync_utils::{address_to_u256, bytecode::hash_bytecode, h256_to_u256}; + +pub use self::contracts::{LoadnextContractExecutionParams, TestContract}; + +mod contracts; pub const L1_TEST_GAS_PER_PUBDATA_BYTE: u32 = 800; const BASE_FEE: u64 = 2_000_000_000; @@ -54,6 +54,12 @@ impl Account { Self::new(K256PrivateKey::random_using(rng)) } + /// Creates an account deterministically from the provided seed.
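A sketch tying `LoadnextContractExecutionParams` above to the profiles described in the loadnext README (the values are illustrative, not a recommended profile):

```rust
use zksync_test_contracts::{LoadnextContractExecutionParams, TestContract};

fn main() {
    // Start from the default profile, dial up reads, then ABI-encode the
    // corresponding `execute(...)` call for the loadnext contract.
    let params = LoadnextContractExecutionParams {
        reads: 50,
        ..LoadnextContractExecutionParams::default()
    };
    let calldata = params.to_bytes();
    assert!(!calldata.is_empty());

    // The same ABI is also reachable via the accessor:
    let _execute = TestContract::load_test().function("execute");
}
```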
diff --git a/core/tests/test_account/src/lib.rs b/core/lib/test_contracts/src/lib.rs
similarity index 80%
rename from core/tests/test_account/src/lib.rs
rename to core/lib/test_contracts/src/lib.rs
index 39a366945263..223bd92a651a 100644
--- a/core/tests/test_account/src/lib.rs
+++ b/core/lib/test_contracts/src/lib.rs
@@ -1,17 +1,17 @@
 use ethabi::Token;
-use zksync_contracts::{
-    deployer_contract, load_contract, test_contracts::LoadnextContractExecutionParams,
-};
 use zksync_eth_signer::{PrivateKeySigner, TransactionParameters};
 use zksync_system_constants::{
-    CONTRACT_DEPLOYER_ADDRESS, DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE,
-    REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE,
+    DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE,
 };
 use zksync_types::{
-    abi, fee::Fee, l2::L2Tx, utils::deployed_address_create, Address, Execute, K256PrivateKey,
-    L2ChainId, Nonce, Transaction, H256, PRIORITY_OPERATION_L2_TX_TYPE, U256,
+    abi, address_to_u256, bytecode::BytecodeHash, fee::Fee, l2::L2Tx,
+    utils::deployed_address_create, Address, Execute, K256PrivateKey, L2ChainId, Nonce,
+    Transaction, H256, PRIORITY_OPERATION_L2_TX_TYPE, U256,
 };
-use zksync_utils::{address_to_u256, bytecode::hash_bytecode, h256_to_u256};
+
+pub use self::contracts::{LoadnextContractExecutionParams, TestContract};
+
+mod contracts;

 pub const L1_TEST_GAS_PER_PUBDATA_BYTE: u32 = 800;
 const BASE_FEE: u64 = 2_000_000_000;
@@ -54,6 +54,12 @@ impl Account {
         Self::new(K256PrivateKey::random_using(rng))
     }

+    /// Creates an account deterministically from the provided seed.
+    pub fn from_seed(seed: u32) -> Self {
+        // The `+ 1` keeps the derived key non-zero (an all-zero secp256k1 private key is invalid).
+        let private_key_bytes = H256::from_low_u64_be(u64::from(seed) + 1);
+        Self::new(K256PrivateKey::from_bytes(private_key_bytes).unwrap())
+    }
+
     pub fn get_l2_tx_for_execute(&mut self, execute: Execute, fee: Option<Fee>) -> Transaction {
         let tx = self.get_l2_tx_for_execute_with_nonce(execute, fee, self.nonce);
         self.nonce += 1;
@@ -109,31 +115,13 @@
         &mut self,
         code: &[u8],
         calldata: Option<&[Token]>,
-        mut factory_deps: Vec<Vec<u8>>,
+        factory_deps: Vec<Vec<u8>>,
         tx_type: TxType,
     ) -> DeployContractsTx {
-        let deployer = deployer_contract();
-
-        let contract_function = deployer.function("create").unwrap();
-
-        let calldata = calldata.map(ethabi::encode);
-        let code_hash = hash_bytecode(code);
-        let params = [
-            Token::FixedBytes(vec![0u8; 32]),
-            Token::FixedBytes(code_hash.0.to_vec()),
-            Token::Bytes(calldata.unwrap_or_default().to_vec()),
-        ];
-        factory_deps.push(code.to_vec());
-        let calldata = contract_function
-            .encode_input(&params)
-            .expect("failed to encode parameters");
-
-        let execute = Execute {
-            contract_address: Some(CONTRACT_DEPLOYER_ADDRESS),
-            calldata,
-            factory_deps,
-            value: U256::zero(),
-        };
+        let calldata = calldata.unwrap_or_default();
+        let code_hash = BytecodeHash::for_bytecode(code).value();
+        let mut execute = Execute::for_deploy(H256::zero(), code.to_vec(), calldata);
+        execute.factory_deps.extend(factory_deps);

         let tx = match tx_type {
             TxType::L2 => self.get_l2_tx_for_execute(execute, None),
@@ -178,7 +166,7 @@
             signature: vec![],
             factory_deps: factory_deps
                 .iter()
-                .map(|b| h256_to_u256(hash_bytecode(b)))
+                .map(|b| BytecodeHash::for_bytecode(b).value_u256())
                 .collect(),
             paymaster_input: vec![],
             reserved_dynamic: vec![],
@@ -198,16 +186,15 @@
         payable: bool,
         tx_type: TxType,
     ) -> Transaction {
-        let test_contract = load_contract(
-            "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json",
-        );
+        let test_contract = TestContract::counter();

         let function = if payable {
             test_contract
+                .abi
                 .function("incrementWithRevertPayable")
                 .unwrap()
         } else {
-            test_contract.function("incrementWithRevert").unwrap()
+            test_contract.abi.function("incrementWithRevert").unwrap()
         };

         let calldata = function
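The removed block above spells out exactly what the new one-liner replaces; judging from those removed lines, `Execute::for_deploy(salt, code, args)` expands to roughly the following (a sketch of the equivalence, not the actual implementation; `for_deploy_sketch` is a hypothetical helper):

    use ethabi::Token;
    use zksync_contracts::deployer_contract;
    use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS;
    use zksync_types::{bytecode::BytecodeHash, Execute, H256, U256};

    fn for_deploy_sketch(salt: H256, code: Vec<u8>, args: &[Token]) -> Execute {
        let deployer = deployer_contract();
        // `create(bytes32 _salt, bytes32 _bytecodeHash, bytes _input)` on `ContractDeployer`.
        let calldata = deployer
            .function("create")
            .unwrap()
            .encode_input(&[
                Token::FixedBytes(salt.as_bytes().to_vec()),
                Token::FixedBytes(BytecodeHash::for_bytecode(&code).value().as_bytes().to_vec()),
                Token::Bytes(ethabi::encode(args)),
            ])
            .expect("failed to encode `create` parameters");
        Execute {
            contract_address: Some(CONTRACT_DEPLOYER_ADDRESS),
            calldata,
            // The deployed bytecode itself ships as a factory dep.
            factory_deps: vec![code],
            value: U256::zero(),
        }
    }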
diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml
index 54c38384a7ad..325fe22209a7 100644
--- a/core/lib/types/Cargo.toml
+++ b/core/lib/types/Cargo.toml
@@ -11,12 +11,11 @@ keywords.workspace = true
 categories.workspace = true

 [dependencies]
+# **IMPORTANT.** Please do not add a dependency on `zksync_config` etc. This crate has a heavy dependency graph as is.
 zksync_system_constants.workspace = true
-zksync_utils.workspace = true
 zksync_basic_types.workspace = true
 zksync_contracts.workspace = true
 zksync_mini_merkle_tree.workspace = true
-zksync_config.workspace = true
 zksync_protobuf.workspace = true
 zksync_crypto_primitives.workspace = true

@@ -39,7 +38,6 @@
 itertools.workspace = true
 tracing.workspace = true

 # Crypto stuff
-secp256k1.workspace = true
 blake2.workspace = true

 [dev-dependencies]
diff --git a/core/lib/types/src/abi.rs b/core/lib/types/src/abi.rs
index 84f8aba64869..92d4cb4c8612 100644
--- a/core/lib/types/src/abi.rs
+++ b/core/lib/types/src/abi.rs
@@ -1,7 +1,7 @@
 use anyhow::Context as _;
-use zksync_utils::{bytecode::hash_bytecode, h256_to_u256};

 use crate::{
+    bytecode::BytecodeHash,
     ethabi,
     ethabi::{ParamType, Token},
     transaction_request::TransactionRequest,
@@ -356,7 +356,7 @@
         // verify data integrity
         let factory_deps_hashes: Vec<_> = factory_deps
             .iter()
-            .map(|b| h256_to_u256(hash_bytecode(b)))
+            .map(|b| BytecodeHash::for_bytecode(b).value_u256())
             .collect();
         anyhow::ensure!(tx.factory_deps == factory_deps_hashes);
         tx.hash()
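The pattern above recurs throughout the patch: `h256_to_u256(hash_bytecode(b))` from the dropped `zksync_utils` crate becomes `BytecodeHash::for_bytecode(b).value_u256()`. A sketch of the equivalence being relied on, assuming `BytecodeHash` exposes the same hash both as `H256` (`value()`) and as `U256` (`value_u256()`), as the call sites in this patch suggest:

    use zksync_types::{bytecode::BytecodeHash, U256};

    // Canonical hash of a factory dependency, in the `U256` form used by system logs.
    fn factory_dep_hash(dep: &[u8]) -> U256 {
        // Pre-patch spelling: h256_to_u256(hash_bytecode(dep))
        BytecodeHash::for_bytecode(dep).value_u256()
    }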
diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs
index 209ab7c24f98..daaa5651a032 100644
--- a/core/lib/types/src/api/en.rs
+++ b/core/lib/types/src/api/en.rs
@@ -1,7 +1,7 @@
 //! API types related to the External Node specific methods.

 use serde::{Deserialize, Serialize};
-use zksync_basic_types::{Address, L1BatchNumber, L2BlockNumber, H256};
+use zksync_basic_types::{commitment::PubdataParams, Address, L1BatchNumber, L2BlockNumber, H256};
 use zksync_contracts::BaseSystemContractsHashes;

 use crate::ProtocolVersionId;
@@ -42,6 +42,8 @@ pub struct SyncBlock {
     pub hash: Option<H256>,
     /// Version of the protocol used for this block.
     pub protocol_version: ProtocolVersionId,
+    /// Pubdata parameters used for this batch.
+    pub pubdata_params: Option<PubdataParams>,
 }

 /// Global configuration of the consensus served by the main node to the external nodes.
diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs
index 1c7672264cb4..5f81e889b537 100644
--- a/core/lib/types/src/api/mod.rs
+++ b/core/lib/types/src/api/mod.rs
@@ -206,6 +206,7 @@ pub struct BridgeAddresses {
     pub l2_erc20_default_bridge: Option<Address>,
     pub l1_weth_bridge: Option<Address>,
     pub l2_weth_bridge: Option<Address>,
+    pub l2_legacy_shared_bridge: Option<Address>,
 }

 #[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
@@ -256,8 +257,6 @@ pub struct TransactionReceipt {
     pub l2_to_l1_logs: Vec<L2ToL1Log>,
     /// Status: either 1 (success) or 0 (failure).
     pub status: U64,
-    /// State root.
-    pub root: H256,
     /// Logs bloom
     #[serde(rename = "logsBloom")]
     pub logs_bloom: Bloom,
@@ -516,6 +515,9 @@ pub struct Transaction {
     pub gas: U256,
     /// Input data
     pub input: Bytes,
+    /// The parity (0 for even, 1 for odd) of the y-value of the secp256k1 signature.
+    #[serde(rename = "yParity", default, skip_serializing_if = "Option::is_none")]
+    pub y_parity: Option<U64>,
     /// ECDSA recovery id
     #[serde(default, skip_serializing_if = "Option::is_none")]
     pub v: Option<U64>,
@@ -644,7 +646,7 @@ pub struct ProtocolVersion {
     /// Verifier configuration
     #[deprecated]
     pub verification_keys_hashes: Option<L1VerifierConfig>,
-    /// Hashes of base system contracts (bootloader, default account and evm simulator)
+    /// Hashes of base system contracts (bootloader, default account and evm emulator)
     #[deprecated]
     pub base_system_contracts: Option<BaseSystemContractsHashes>,
     /// Bootloader code hash
diff --git a/core/lib/types/src/api/state_override.rs b/core/lib/types/src/api/state_override.rs
index f2986610840a..69025d1a1f78 100644
--- a/core/lib/types/src/api/state_override.rs
+++ b/core/lib/types/src/api/state_override.rs
@@ -1,10 +1,12 @@
 use std::collections::HashMap;

 use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
-use zksync_basic_types::{web3::Bytes, H256, U256};
-use zksync_utils::bytecode::{hash_bytecode, validate_bytecode, InvalidBytecodeError};
+use zksync_basic_types::{bytecode::BytecodeHash, web3::Bytes, H256, U256};

-use crate::Address;
+use crate::{
+    bytecode::{validate_bytecode, InvalidBytecodeError},
+    Address,
+};

 /// Collection of overridden accounts.
 #[derive(Debug, Clone, Default, Serialize, Deserialize)]
@@ -44,7 +46,7 @@ impl Bytecode {

     /// Returns the canonical hash of this bytecode.
     pub fn hash(&self) -> H256 {
-        hash_bytecode(&self.0 .0)
+        BytecodeHash::for_bytecode(&self.0 .0).value()
     }

     /// Converts this bytecode into bytes.
diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs
index 9211a6f1d8cf..804da61b7295 100644
--- a/core/lib/types/src/block.rs
+++ b/core/lib/types/src/block.rs
@@ -1,16 +1,15 @@
 use std::{fmt, ops};

 use serde::{Deserialize, Serialize};
-use zksync_basic_types::{Address, Bloom, BloomInput, H256, U256};
+use zksync_basic_types::{commitment::PubdataParams, Address, Bloom, BloomInput, H256, U256};
 use zksync_contracts::BaseSystemContractsHashes;
 use zksync_system_constants::SYSTEM_BLOCK_INFO_BLOCK_NUMBER_MULTIPLIER;
-use zksync_utils::concat_and_hash;

 use crate::{
     fee_model::BatchFeeInput,
     l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log},
     priority_op_onchain_data::PriorityOpOnchainData,
-    web3::keccak256,
+    web3::{keccak256, keccak256_concat},
     AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction,
 };

@@ -113,6 +112,7 @@ pub struct L2BlockHeader {
     /// amount of gas can be spent on pubdata.
     pub gas_limit: u64,
     pub logs_bloom: Bloom,
+    pub pubdata_params: PubdataParams,
 }

 /// Structure that represents the data that is returned by the storage oracle during batch execution.
@@ -252,7 +252,7 @@ impl L2BlockHasher {
     /// Updates this hasher with a transaction hash. This should be called for all transactions in the block
     /// in the order of their execution.
     pub fn push_tx_hash(&mut self, tx_hash: H256) {
-        self.txs_rolling_hash = concat_and_hash(self.txs_rolling_hash, tx_hash);
+        self.txs_rolling_hash = keccak256_concat(self.txs_rolling_hash, tx_hash);
     }

     /// Returns the hash of the L2 block.
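`keccak256_concat` is the renamed `concat_and_hash`, so `push_tx_hash` still folds each transaction into the rolling hash as keccak256(prev_rolling_hash || tx_hash), in execution order. A sketch of that fold, assuming the rolling hash starts from zero and `keccak256_concat` hashes the 64-byte concatenation:

    use zksync_types::{web3::keccak256, H256};

    // Rolling hash of a block's transactions, reproduced outside `L2BlockHasher`.
    fn txs_rolling_hash(tx_hashes: &[H256]) -> H256 {
        tx_hashes.iter().fold(H256::zero(), |acc, tx_hash| {
            let mut buf = [0_u8; 64];
            buf[..32].copy_from_slice(acc.as_bytes());
            buf[32..].copy_from_slice(tx_hash.as_bytes());
            H256(keccak256(&buf))
        })
    }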
diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs
index 759ee8947ba9..1eba7e7a9ec0 100644
--- a/core/lib/types/src/commitment/mod.rs
+++ b/core/lib/types/src/commitment/mod.rs
@@ -9,22 +9,23 @@
 use std::{collections::HashMap, convert::TryFrom};

 use serde::{Deserialize, Serialize};
-pub use zksync_basic_types::commitment::L1BatchCommitmentMode;
+pub use zksync_basic_types::commitment::{L1BatchCommitmentMode, PubdataParams};
 use zksync_contracts::BaseSystemContractsHashes;
+use zksync_crypto_primitives::hasher::{keccak::KeccakHasher, Hasher};
 use zksync_mini_merkle_tree::MiniMerkleTree;
 use zksync_system_constants::{
-    KNOWN_CODES_STORAGE_ADDRESS, L2_TO_L1_LOGS_TREE_ROOT_KEY, STATE_DIFF_HASH_KEY,
+    KNOWN_CODES_STORAGE_ADDRESS, L2_TO_L1_LOGS_TREE_ROOT_KEY, STATE_DIFF_HASH_KEY_PRE_GATEWAY,
     ZKPORTER_IS_AVAILABLE,
 };
-use zksync_utils::u256_to_h256;

 use crate::{
     blob::num_blobs_required,
     block::{L1BatchHeader, L1BatchTreeData},
     l2_to_l1_log::{
-        l2_to_l1_logs_tree_size, parse_system_logs_for_blob_hashes, L2ToL1Log, SystemL2ToL1Log,
-        UserL2ToL1Log,
+        l2_to_l1_logs_tree_size, parse_system_logs_for_blob_hashes_pre_gateway, L2ToL1Log,
+        SystemL2ToL1Log, UserL2ToL1Log,
     },
+    u256_to_h256,
     web3::keccak256,
     writes::{
         compress_state_diffs, InitialStorageWrite, RepeatedStorageWrite, StateDiffRecord,
@@ -92,6 +93,16 @@ pub struct L1BatchMetadata {
     /// commitment to the transactions in the batch.
     pub bootloader_initial_content_commitment: Option<H256>,
     pub state_diffs_compressed: Vec<u8>,
+    /// Hash of packed state diffs. It's present only for post-gateway batches.
+    pub state_diff_hash: Option<H256>,
+    /// Root hash of the local logs tree. The tree contains logs that were produced on this chain.
+    /// It's present only for post-gateway batches.
+    pub local_root: Option<H256>,
+    /// Root hash of the aggregated logs tree. The tree aggregates the `local_root`s of chains that settle on this chain.
+    /// It's present only for post-gateway batches.
+    pub aggregation_root: Option<H256>,
+    /// Data availability inclusion proof that has to be verified on the settlement layer.
+    pub da_inclusion_data: Option<Vec<u8>>,
 }

 impl L1BatchMetadata {
@@ -265,6 +276,13 @@ pub struct L1BatchAuxiliaryCommonOutput {
     protocol_version: ProtocolVersionId,
 }

+#[derive(Debug, Default, Clone, Copy, Eq, PartialEq)]
+#[cfg_attr(test, derive(Serialize, Deserialize))]
+pub struct BlobHash {
+    pub commitment: H256,
+    pub linear_hash: H256,
+}
+
 /// Block output produced by the virtual machine.
 #[derive(Debug, Clone, Eq, PartialEq)]
 #[cfg_attr(test, derive(Serialize, Deserialize))]
@@ -283,8 +301,9 @@ pub enum L1BatchAuxiliaryOutput {
         state_diffs_compressed: Vec<u8>,
         state_diffs_hash: H256,
         aux_commitments: AuxCommitments,
-        blob_linear_hashes: Vec<H256>,
-        blob_commitments: Vec<H256>,
+        blob_hashes: Vec<BlobHash>,
+        aggregation_root: H256,
+        local_root: H256,
     },
 }

@@ -333,17 +352,23 @@
                 system_logs,
                 state_diffs,
                 aux_commitments,
-                blob_commitments,
+                blob_hashes,
+                aggregation_root,
             } => {
                 let l2_l1_logs_compressed = serialize_commitments(&common_input.l2_to_l1_logs);
                 let merkle_tree_leaves = l2_l1_logs_compressed
                     .chunks(UserL2ToL1Log::SERIALIZED_SIZE)
                     .map(|chunk| <[u8; UserL2ToL1Log::SERIALIZED_SIZE]>::try_from(chunk).unwrap());
-                let l2_l1_logs_merkle_root = MiniMerkleTree::new(
+                let local_root = MiniMerkleTree::new(
                     merkle_tree_leaves,
                     Some(l2_to_l1_logs_tree_size(common_input.protocol_version)),
                 )
                 .merkle_root();
+                let l2_l1_logs_merkle_root = if common_input.protocol_version.is_pre_gateway() {
+                    local_root
+                } else {
+                    KeccakHasher.compress(&local_root, &aggregation_root)
+                };

                 let common_output = L1BatchAuxiliaryCommonOutput {
                     l2_l1_logs_merkle_root,
@@ -357,22 +382,33 @@
                 let state_diffs_hash = H256::from(keccak256(&(state_diffs_packed)));
                 let state_diffs_compressed = compress_state_diffs(state_diffs);

-                let blob_linear_hashes =
-                    parse_system_logs_for_blob_hashes(&common_input.protocol_version, &system_logs);
-
                 // Sanity checks. System logs are empty for the genesis batch, so we can't do checks for it.
                 if !system_logs.is_empty() {
-                    let state_diff_hash_from_logs = system_logs
-                        .iter()
-                        .find_map(|log| {
-                            (log.0.key == u256_to_h256(STATE_DIFF_HASH_KEY.into()))
-                                .then_some(log.0.value)
-                        })
-                        .expect("Failed to find state diff hash in system logs");
-                    assert_eq!(
-                        state_diffs_hash, state_diff_hash_from_logs,
-                        "State diff hash mismatch"
-                    );
+                    if common_input.protocol_version.is_pre_gateway() {
+                        let state_diff_hash_from_logs = system_logs
+                            .iter()
+                            .find_map(|log| {
+                                (log.0.key == u256_to_h256(STATE_DIFF_HASH_KEY_PRE_GATEWAY.into()))
+                                    .then_some(log.0.value)
+                            })
+                            .expect("Failed to find state diff hash in system logs");
+                        assert_eq!(
+                            state_diffs_hash, state_diff_hash_from_logs,
+                            "State diff hash mismatch"
+                        );
+
+                        let blob_linear_hashes_from_logs =
+                            parse_system_logs_for_blob_hashes_pre_gateway(
+                                &common_input.protocol_version,
+                                &system_logs,
+                            );
+                        let blob_linear_hashes: Vec<_> =
+                            blob_hashes.iter().map(|b| b.linear_hash).collect();
+                        assert_eq!(
+                            blob_linear_hashes, blob_linear_hashes_from_logs,
+                            "Blob linear hashes mismatch"
+                        );
+                    }

                     let l2_to_l1_logs_tree_root_from_logs = system_logs
                         .iter()
@@ -387,25 +423,45 @@
                     );
                 }

-                assert_eq!(
-                    blob_linear_hashes.len(),
-                    blob_commitments.len(),
-                    "Blob linear hashes and commitments have different lengths"
-                );
-
                 Self::PostBoojum {
                     common: common_output,
                     system_logs_linear_hash,
                     state_diffs_compressed,
                     state_diffs_hash,
                     aux_commitments,
-                    blob_linear_hashes,
-                    blob_commitments,
+                    blob_hashes,
+                    local_root,
+                    aggregation_root,
                 }
             }
         }
     }

+    pub fn local_root(&self) -> H256 {
+        match self {
+            Self::PreBoojum { common, .. } => common.l2_l1_logs_merkle_root,
+            Self::PostBoojum { local_root, .. } => *local_root,
+        }
+    }
+
+    pub fn aggregation_root(&self) -> H256 {
+        match self {
+            Self::PreBoojum { .. } => H256::zero(),
+            Self::PostBoojum {
+                aggregation_root, ..
+            } => *aggregation_root,
+        }
+    }
+
+    pub fn state_diff_hash(&self) -> H256 {
+        match self {
+            Self::PreBoojum { .. } => H256::zero(),
+            Self::PostBoojum {
+                state_diffs_hash, ..
+            } => *state_diffs_hash,
+        }
+    }
+
     pub fn to_bytes(&self) -> Vec<u8> {
         let mut result = Vec::new();

@@ -426,8 +482,7 @@
             system_logs_linear_hash,
             state_diffs_hash,
             aux_commitments,
-            blob_linear_hashes,
-            blob_commitments,
+            blob_hashes,
             ..
         } => {
             result.extend(system_logs_linear_hash.as_bytes());
@@ -439,9 +494,9 @@
             );
             result.extend(aux_commitments.events_queue_commitment.as_bytes());

-            for i in 0..blob_commitments.len() {
-                result.extend(blob_linear_hashes[i].as_bytes());
-                result.extend(blob_commitments[i].as_bytes());
+            for b in blob_hashes {
+                result.extend(b.linear_hash.as_bytes());
+                result.extend(b.commitment.as_bytes());
             }
         }
     }
@@ -637,6 +692,9 @@ impl L1BatchCommitment {
             aux_commitments: self.aux_commitments(),
             compressed_initial_writes,
             compressed_repeated_writes,
+            local_root: self.auxiliary_output.local_root(),
+            aggregation_root: self.auxiliary_output.aggregation_root(),
+            state_diff_hash: self.auxiliary_output.state_diff_hash(),
         }
     }
 }
@@ -673,7 +731,8 @@ pub enum CommitmentInput {
         system_logs: Vec<SystemL2ToL1Log>,
         state_diffs: Vec<StateDiffRecord>,
         aux_commitments: AuxCommitments,
-        blob_commitments: Vec<H256>,
+        blob_hashes: Vec<BlobHash>,
+        aggregation_root: H256,
     },
 }

@@ -715,11 +774,11 @@ impl CommitmentInput {
                 events_queue_commitment: H256::zero(),
                 bootloader_initial_content_commitment: H256::zero(),
             },
-            blob_commitments: {
+            blob_hashes: {
                 let num_blobs = num_blobs_required(&protocol_version);
-
-                vec![H256::zero(); num_blobs]
+                vec![Default::default(); num_blobs]
             },
+            aggregation_root: H256::zero(),
         }
     }
 }
@@ -734,4 +793,7 @@ pub struct L1BatchCommitmentArtifacts {
     pub compressed_repeated_writes: Option<Vec<u8>>,
     pub zkporter_is_available: bool,
     pub aux_commitments: Option<AuxCommitments>,
+    pub aggregation_root: H256,
+    pub local_root: H256,
+    pub state_diff_hash: H256,
 }
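Restating the root computation introduced above: a pre-gateway batch publishes its chain-local L2-to-L1 logs root directly, while a post-gateway batch additionally commits to the aggregation root of the chains that settle on it:

    use zksync_crypto_primitives::hasher::{keccak::KeccakHasher, Hasher};
    use zksync_types::H256;

    fn settled_l2_l1_logs_root(pre_gateway: bool, local_root: H256, aggregation_root: H256) -> H256 {
        if pre_gateway {
            local_root
        } else {
            // keccak256(local_root || aggregation_root), via the `Hasher` impl used above.
            KeccakHasher.compress(&local_root, &aggregation_root)
        }
    }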
diff --git a/core/lib/types/src/commitment/tests/mod.rs b/core/lib/types/src/commitment/tests/mod.rs
index 33fb0142b04d..a95318309a28 100644
--- a/core/lib/types/src/commitment/tests/mod.rs
+++ b/core/lib/types/src/commitment/tests/mod.rs
@@ -55,3 +55,8 @@ fn post_boojum_1_5_0() {
 fn post_boojum_1_5_0_with_evm() {
     run_test("post_boojum_1_5_0_test_with_evm");
 }
+
+#[test]
+fn post_gateway() {
+    run_test("post_gateway_test");
+}
diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json index c5eccbce038a..c854a6e77d8f 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json @@ -190,10 +190,17 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -248,14 +255,18 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", -
"0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "local_root": "0xe52d57bd64cabf6c588b30365512da2bf10912c106e7a06483b236d05ac4037e" } }, "hashes": { diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json index 4983bbeca143..96aa8ab842ce 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json @@ -206,10 +206,17 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002" - ] + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -264,14 +271,18 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + } ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002" - ] + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "local_root": "0x0b6e1ad4643cc2bee06b5e173184ec822d80826e5720f5715172898350433299" } }, "hashes": { diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json index 59a24b7c90ce..ed61ea67cefc 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json @@ -238,24 +238,73 @@ 
"events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": 
"0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -310,42 +359,74 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000007", - "0x0000000000000000000000000000000000000000000000000000000000000008", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": 
"0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "local_root": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff" } }, "hashes": { diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json index 4e8c0e0814a0..a41aa33c04a1 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json @@ -239,24 +239,73 @@ "events_queue_commitment": 
"0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + 
{ + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -312,42 +361,74 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000007", - "0x0000000000000000000000000000000000000000000000000000000000000008", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + 
"commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "local_root": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff" } }, "hashes": { diff --git a/core/lib/types/src/commitment/tests/post_gateway_test.json b/core/lib/types/src/commitment/tests/post_gateway_test.json new file mode 100644 index 000000000000..4b598ff59f4f --- /dev/null +++ b/core/lib/types/src/commitment/tests/post_gateway_test.json @@ -0,0 +1,1977 @@ +{ + "hashes": { + "pass_through_data": "0x756c1660f611302295f6a56a8f4b9d68f2ebf51f8278f225d6b7e64bb9364be0", + "aux_output": "0xcccf1ef8192054cb1b5fb668868ce4e069a695a1394b9486ebd3031cec12fe12", + "meta_parameters": 
"0xdb298fa55c75b134333cee0b39f77aea956553a1eb861a5777dc7a66ad7a55b9", + "commitment": "0xd6615c5447c817a320c69c6a5af12c472fd4d5bc2ef4de7806d40afe384ddc27" + }, + "auxiliary_output": { + "PostBoojum": { + "common": { + "l2_l1_logs_merkle_root": "0x38eaeef3afe69b6f6b2fa22c92da8137f1e405a1e1861b7de7cfa30c7d7462dd", + "protocol_version": "Version27" + }, + "system_logs_linear_hash": "0xe8460ce1ed47b77cfee3cadf803aa089c144c506ea2bdd358a6a38ff2c7bc8e3", + "state_diffs_compressed": [ + 1,0,27,89,4,0,148,112,120,89,162,183,230,11,175,17,100,223,232,175,83,47,195,198,157,29,129,145,197,186,61,127,17,109,250,141,181,206,45,0,1,0,1,67,6,51,197,115,134,143,51,94,49,6,252,85,139,173,197,6,118,46,184,24,78,249,206,120,93,239,110,206,130,208,215,121,46,249,196,126,160,123,216,26,86,45,8,246,35,74,8,171,141,141,223,145,137,150,142,180,236,158,154,37,0,1,0,14,207,174,184,55,189,9,139,207,155,222,111,194,204,216,232,169,53,90,27,112,230,1,172,24,205,8,158,179,8,246,11,47,22,184,171,230,29,125,57,179,213,44,191,157,128,184,167,253,5,55,217,60,33,8,75,147,188,5,4,171,60,0,1,0,0,195,40,243,40,221,130,10,29,214,152,4,122,127,125,73,135,77,130,89,25,110,39,53,23,67,10,248,244,128,203,204,98,199,195,136,172,152,215,47,208,131,209,215,32,206,186,255,203,162,198,108,114,94,200,185,197,197,240,116,111,138,0,1,0,0,91,142,88,121,116,4,61,89,191,251,246,50,208,32,231,100,149,154,190,98,228,194,56,216,223,46,98,178,181,235,143,74,199,189,78,241,151,159,154,102,86,114,178,92,208,123,30,61,99,122,89,162,199,107,26,34,232,91,117,146,65,0,1,0,1,67,6,51,197,115,134,143,51,94,49,6,252,85,139,173,197,6,118,46,184,24,78,249,206,120,93,239,110,206,66,202,106,148,168,163,117,186,10,227,150,70,185,29,164,88,23,175,73,33,116,119,174,107,73,193,3,53,191,78,11,115,0,1,0,1,179,126,95,140,175,172,146,75,62,102,55,121,225,44,128,218,206,138,2,177,210,115,174,112,143,39,90,214,42,71,164,52,102,233,54,181,135,193,191,158,16,243,13,105,123,17,66,228,89,233,6,51,219,18,27,114,127,245,180,121,43,0,1,0,0,103,85,79,83,198,102,14,83,189,151,41,240,10,238,1,137,1,13,254,184,104,250,60,189,72,18,50,72,12,95,233,138,250,154,14,193,124,34,99,2,123,232,159,70,229,238,83,58,30,33,169,31,255,76,177,68,204,74,239,33,188,0,1,0,5,181,151,235,2,5,139,35,12,238,106,156,223,71,5,65,13,21,26,209,234,106,211,226,222,119,132,26,207,194,142,107,134,4,159,177,116,20,22,26,103,250,177,44,27,34,144,68,85,154,16,112,202,176,23,66,216,18,18,139,156,190,0,1,0,0,103,85,79,83,198,102,14,83,189,151,41,240,10,238,1,137,1,13,254,184,104,250,60,189,72,18,50,72,12,140,78,248,13,214,150,89,51,34,40,248,136,164,249,180,131,12,12,50,27,140,170,45,67,53,35,250,200,97,150,181,207,0,1,0,4,139,241,177,101,244,200,80,208,30,184,30,92,4,15,112,234,1,220,160,25,231,128,168,127,128,236,120,151,195,235,96,109,232,32,223,19,108,89,177,255,121,225,223,146,96,66,108,164,143,175,146,74,70,53,36,33,31,183,195,161,165,0,1,0,2,1,233,35,243,228,122,223,167,196,240,246,30,168,221,66,86,246,109,16,153,151,11,205,49,72,146,134,73,23,95,127,60,252,81,90,137,254,204,8,97,6,46,254,57,252,48,104,36,27,116,128,26,163,170,147,245,100,172,98,242,54,0,1,0,6,29,196,13,11,194,203,129,144,94,107,193,66,30,189,75,216,183,154,172,184,218,79,157,113,69,178,136,103,79,207,14,96,187,125,119,111,198,230,184,241,1,19,161,190,119,25,192,44,34,151,163,108,216,124,11,59,35,121,140,74,95,0,1,0,7,15,28,212,226,131,6,7,175,48,25,39,67,145,15,223,171,241,221,0,74,128,115,148,66,117,129,157,29,254,53,89,195,90,167,22,152,246,194,202,70,67,239,232,80,69,169,73,79,38,45,119,238,103,193,61,215,52,230,38,48,90,0,1,0,1,67,6,51,
197,115,134,143,51,94,49,6,252,85,139,173,197,6,118,46,184,24,78,249,206,120,93,239,110,206,186,93,97,46,65,80,43,253,205,126,211,179,176,210,212,177,245,200,248,185,15,209,21,42,187,224,222,192,14,162,61,7,0,1,0,4,139,241,177,101,244,200,80,208,30,184,30,92,4,15,112,234,1,220,160,25,231,128,168,127,128,236,120,151,195,44,61,141,160,220,54,28,84,148,218,146,175,212,98,94,116,25,190,241,121,131,189,209,145,214,33,89,62,212,173,57,47,0,1,0,3,35,183,38,99,20,185,211,236,56,125,91,205,149,144,165,11,224,14,120,136,55,202,90,136,13,151,227,238,131,48,100,113,86,255,138,164,229,100,8,99,14,34,251,194,115,119,250,250,242,7,188,204,248,210,254,18,115,9,165,229,233,0,1,0,2,21,205,111,48,109,117,125,93,47,219,180,96,198,15,41,133,132,23,158,236,192,113,150,199,174,142,79,141,100,200,51,40,178,38,74,180,112,167,221,220,163,38,200,255,61,159,78,76,252,60,226,78,168,221,216,201,180,12,20,188,185,0,1,0,2,1,233,35,243,228,122,223,167,196,240,246,30,168,221,66,86,246,109,16,153,151,11,205,49,72,146,134,73,23,229,176,77,5,169,51,88,54,33,49,122,209,137,227,159,45,116,33,7,146,238,29,46,153,91,171,175,162,128,71,14,27,0,1,0,1,83,6,251,15,11,95,222,33,153,150,85,28,128,114,198,113,27,186,83,0,178,102,154,235,15,14,76,116,69,250,253,202,115,87,157,171,40,23,48,73,193,157,78,81,69,162,232,29,120,68,42,125,135,121,254,156,149,143,198,173,119,0,1,0,0,53,186,49,237,22,194,17,96,120,10,192,151,99,191,213,147,177,116,143,105,230,131,169,251,16,146,26,164,157,188,67,139,78,87,51,241,113,6,164,3,59,144,48,243,76,127,49,1,147,79,102,218,253,36,37,149,91,92,247,35,64,0,1,0,0,237,48,107,41,18,142,6,90,253,206,115,235,200,224,236,208,140,217,24,253,192,247,76,3,246,77,81,94,72,6,115,118,133,155,130,106,66,115,187,68,69,230,28,222,77,91,95,90,23,4,86,255,161,95,247,195,108,233,152,241,190,0,1,0,1,63,74,44,150,76,113,212,159,45,136,118,161,30,238,75,244,232,209,146,49,101,47,199,117,178,202,228,58,33,129,4,114,76,225,212,61,128,125,223,69,48,213,107,167,249,183,181,194,21,67,99,215,247,166,215,108,189,158,61,249,130,0,1,0,0,103,33,171,145,26,65,216,181,37,2,234,76,223,66,236,153,229,229,41,190,106,62,102,243,173,178,20,60,90,51,232,118,240,225,158,242,19,13,216,95,254,79,35,196,212,101,148,164,24,219,221,10,181,111,253,164,76,93,72,246,206,0,1,0,5,187,116,113,160,1,107,28,51,15,118,122,94,115,173,12,51,9,52,105,76,173,0,124,90,163,249,38,190,101,25,143,99,94,50,239,35,14,215,12,184,219,25,32,81,51,246,142,27,126,246,157,133,33,13,119,172,197,111,163,43,234,0,1,0,5,21,101,126,190,1,77,145,81,72,7,18,222,122,16,141,155,14,26,122,121,141,61,162,148,91,165,60,209,3,90,251,120,52,143,255,91,253,53,60,239,129,160,65,213,230,214,195,241,114,123,145,145,220,232,75,132,91,7,118,101,237,0,1,0,6,49,18,87,196,3,187,186,126,42,239,10,162,8,34,98,124,130,236,116,132,252,179,27,135,221,140,88,42,169,240,163,222,200,2,37,101,9,35,172,42,74,77,142,96,167,8,137,208,171,61,234,142,107,218,41,37,203,138,127,216,252,137,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,174,229,34,198,20,187,1,37,21,66,226,45,128,16,30,45,151,85,103,77,143,214,69,38,254,154,44,77,223,171,97,143,137,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,159,186,133,75,226,253,235,173,50,111,19,111,136,219,244,177,114,214,77,28,237,51,180,171,99,164,148,28,226,73,151,137,7,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,201,172,113,10,243,67,127,194,244,249,48,131,50,164,72,10,88,81,76,45,149,28,73,119,114,174,142,141,132,8,175,27,137,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,182,81,125,81,147,30,201,86,98,178,2,213,133,189,82,214,234,207,27,118,113,82,28,46,150,32,45,104,62,223,226,99,137,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,28,170,140,159,117,250,84,163,177,210,240,18,225,217,234,99,118,79,112,157,28,25,151,121,72,28,143,77,92,237,107,62,9,1,45,86,211,71,72,250,222,240,196,161,223,115,65,15,173,85,177,255,211,89,90,168,146,255,238,205,12,128,137,196,203,27,9,1,12,138,2,247,115,213,230,144,244,253,93,195,182,20,84,243,71,244,71,24,244,103,128,231,65,233,198,173,128,126,246,169,9,1,185,67,192,97,81,170,106,240,157,23,26,106,216,228,65,120,68,165,135,110,2,31,216,158,187,67,79,105,151,157,234,15,9,1,12,91,247,177,168,165,63,93,186,29,121,106,121,167,27,50,198,22,230,125,252,159,77,132,92,155,115,251,100,87,112,147,9,1,127,216,219,101,110,162,74,65,114,24,131,123,143,204,15,44,36,72,8,136,170,255,39,231,108,143,128,71,65,95,117,4,9,1,4,27,221,133,245,254,194,84,195,88,19,141,109,233,58,225,116,116,251,225,170,44,159,23,28,181,85,238,6,151,63,144,9,1,130,114,163,11,234,79,36,77,175,198,107,147,58,183,234,134,122,178,61,205,225,34,184,146,138,50,221,70,198,19,2,191,9,1,41,151,245,131,127,195,211,240,254,148,60,106,169,97,173,173,118,116,195,243,213,115,169,17,155,83,25,181,108,68,48,51,9,1,236,254,112,114,24,207,172,186,132,163,119,198,20,66,226,51,51,142,39,212,88,198,68,118,92,136,138,204,26,155,11,165,9,1,215,116,3,65,140,246,136,209,81,15,184,214,188,49,94,63,81,57,12,135,108,143,41,137,113,88,11,84,15,30,158,184,9,1,50,203,140,60,152,140,103,117,103,130,42,29,70,236,110,49,211,18,247,40,117,35,54,107,171,190,233,18,117,69,68,43,9,1,95,50,92,228,81,22,16,190,182,42,66,158,131,165,204,25,25,20,143,210,29,170,143,129,94,111,129,132,227,28,102,180,9,1,96,153,155,64,152,142,147,161,102,88,190,194,238,147,14,243,71,26,33,184,193,50,249,29,88,2,52,157,179,7,69,77,9,1,212,255,51,143,196,70,53,156,98,221,171,235,82,21,252,198,242,28,2,246,195,67,6,91,2,240,95,173,200,49,66,89,9,1,58,212,15,123,117,242,193,79,217,63,177,112,56,232,153,140,93,188,111,168,108,138,82,113,212,107,209,150,246,205,191,157,9,1,204,180,39,152,129,136,139,125,156,240,127,28,205,2,65,12,140,132,177,76,5,4,95,204,205,9,179,77,57,148,6,231,9,1,249,145,142,225,129,214,86,160,12,71,51,28,109,238,246,115,57,184,6,234,138,46,107,81,103,128,201,242,101,51,179,68,9,1,17,109,102,169,143,117,42,93,149,160,20,188,122,34,0,140,248,73,206,232,146,65,183,250,61,35,40,54,167,63,173,215,9,1,58,197,183,31,149,121,187,250,193,140,202,222,69,149,235,105,78,113,59,213,78,241,15,40,62,137,46,19,193,78,85,31,9,1,209,81,122,212,165,252,102,254,115,58,127,209,26,21,188,113,69,3,30,255,154,72,181,219,97,7,227,96,209,19,138,181,9,1,151,245,134,14,100,30,161,175,227,142,158,71,197,157,213,103,198,28,241,51,173,107,242,84,76,53,176,101,132,26,29,60,9,1,72,239,59,253,40,148,227,213,236,98,100,14,198,212,71,148,180,209,64,152,228,196,11,209,109,231,183,97,135,156,172,241,9,1,205,107,120,198,53,118,206,64,8,204,37,15,58,43,95,189,38,38,203,212,73,105,50,160,21,160,38,124,10,233,46,22,17,1,14,204,0,103,138,56,182,245,161,43,137,56,202,232,138,228,30,242,80,214,237,253,8,17,251,148,203,85,106,127,162,114,20,161,98,175,120,161,168,109,95,168,170,123,117,238,24,132,211,108,163,195,25,58,89,16,198,41,13,94,89,2,119,169,28,80,179,104,66,21,38,252,16,146,163,159,122,68,234,161,165,150,251,139,57,4,0,82,244,49,170,53,221,128,152,46,60,102,97,65,18,80,60,162,198,227,68,116,95,74,43,207,201,189,126,9,199,85,132,67,87,214,19,120,116,147,47,78,236,178,95,23,23,1,171,197,181,63,197,52,162,224,221,93,223,35,243,248,138,100,215,25,1,0,2,94,249,142,243,210,28,27,218,191,10,139,245,66,107,2,111,153,125,18,238,76,249,208,69,34,173,165,21,177,18,82,239,17,1,14,217,248,73,34,237,151,158,186,178
,226,225,234,58,186,218,7,175,174,60,185,248,248,25,28,51,154,61,168,213,77,242,169,0,82,244,49,170,53,221,128,152,46,60,102,97,65,18,80,60,162,198,227,68,116,95,74,43,207,201,189,126,9,199,85,132,194,179,227,24,162,84,191,59,100,89,207,244,98,199,135,44,91,35,210,22,182,249,66,219,89,32,250,61,112,54,16,141,161,98,175,120,161,168,109,95,168,170,123,117,238,24,132,211,108,163,195,25,58,60,170,232,127,53,17,58,173,142,247,89,247,207,149,119,134,64,14,158,82,18,231,188,179,163,89,11,174,81,43,46,153,9,9,132,119,216,135,171,185,255,65,210,32,46,77,152,229,32,71,244,141,39,140,188,245,22,25,184,28,198,202,132,222,174,160,25,1,0,2,119,228,107,44,53,217,61,182,86,125,189,169,81,109,32,249,139,212,234,72,144,24,135,118,89,121,216,219,24,207,66,168,17,1,14,213,5,71,76,80,42,16,77,105,27,101,50,79,76,38,232,167,55,134,79,128,251,113,33,35,116,77,254,28,6,176,0,0,82,244,49,170,53,221,128,152,46,60,102,97,65,18,80,60,162,198,227,68,116,95,74,43,207,201,189,126,9,199,85,132,110,177,116,132,158,116,7,177,77,240,138,82,212,212,241,43,54,8,1,75,42,104,243,5,241,73,226,60,72,169,2,41,25,1,0,3,73,127,150,33,212,30,131,171,90,28,221,170,53,22,176,210,81,154,146,160,81,67,188,184,7,13,240,169,97,51,230,181,9,100,71,111,167,179,223,229,107,45,223,184,100,207,103,16,106,234,217,25,120,51,156,12,142,28,186,4,134,110,182,28,191,11,9,9,62,169,255,238,205,94,99,210,162,31,213,85,158,233,223,231,174,18,241,77,26,133,255,75,40,190,65,163,26,48,53,196,25,1,0,2,232,83,248,233,232,89,11,170,74,117,125,224,222,189,198,137,244,49,205,228,155,200,97,42,160,89,8,63,109,25,91,168,9,9,255,134,239,235,78,5,0,110,98,20,109,14,192,231,250,72,49,145,191,114,177,51,38,242,67,121,217,71,114,50,124,171,9,9,209,167,69,145,2,139,203,92,187,46,4,30,218,0,85,77,176,3,253,201,73,229,148,92,229,57,32,59,244,12,109,96,9,9,113,233,23,33,249,145,133,118,215,96,240,47,3,202,196,124,111,64,3,49,96,49,132,142,60,29,153,230,232,58,71,67,65,14,41,230,74,233,195,128,0,49,87,111,239,58,195,179,2,237,163,15,66,168,74,199,52,200,236,175,1,55,3,126,248,127,239,193,246,133,27,151,79,57,134,3,21,27,16,164,160,185,211,150,83,253,116,26,253,56,22,83,204,70,30,122,203,221,134,84,251,39,141,138,17,246,159,212,31,236,239,75,201,65,5,60,24,80,250,182,152,192,250,91,168,183,69,6,78,180,185,147,215,10,134,34,96,243,26,77,158,213,121,211,188,200,73,204,177,205,8,52,178,106,57,74,136,235,186,254,43,32,141,97,126,192,90,203,191,95,226,69,41,166,75,35,133,169,106,173,67,240,155,225,173,169,44,112,64,49,220,193,72,27,65,8,29,65,249,24,254,23,128,162,84,32,193,217,215,5,53,140,19,76,198,1,217,209,132,203,77,253,222,126,28,172,43,195,212,211,139,249,236,68,230,33,5,245,225,0,18,59,175,197,134,247,119,100,72,140,210,76,106,119,84,110,90,15,232,189,251,79,162,3,207,175,252,54,204,228,221,91,137,1,0,0,0,0,0,0,0,0,0,0,0,0,102,252,2,65,142,125,208,106,197,183,59,71,59,230,188,90,81,3,15,76,116,55,101,124,183,178,155,243,118,197,100,184,209,103,90,94,137,2,0,0,0,0,0,0,0,0,0,0,0,0,102,252,2,66,75,168,78,31,55,208,65,188,110,85,186,57,104,38,204,73,78,132,212,129,91,109,181,38,144,66,46,234,115,134,49,79,0,232,231,118,38,88,111,115,185,85,54,76,123,75,191,11,183,247,104,94,189,64,232,82,177,100,99,58,74,203,211,36,76,61,226,32,44,203,98,106,211,135,215,7,34,230,79,190,68,86,46,47,35,26,41,12,8,83,43,141,106,186,64,47,245,0,242,170,235,6,192,229,86,67,74,201,60,35,47,55,221,139,224,167,191,159,67,15,118,235,86,77,249,252,183,112,196,95,121,9,53,136,208,232,71,239,167,58,16,206,32,228,121,159,177,228,102,66,214,86,23,199,229,33,63,160,73,137,217,45,137,2,0,0,0,0,0,0,0,0,0,0,0,0
,102,252,2,66,135,222,210,71,225,102,15,130,112,113,199,241,55,25,52,88,151,81,8,83,132,252,159,68,98,193,241,137,124,92,62,239,137,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,134,36,129,147,235,77,210,168,206,129,95,135,108,18,77,72,53,149,34,240,133,77,149,216,7,46,175,240,211,125,85,189,17,3,32,62,137,13,108,44,59,173,166,238,204,150,3,169,156,28,98,89,237,90,100,2,241,199,108,193,139,86,140,58,239,186,15,17,34,169,145,29,210,173,116,63,242,55,212,17,100,138,15,227,44,109,116,238,192,96,113,106,42,116,53,47,107,28,67,91,93,103,0,249,3,11,120,197,191,90,201,151,167,105,98,170,50,201,10,109,142,142,188,233,131,140,142,235,56,141,115,225,247,101,154,214,116,246,54,163,90,111,26,81,86,78,195,55,27,156,77,163,18,109,90,208,186,227,80,207,199,250,234,199,99,99,184,9,255,104,98,246,102,108,85,7,252,51,21,214,132,35,158,2,38,112,107,69,195,65,114,145,245,183,172,194,211,57,80,82,17,161,138,214,89,21,244,44,6,173,6,242,62,51,254,238,79,87,6,12,210,73,180,68,244,119,54,206,136,162,78,107,80,251,5,29,192,174,93,179,175,68,217,8,246,220,217,160,21,208,74,126,225,227,25,1,0,3,59,172,224,22,174,10,65,231,169,237,9,168,91,33,85,109,38,187,242,242,75,76,32,165,75,187,165,27,95,83,162,158,25,1,0,5,237,36,132,158,202,168,131,171,106,32,214,79,172,224,148,150,15,71,73,102,217,162,19,183,2,117,192,112,196,76,181,34,161,61,185,77,114,162,95,100,135,66,67,175,119,110,6,244,73,213,91,169,221,83,157,81,206,111,89,151,62,178,167,63,16,226,11,189,169,125,149,14,110,8,62,221,87,116,233,142,217,139,253,153,16,9,1,186,154,222,88,248,170,108,168,43,242,42,43,72,15,245,221,236,232,166,232,99,81,164,123,16,213,143,51,128,251,219,183,9,1,103,185,128,156,225,233,200,126,96,129,32,179,163,131,84,200,153,155,236,34,245,43,19,243,165,109,226,10,22,113,50,131,9,1,203,224,11,159,230,121,179,34,119,46,123,13,250,7,202,214,183,18,124,144,172,158,237,255,172,53,228,144,236,81,142,168,161,44,60,95,185,9,118,123,106,246,85,186,215,47,93,102,56,245,245,208,160,183,144,135,107,116,64,90,68,61,138,52,178,244,96,20,237,96,5,52,90,158,129,172,204,39,175,55,18,74,73,29,222,9,255,36,49,6,86,93,12,79,206,248,151,94,121,177,178,35,12,1,159,78,58,178,122,63,78,124,169,48,107,159,98,153,132,161,138,214,89,21,244,44,6,173,6,242,62,51,254,238,79,87,6,12,210,73,208,111,9,167,39,114,89,78,179,210,171,35,115,181,211,197,236,176,132,184,74,77,237,45,48,18,241,69,222,221,138,25,161,93,224,137,41,163,192,131,82,50,167,205,32,29,51,131,23,202,171,216,115,199,117,193,115,55,85,171,245,34,173,219,214,151,226,215,10,91,97,70,75,209,104,27,41,137,81,196,246,13,142,199,12,25,1,0,4,113,24,36,136,41,22,138,100,28,59,149,105,31,231,215,27,33,193,211,238,215,254,44,202,236,107,125,180,46,38,146,200,25,1,0,2,38,182,122,48,1,162,205,218,95,52,172,146,222,81,199,193,42,178,228,105,133,88,214,83,137,237,66,230,119,250,5,85,9,255,253,39,5,238,88,207,60,229,238,92,28,224,63,70,109,126,152,54,188,71,18,186,162,153,21,61,132,71,202,121,113,207,161,138,214,89,21,244,44,6,173,6,242,62,51,254,238,79,87,6,12,210,73,191,14,175,103,174,227,73,235,177,89,118,163,111,237,172,26,74,6,101,80,58,211,113,127,243,254,106,224,120,179,101,156,161,76,72,189,176,20,90,137,189,135,248,136,79,63,168,193,228,45,88,94,117,184,2,226,247,10,56,240,40,136,100,146,139,246,198,195,194,209,164,217,45,202,10,147,86,175,254,198,249,92,64,121,164,0,1,0,0,253,83,6,143,195,92,106,35,254,224,103,188,253,63,192,216,128,186,79,121,216,182,90,175,191,240,47,48,93,168,74,2,141,105,206,208,33,164,189,140,91,38,36,168,242,80,217,18,184,248,245,157,129,85,249,94,94,229,138,101,38,9,1,47,241,202,110,153,206,246,252,9
2,214,119,95,159,94,245,61,243,40,240,8,26,143,180,81,247,55,255,244,73,12,229,83,9,1,95,192,113,95,216,242,21,235,124,16,227,245,80,217,178,9,241,140,170,135,64,175,84,27,211,70,239,73,100,139,20,245,9,1,201,169,20,123,206,251,168,141,33,64,175,106,246,185,19,185,53,101,125,53,5,87,5,184,7,21,91,61,208,130,42,131,9,1,183,44,52,109,222,204,99,77,172,182,15,29,40,214,131,168,39,33,227,213,36,163,61,162,168,47,3,62,136,241,101,126,9,1,133,79,165,174,6,191,41,30,209,5,109,104,28,93,197,246,247,13,23,242,234,3,204,110,233,229,198,255,131,62,203,105,0,9,36,146,140,19,119,166,207,36,195,156,45,70,248,235,157,242,62,129,27,38,220,53,39,229,72,57,111,212,225,115,177,211,102,235,93,180,24,37,200,29,129,191,72,73,93,114,116,50,181,244,253,225,248,223,46,101,251,180,223,113,77,242,139,0,70,112,11,77,64,172,92,53,175,44,34,221,162,120,122,145,235,86,123,6,201,36,168,251,138,233,160,91,32,192,140,33,228,96,52,194,207,181,181,131,126,57,95,233,204,152,190,4,82,34,235,53,200,202,40,109,252,73,189,213,239,94,126,130,9,1,27,8,240,147,212,200,27,37,231,124,191,110,45,189,91,214,149,171,253,138,221,47,115,230,14,214,92,143,87,109,114,128,17,1,249,248,63,134,138,17,62,7,250,227,100,52,50,139,214,30,153,110,204,16,117,222,9,119,59,220,202,187,15,30,237,162,217,9,1,207,50,50,89,38,214,97,46,146,127,167,239,70,37,230,216,37,111,63,130,63,184,65,242,102,240,65,120,90,218,241,226,9,1,247,124,190,104,95,142,126,239,68,219,69,165,161,237,129,135,165,5,236,239,227,84,140,240,18,4,129,67,95,125,116,254,0,70,112,11,77,64,172,92,53,175,44,34,221,162,120,122,145,235,86,123,6,201,36,168,251,138,233,160,91,32,192,140,33,64,31,45,164,25,35,131,214,111,103,185,66,123,36,77,209,130,54,238,77,124,250,76,42,126,68,137,156,53,223,112,84,9,1,172,109,18,138,162,172,98,227,191,233,228,200,186,6,38,31,205,90,238,83,85,200,140,40,95,174,70,100,236,184,92,217,161,244,45,89,208,100,220,62,250,92,105,132,16,63,83,84,164,87,143,157,56,238,206,71,73,61,115,66,84,21,49,226,43,98,209,124,67,230,245,74,241,47,105,36,12,239,120,5,217,170,54,156,84,161,240,229,12,107,226,171,19,248,82,37,157,153,49,126,15,161,81,30,210,115,133,159,227,100,212,172,149,230,75,232,210,108,56,145,60,23,37,166,185,84,193,191,193,253,113,198,103,19,58,1,211,88,161,138,214,89,21,244,44,6,173,6,242,62,51,254,238,79,87,6,12,210,73,244,204,176,178,59,175,3,143,139,2,242,240,21,87,122,194,191,65,151,96,89,50,229,228,174,155,172,240,102,252,221,88,161,54,97,92,243,73,215,246,52,72,145,177,231,202,124,114,136,63,93,192,73,97,131,206,41,240,31,150,151,163,154,135,110,104,89,178,252,214,86,245,40,217,82,157,194,186,14,137,246,116,87,3,221,0,88,209,228,65,175,80,39,254,110,76,107,116,157,222,72,114,28,239,59,179,26,8,211,214,75,156,110,156,114,90,188,114,36,198,234,1,57,238,186,239,33,185,70,78,68,110,74,247,188,177,180,151,164,216,15,115,133,254,13,247,190,87,17,67,0,232,148,145,23,177,217,122,193,138,141,18,36,190,30,64,69,198,123,105,131,146,212,237,192,61,179,237,70,185,233,70,115,24,243,123,134,88,68,215,142,127,133,2,149,85,85,176,160,214,111,3,112,143,142,78,137,211,79,138,29,6,66,209,69,161,250,30,45,145,12,250,62,92,70,95,125,127,105,194,36,213,66,224,165,152,168,81,60,218,234,227,67,148,118,29,59,147,53,55,78,40,8,227,39,217,122,18,110,222,78,162,140,204,238,55,6,95,41,25,0,0,0,0,144,32,12,17,126,234,225,99,200,138,138,108,231,51,212,1,171,8,94,147,139,188,115,131,162,159,107,192,34,19,171,180,161,151,178,33,144,104,181,16,79,208,222,10,42,70,102,179,246,243,151,172,167,86,83,107,175,210,186,181,198,128,36,151,56,29,81,196,245,31,168,78,1,68,190,24,94,31,195,247,20,1
22,219,85,214,0,172,224,132,217,231,157,205,174,1,20,9,234,148,84,215,130,24,9,46,11,24,156,214,165,23,59,68,102,116,0,213,17,230,13,150,23,44,152,198,242,109,118,74,176,93,184,102,158,85,104,138,31,78,160,214,75,29,223,239,114,74,97,156,59,161,94,69,18,60,117,174,107,34,34,42,46,244,10,252,240,128,232,79,90,245,105,207,185,63,169,21,50,218,22,157,164,50,155,208,136,29,218,73,246,12,13,209,254,95,239,141,35,221,253,207,221,212,9,1,248,135,166,243,99,75,238,65,244,69,142,255,92,110,93,81,203,0,116,149,135,131,96,149,14,49,60,161,204,107,128,214,10,1,164,102,179,128,123,95,250,209,85,215,47,223,202,234,9,179,137,135,46,66,252,162,20,55,210,106,243,173,46,40,178,89,161,54,97,92,243,73,215,246,52,72,145,177,231,202,124,114,136,63,93,192,73,156,179,78,101,253,32,134,94,34,150,203,47,196,201,136,120,12,142,64,149,165,101,204,29,186,80,109,39,8,84,79,232,161,128,2,205,152,207,181,99,73,42,111,179,231,200,36,59,123,154,212,204,146,140,190,129,206,40,197,26,241,53,110,14,80,134,5,37,204,178,45,166,159,129,221,136,86,105,96,102,217,243,16,89,249,161,104,60,252,156,182,35,11,128,137,157,188,182,24,21,145,214,144,137,216,164,177,52,180,226,24,99,117,67,64,64,241,12,41,231,167,74,209,204,218,129,255,34,102,39,251,93,142,41,145,92,203,50,161,138,214,89,21,244,44,6,173,6,242,62,51,254,238,79,87,6,12,210,73,164,143,172,103,50,238,187,229,211,197,117,198,212,234,78,82,14,197,48,46,66,117,89,74,193,245,113,83,209,177,183,217,161,81,125,159,102,197,100,210,237,244,60,182,87,66,79,114,241,194,13,59,255,230,154,81,238,249,118,81,47,67,167,51,81,39,137,145,76,187,230,155,155,70,102,153,242,27,165,84,224,218,253,106,44,9,255,60,175,19,171,24,127,90,244,102,215,109,52,163,108,87,248,78,75,87,18,254,157,225,108,251,34,166,46,135,226,155,114,9,1,137,66,124,98,185,206,118,62,215,166,225,249,150,252,161,203,0,217,115,193,56,251,207,17,2,102,50,45,221,187,226,50,177,93,224,137,41,163,192,131,82,50,167,205,32,29,51,131,23,202,171,216,115,0,2,86,146,226,162,251,7,31,172,102,81,196,200,22,126,55,247,235,106,113,202,76,139,128,246,85,114,151,178,101,95,159,191,161,51,139,210,222,212,86,156,86,143,33,218,23,74,206,199,11,130,110,85,12,238,250,55,221,42,57,86,177,201,103,75,239,3,21,25,161,11,112,13,117,46,113,1,71,100,17,102,72,32,125,98,78,0,1,0,0,253,83,6,143,195,92,106,35,254,224,103,188,253,63,192,216,128,186,79,121,216,182,90,175,191,240,47,48,93,22,155,114,188,54,101,62,87,11,121,52,68,25,197,106,219,1,155,20,238,224,223,45,229,125,209,123,117,113,166,132,12,161,220,14,214,252,191,229,28,159,132,166,98,230,79,209,52,119,54,170,116,134,81,122,205,57,136,220,234,247,156,218,204,189,130,97,114,48,48,160,227,48,48,64,28,58,212,244,195,137,255,228,64,41,161,128,2,205,152,207,181,99,73,42,111,179,231,200,36,59,123,154,212,204,146,237,236,215,194,80,85,137,94,24,27,243,41,116,140,187,70,33,135,209,48,214,17,9,198,53,117,79,21,244,235,240,208,9,255,219,215,135,89,179,97,218,60,174,210,247,236,175,60,97,114,55,143,26,104,199,6,53,175,153,170,254,4,26,49,33,168,161,54,97,92,243,73,215,246,52,72,145,177,231,202,124,114,136,63,93,192,73,131,75,158,95,145,124,241,215,162,81,17,8,190,214,110,90,15,123,1,214,244,31,200,40,196,119,19,72,17,44,27,219,161,44,60,95,185,9,118,123,106,246,85,186,215,47,93,102,56,245,245,208,160,62,110,21,209,63,190,73,44,77,121,157,143,198,176,46,157,199,11,251,128,18,248,171,99,94,148,201,218,67,21,70,232,9,255,16,46,124,130,188,155,165,96,66,61,124,176,157,94,180,222,164,199,68,147,148,121,54,59,60,181,162,4,74,28,114,103,9,1,0,0,0,60,9,235 + ], + "state_diffs_hash": 
"0xc83cac9cd98a4216cbc0d0830e63c4956e4a1c45c122ebbc88af7ea3b496c406", + "aux_commitments": { + "events_queue_commitment": "0xec82208c87a937d88768a0067b2a80f0525eca8288dad2cf96cf8bbe6a1aa565", + "bootloader_initial_content_commitment": "0x97df88dcecbcd29b49773c042cdee7a44c57a741e64913fff5aa1b3484232f28" + }, + "blob_hashes": [ + { + "commitment": "0xf840cf3f6b7dc92729b2b9ef3b399e7b896d553b746362fe81c4eb911013570d", + "linear_hash": "0xff4feb4bef9401731ab9db3626c2e015baa6880d7b1c4382d03b30da3a0fd75e" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1", + "local_root": "0xd4790efa9052ea67dcb473de870e3522e2fc340374e6293ad4646fde312c8c76" + } + }, + "meta_parameters": { + "zkporter_is_available": false, + "bootloader_code_hash": 
"0x010008c753336bc8d1ddca235602b9f31d346412b2d463cd342899f7bfb73baf", + "default_aa_code_hash": "0x0100055d760f11a3d737e7fd1816e600a4cd874a9f17f7a225d1f1c537c51a1e", + "protocol_version": "Version27" + }, + "pass_through_data": { + "shared_states": [ + { + "last_leaf_index": 212, + "root_hash": "0x0332d2acc43785a44b2b84fc010372c8f3e4ff4d0ca5f312de142ffe74189500" + }, + { + "last_leaf_index": 0, + "root_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "input": { + "PostBoojum": { + "common": { + "l2_to_l1_logs": [ + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 0, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x91ac392a7af99b6df974efe2d6b40e35dc79156fa3b75ea257df4976da0c26e8", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xa088c0c1710f2244aad45e356742e7ac7773a153cf23db6cec4ded7e8da05d69", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 2, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xccdf8bf8f4bf596f8fbb7ed18d67ef6208919707de498d0cae4d2c01c50e2305", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 3, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xe43e076103a2a867921c20b43365e7729003f1a00558c3dc51b31b25c90b2b2a", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 4, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x3eba0d3506eba94a8b328b82b3a32623c2ef77e253bfbb82d2667b163c8714c7", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 5, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xdaf8935b934fe9513d112e3ca61a728dbfae2fdb5ea1edf8e6f56b8283aa4cd8", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 6, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x5527767da575eb332ed996603486300af0f55116f2a177b0c05ed31518a23d77", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 7, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x15f4d69332763eaaf4d80e05e88b04d45d38d0854381f58e4c01688496e03f63", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 8, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x96a04ccc56dc1cea06a00fe97af3231766aee89c948c86f0c16eeebcdddc0aa3", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 9, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x886292e17714665013e6c7fc19b12b15a69676394ec88ceb5d1698a0b198a7dd", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 10, + 
"sender": "0x0000000000000000000000000000000000008001", + "key": "0x9e84bba4d8497ea90d8c5513063912bdbd9cc85ac68185ee1715a7b15ca01f17", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 11, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x3795b197a06415b6b00d2bde238a25741ecc9791269d899c97ff983d88dcd5e6", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 12, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0e5be5348f9a9fd4e936c4fad6793e0e4395f5642d1b5f9a08e1a3703226f8ef", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 13, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xec3e691650319cdf9fbc5410f388ea24f2c9325b0d7b4ce37db2a1c5957bd86b", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 14, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xfa448e8ac5560693b992b70fae5e08f3e9cae510c8e1fa69d2c212dd1811bf05", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 15, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x6c5a74345d321eb4edebdf43f42a11bc409613a9b92cbfe96730498217b12d43", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 16, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x7912d592b280f7f7a5d04c68eaddae09b518816a0a6d97bc89b143ae3109e78f", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 17, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x3c1fad3b48be6cb9503048860557f3ef99dccdf1f51dfbf26570f630469b1a98", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 18, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xb7e755892fbe6870e93cbd3c0945d772e141b94ee50aa75a2d7bb7219fb53266", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 19, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xb81f1f0fbe80e956956132771d1a99c35bd52856adbf932cc061d3980a79c124", + "value": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 20, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x8f5a7c0d48c9b82137c446c9db31ce5ef4e1a30166dd3ae09580c33595bbe2b7", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 21, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x2f6516d033cfa362a407a7d2d2279c62fa185eaae6742bc6f51fdcb51606094e", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 22, + "sender": 
"0x0000000000000000000000000000000000008001", + "key": "0x82eb8a4152ff724ef814c3ddacea2a65e6e6d09a00d72e57fff9e12b7857461d", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 23, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x08a95d3f4505e0e3fb90a2002a81750c0bae658a5d4a290acaeacdfc2691560a", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 24, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xb8c38e08db553411378fc77ca81f20da7d5b1be77fb316393e33bfe0c08565dd", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 25, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xd54d7593c4d133e4903becb318f109246537ddab2646148ac51ac7c94e25ef8c", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 26, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x3e86b6ddb211d47e057c4e246810e2dbb10061c2679e52ae7e4b647c9c98bf08", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 27, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0xe1cee6c0528143fa82ff667c9655d2d775dccdb4204791956096a6225059c9b8", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + ], + "rollup_last_leaf_index": 212, + "rollup_root_hash": "0x0332d2acc43785a44b2b84fc010372c8f3e4ff4d0ca5f312de142ffe74189500", + "bootloader_code_hash": "0x010008c753336bc8d1ddca235602b9f31d346412b2d463cd342899f7bfb73baf", + "default_aa_code_hash": "0x0100055d760f11a3d737e7fd1816e600a4cd874a9f17f7a225d1f1c537c51a1e", + "protocol_version": "Version27" + }, + "system_logs": [ + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 0, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000002", + "value": "0xf9030b78c5bf5ac997a76962aa32c90a6d8e8ebce9838c8eeb388d73e1f7659a" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 0, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000007", + "value": "0x91ac392a7af99b6df974efe2d6b40e35dc79156fa3b75ea257df4976da0c26e8" + }, + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 28, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x00000000000000000000000066fc024100000000000000000000000066fc0242" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 28, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000003", + "value": "0x190bda1fde651ac21cf771cb9f125f486678abbab229cce182a7c9a07361afbe" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 28, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000004", + "value": "0x000000000000000000000000000000000000000000000000000000000000001b" + }, + { + "shard_id": 0, + 
"is_service": true, + "tx_number_in_block": 28, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x38eaeef3afe69b6f6b2fa22c92da8137f1e405a1e1861b7de7cfa30c7d7462dd" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 28, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000006", + "value": "0x000000000000000000000000cc4b013229ffd6cb5eae5876251874172cafed0a" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 28, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000005", + "value": "0x335f4f11c3e55bb502bcbdedfd8e63b8e5c84bea465c984a5c664a8eca7d4a7a" + } + ], + "state_diffs": [ + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x3e013dc3eb10bbd48a8f9c94758f04d081563b6", + "derived_key": [ + 112,120,89,162,183,230,11,175,17,100,223,232,175,83,47,195,198,157,29,129,145,197,186,61,127,17,109,250,141,181,206,45 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10001430633c573868f335e3106fc558badc506762eb8184ef9ce785def6ece" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x868a8819e738818dabe8bfb671ae8e027372dd7", + "derived_key": [ + 130,208,215,121,46,249,196,126,160,123,216,26,86,45,8,246,35,74,8,171,141,141,223,145,137,150,142,180,236,158,154,37 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000ecfaeb837bd098bcf9bde6fc2ccd8e8a9355a1b70e601ac18cd089eb308" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x9af32a0f1b0914742c84d68795a9be9abd6bbd5", + "derived_key": [ + 246,11,47,22,184,171,230,29,125,57,179,213,44,191,157,128,184,167,253,5,55,217,60,33,8,75,147,188,5,4,171,60 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10000c328f328dd820a1dd698047a7f7d49874d8259196e273517430af8f480" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x22acca3c358a523c1ecbf1491d131a597aada298", + "derived_key": [ + 203,204,98,199,195,136,172,152,215,47,208,131,209,215,32,206,186,255,203,162,198,108,114,94,200,185,197,197,240,116,111,138 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100005b8e587974043d59bffbf632d020e764959abe62e4c238d8df2e62b2b5" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "derived_key": [ + 235,143,74,199,189,78,241,151,159,154,102,86,114,178,92,208,123,30,61,99,122,89,162,199,107,26,34,232,91,117,146,65 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10001430633c573868f335e3106fc558badc506762eb8184ef9ce785def6ece" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x30439cdc8796fb3cecb53f4bf5b133f581b5b40f", + "derived_key": [ + 66,202,106,148,168,163,117,186,10,227,150,70,185,29,164,88,23,175,73,33,116,119,174,107,73,193,3,53,191,78,11,115 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10001b37e5f8cafac924b3e663779e12c80dace8a02b1d273ae708f275ad62a" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x338bd2ded4569c568f21da174acec70b826e550c", + "derived_key": [ + 71,164,52,102,233,54,181,135,193,191,158,16,243,13,105,123,17,66,228,89,233,6,51,219,18,27,114,127,245,180,121,43 + ], + 
"enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000067554f53c6660e53bd9729f00aee0189010dfeb868fa3cbd481232480c" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x48a6f6788413af58f1bdf8c963cb67a4346f5fd8", + "derived_key": [ + 95,233,138,250,154,14,193,124,34,99,2,123,232,159,70,229,238,83,58,30,33,169,31,255,76,177,68,204,74,239,33,188 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10005b597eb02058b230cee6a9cdf4705410d151ad1ea6ad3e2de77841acfc2" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x4c48bdb0145a89bd87f8884f3fa8c1e42d585e75", + "derived_key": [ + 142,107,134,4,159,177,116,20,22,26,103,250,177,44,27,34,144,68,85,154,16,112,202,176,23,66,216,18,18,139,156,190 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000067554f53c6660e53bd9729f00aee0189010dfeb868fa3cbd481232480c" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x517d9f66c564d2edf43cb657424f72f1c20d3bff", + "derived_key": [ + 140,78,248,13,214,150,89,51,34,40,248,136,164,249,180,131,12,12,50,27,140,170,45,67,53,35,250,200,97,150,181,207 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100048bf1b165f4c850d01eb81e5c040f70ea01dca019e780a87f80ec7897c3" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x55f6f01d04a21e76cbd2de9d4a9ff6ee9f8893a6", + "derived_key": [ + 235,96,109,232,32,223,19,108,89,177,255,121,225,223,146,96,66,108,164,143,175,146,74,70,53,36,33,31,183,195,161,165 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000201e923f3e47adfa7c4f0f61ea8dd4256f66d1099970bcd314892864917" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x5caf5f2b06ca757c7cebacdcd6f163af45a6bb83", + "derived_key": [ + 95,127,60,252,81,90,137,254,204,8,97,6,46,254,57,252,48,104,36,27,116,128,26,163,170,147,245,100,172,98,242,54 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100061dc40d0bc2cb81905e6bc1421ebd4bd8b79aacb8da4f9d7145b288674f" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x5e45123c75ae6b22222a2ef40afcf080e84f5af5", + "derived_key": [ + 207,14,96,187,125,119,111,198,230,184,241,1,19,161,190,119,25,192,44,34,151,163,108,216,124,11,59,35,121,140,74,95 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100070f1cd4e2830607af30192743910fdfabf1dd004a8073944275819d1dfe" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "derived_key": [ + 53,89,195,90,167,22,152,246,194,202,70,67,239,232,80,69,169,73,79,38,45,119,238,103,193,61,215,52,230,38,48,90 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10001430633c573868f335e3106fc558badc506762eb8184ef9ce785def6ece" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x683cfc9cb6230b80899dbcb6181591d69089d8a4", + "derived_key": [ + 186,93,97,46,65,80,43,253,205,126,211,179,176,210,212,177,245,200,248,185,15,209,21,42,187,224,222,192,14,162,61,7 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100048bf1b165f4c850d01eb81e5c040f70ea01dca019e780a87f80ec7897c3" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x7cf7e4a85a7a677f6a5b2fe169e6d5eef29219c5", + "derived_key": [ + 
44,61,141,160,220,54,28,84,148,218,146,175,212,98,94,116,25,190,241,121,131,189,209,145,214,33,89,62,212,173,57,47 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000323b7266314b9d3ec387d5bcd9590a50be00e788837ca5a880d97e3ee83" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x97b2219068b5104fd0de0a2a4666b3f6f397aca7", + "derived_key": [ + 48,100,113,86,255,138,164,229,100,8,99,14,34,251,194,115,119,250,250,242,7,188,204,248,210,254,18,115,9,165,229,233 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000215cd6f306d757d5d2fdbb460c60f298584179eecc07196c7ae8e4f8d64" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0x9961618bfad393730ea065a18399303330f1395f", + "derived_key": [ + 200,51,40,178,38,74,180,112,167,221,220,163,38,200,255,61,159,78,76,252,60,226,78,168,221,216,201,180,12,20,188,185 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000201e923f3e47adfa7c4f0f61ea8dd4256f66d1099970bcd314892864917" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xcc4b013229ffd6cb5eae5876251874172cafed0a", + "derived_key": [ + 229,176,77,5,169,51,88,54,33,49,122,209,137,227,159,45,116,33,7,146,238,29,46,153,91,171,175,162,128,71,14,27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100015306fb0f0b5fde219996551c8072c6711bba5300b2669aeb0f0e4c7445" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xcc8e14c05825cde94522515a0303e4c2e07ca6f9", + "derived_key": [ + 250,253,202,115,87,157,171,40,23,48,73,193,157,78,81,69,162,232,29,120,68,42,125,135,121,254,156,149,143,198,173,119 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000035ba31ed16c21160780ac09763bfd593b1748f69e683a9fb10921aa49d" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xdb5b25a5c3ff135d39df0dd3417a6b26724d2b24", + "derived_key": [ + 188,67,139,78,87,51,241,113,6,164,3,59,144,48,243,76,127,49,1,147,79,102,218,253,36,37,149,91,92,247,35,64 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10000ed306b29128e065afdce73ebc8e0ecd08cd918fdc0f74c03f64d515e48" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xdc0ed6fcbfe51c9f84a662e64fd1347736aa7486", + "derived_key": [ + 6,115,118,133,155,130,106,66,115,187,68,69,230,28,222,77,91,95,90,23,4,86,255,161,95,247,195,108,233,152,241,190 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100013f4a2c964c71d49f2d8876a11eee4bf4e8d19231652fc775b2cae43a21" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xe024f9e4e8fa2f08f768c1cb56bc4a6e3cbd8834", + "derived_key": [ + 129,4,114,76,225,212,61,128,125,223,69,48,213,107,167,249,183,181,194,21,67,99,215,247,166,215,108,189,158,61,249,130 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100006721ab911a41d8b52502ea4cdf42ec99e5e529be6a3e66f3adb2143c5a" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xe300eb4b0834a551cac3e93f30380643ce153408", + "derived_key": [ + 51,232,118,240,225,158,242,19,13,216,95,254,79,35,196,212,101,148,164,24,219,221,10,181,111,253,164,76,93,72,246,206 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10005bb7471a0016b1c330f767a5e73ad0c330934694cad007c5aa3f926be65" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": 
"0xf0e50c6be2ab13f852259d99317e0fa1511ed273", + "derived_key": [ + 25,143,99,94,50,239,35,14,215,12,184,219,25,32,81,51,246,142,27,126,246,157,133,33,13,119,172,197,111,163,43,234 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1000515657ebe014d9151480712de7a108d9b0e1a7a798d3da2945ba53cd103" + }, + { + "address": "0x0000000000000000000000000000000000008002", + "key": "0xfa1e2d910cfa3e5c465f7d7f69c224d542e0a598", + "derived_key": [ + 90,251,120,52,143,255,91,253,53,60,239,129,160,65,213,230,214,195,241,114,123,145,145,220,232,75,132,91,7,118,101,237 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10006311257c403bbba7e2aef0aa20822627c82ec7484fcb31b87dd8c582aa9" + }, + { + "address": "0x0000000000000000000000000000000000008003", + "key": "0x64534fbb7489d8b2e0974a2a70dee20ad40795d90f17c1a6d62ba36ea19e007", + "derived_key": [ + 240,163,222,200,2,37,101,9,35,172,42,74,77,142,96,167,8,137,208,171,61,234,142,107,218,41,37,203,138,127,216,252 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x200000000000000000000000000000000" + }, + { + "address": "0x0000000000000000000000000000000000008003", + "key": "0x1b458e5ab877fea2e4abf98d12b31ec3f7c93fd4856e807f684322e8cf11fdf7", + "derived_key": [ + 174,229,34,198,20,187,1,37,21,66,226,45,128,16,30,45,151,85,103,77,143,214,69,38,254,154,44,77,223,171,97,143 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100000000000000000000000000000000" + }, + { + "address": "0x0000000000000000000000000000000000008003", + "key": "0x810ca1ae825b138452fb743e9948f909b6286cbfadd5a899190fcb21a75443ab", + "derived_key": [ + 0,159,186,133,75,226,253,235,173,50,111,19,111,136,219,244,177,114,214,77,28,237,51,180,171,99,164,148,28,226,73,151 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x700000000000000000000000000000000" + }, + { + "address": "0x0000000000000000000000000000000000008003", + "key": "0xe6d904d46c5d8b2934bf40eee45740c707124a9797010ceae3f79534391b6de5", + "derived_key": [ + 201,172,113,10,243,67,127,194,244,249,48,131,50,164,72,10,88,81,76,45,149,28,73,119,114,174,142,141,132,8,175,27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x200000000000000000000000000000000" + }, + { + "address": "0x0000000000000000000000000000000000008003", + "key": "0xeaa2b2fbf0b42c559059e5e9510edc15755f1c1883f0e41d5ba5f9aea4ac201a", + "derived_key": [ + 182,81,125,81,147,30,201,86,98,178,2,213,133,189,82,214,234,207,27,118,113,82,28,46,150,32,45,104,62,223,226,99 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xe00000000000000000000000000000000" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000035ba31ed16c21160780ac09763bfd593b1748f69e683a9fb10921aa49d", + "derived_key": [ + 28,170,140,159,117,250,84,163,177,210,240,18,225,217,234,99,118,79,112,157,28,25,151,121,72,28,143,77,92,237,107,62 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100005b8e587974043d59bffbf632d020e764959abe62e4c238d8df2e62b2b5", + "derived_key": [ + 45,86,211,71,72,250,222,240,196,161,223,115,65,15,173,85,177,255,211,89,90,168,146,255,238,205,12,128,137,196,203,27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": 
"0x100006721ab911a41d8b52502ea4cdf42ec99e5e529be6a3e66f3adb2143c5a", + "derived_key": [ + 12,138,2,247,115,213,230,144,244,253,93,195,182,20,84,243,71,244,71,24,244,103,128,231,65,233,198,173,128,126,246,169 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000067554f53c6660e53bd9729f00aee0189010dfeb868fa3cbd481232480c", + "derived_key": [ + 185,67,192,97,81,170,106,240,157,23,26,106,216,228,65,120,68,165,135,110,2,31,216,158,187,67,79,105,151,157,234,15 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10000c328f328dd820a1dd698047a7f7d49874d8259196e273517430af8f480", + "derived_key": [ + 12,91,247,177,168,165,63,93,186,29,121,106,121,167,27,50,198,22,230,125,252,159,77,132,92,155,115,251,100,87,112,147 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10000ed306b29128e065afdce73ebc8e0ecd08cd918fdc0f74c03f64d515e48", + "derived_key": [ + 127,216,219,101,110,162,74,65,114,24,131,123,143,204,15,44,36,72,8,136,170,255,39,231,108,143,128,71,65,95,117,4 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10000fd53068fc35c6a23fee067bcfd3fc0d880ba4f79d8b65aafbff02f305d", + "derived_key": [ + 4,27,221,133,245,254,194,84,195,88,19,141,109,233,58,225,116,116,251,225,170,44,159,23,28,181,85,238,6,151,63,144 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100013f4a2c964c71d49f2d8876a11eee4bf4e8d19231652fc775b2cae43a21", + "derived_key": [ + 130,114,163,11,234,79,36,77,175,198,107,147,58,183,234,134,122,178,61,205,225,34,184,146,138,50,221,70,198,19,2,191 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10001430633c573868f335e3106fc558badc506762eb8184ef9ce785def6ece", + "derived_key": [ + 41,151,245,131,127,195,211,240,254,148,60,106,169,97,173,173,118,116,195,243,213,115,169,17,155,83,25,181,108,68,48,51 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100015306fb0f0b5fde219996551c8072c6711bba5300b2669aeb0f0e4c7445", + "derived_key": [ + 236,254,112,114,24,207,172,186,132,163,119,198,20,66,226,51,51,142,39,212,88,198,68,118,92,136,138,204,26,155,11,165 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10001b37e5f8cafac924b3e663779e12c80dace8a02b1d273ae708f275ad62a", + "derived_key": [ + 215,116,3,65,140,246,136,209,81,15,184,214,188,49,94,63,81,57,12,135,108,143,41,137,113,88,11,84,15,30,158,184 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000201e923f3e47adfa7c4f0f61ea8dd4256f66d1099970bcd314892864917", + "derived_key": [ + 50,203,140,60,152,140,103,117,103,130,42,29,70,236,110,49,211,18,247,40,117,35,54,107,171,190,233,18,117,69,68,43 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + 
{ + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000215cd6f306d757d5d2fdbb460c60f298584179eecc07196c7ae8e4f8d64", + "derived_key": [ + 95,50,92,228,81,22,16,190,182,42,66,158,131,165,204,25,25,20,143,210,29,170,143,129,94,111,129,132,227,28,102,180 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100023dc5e29b1af44a05d231db67a62a8bfd0c06217caa29b061daa7f2913f", + "derived_key": [ + 96,153,155,64,152,142,147,161,102,88,190,194,238,147,14,243,71,26,33,184,193,50,249,29,88,2,52,157,179,7,69,77 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000323b7266314b9d3ec387d5bcd9590a50be00e788837ca5a880d97e3ee83", + "derived_key": [ + 212,255,51,143,196,70,53,156,98,221,171,235,82,21,252,198,242,28,2,246,195,67,6,91,2,240,95,173,200,49,66,89 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100048bf1b165f4c850d01eb81e5c040f70ea01dca019e780a87f80ec7897c3", + "derived_key": [ + 58,212,15,123,117,242,193,79,217,63,177,112,56,232,153,140,93,188,111,168,108,138,82,113,212,107,209,150,246,205,191,157 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000515657ebe014d9151480712de7a108d9b0e1a7a798d3da2945ba53cd103", + "derived_key": [ + 204,180,39,152,129,136,139,125,156,240,127,28,205,2,65,12,140,132,177,76,5,4,95,204,205,9,179,77,57,148,6,231 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10005b597eb02058b230cee6a9cdf4705410d151ad1ea6ad3e2de77841acfc2", + "derived_key": [ + 249,145,142,225,129,214,86,160,12,71,51,28,109,238,246,115,57,184,6,234,138,46,107,81,103,128,201,242,101,51,179,68 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10005bb7471a0016b1c330f767a5e73ad0c330934694cad007c5aa3f926be65", + "derived_key": [ + 17,109,102,169,143,117,42,93,149,160,20,188,122,34,0,140,248,73,206,232,146,65,183,250,61,35,40,54,167,63,173,215 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100061dc40d0bc2cb81905e6bc1421ebd4bd8b79aacb8da4f9d7145b288674f", + "derived_key": [ + 58,197,183,31,149,121,187,250,193,140,202,222,69,149,235,105,78,113,59,213,78,241,15,40,62,137,46,19,193,78,85,31 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x10006311257c403bbba7e2aef0aa20822627c82ec7484fcb31b87dd8c582aa9", + "derived_key": [ + 209,81,122,212,165,252,102,254,115,58,127,209,26,21,188,113,69,3,30,255,154,72,181,219,97,7,227,96,209,19,138,181 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x100070f1cd4e2830607af30192743910fdfabf1dd004a8073944275819d1dfe", + "derived_key": [ + 151,245,134,14,100,30,161,175,227,142,158,71,197,157,213,103,198,28,241,51,173,107,242,84,76,53,176,101,132,26,29,60 + ], + "enumeration_index": 0, 
+ "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008004", + "key": "0x1000ecfaeb837bd098bcf9bde6fc2ccd8e8a9355a1b70e601ac18cd089eb308", + "derived_key": [ + 72,239,59,253,40,148,227,213,236,98,100,14,198,212,71,148,180,209,64,152,228,196,11,209,109,231,183,97,135,156,172,241 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xe778a21bcfe90796edfc6e5dba276e58537d4ff192bc30765e18d9ef2aa9a55", + "derived_key": [ + 205,107,120,198,53,118,206,64,8,204,37,15,58,43,95,189,38,38,203,212,73,105,50,160,21,160,38,124,10,233,46,22 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10e" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x201f2e5549e69135c92587d30523c730fd01553abf72828402fad9b12c172e10", + "derived_key": [ + 204,0,103,138,56,182,245,161,43,137,56,202,232,138,228,30,242,80,214,237,253,8,17,251,148,203,85,106,127,162,114,20 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x35dc0a033f8f3476b52059199e9babf078fddd76cb3c290e05ae42462bfc33eb", + "derived_key": [ + 89,16,198,41,13,94,89,2,119,169,28,80,179,104,66,21,38,252,16,146,163,159,122,68,234,161,165,150,251,139,57,4 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x52f431aa35dd80982e3c66614112503ca2c6e344745f4a2bcfc9bd7e09c75584" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x3859fd065954dbed7c74a1359d0e5bc38403ea4cdf0274ae615ce0e3e2afec6b", + "derived_key": [ + 67,87,214,19,120,116,147,47,78,236,178,95,23,23,1,171,197,181,63,197,52,162,224,221,93,223,35,243,248,138,100,215 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10002" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x41c023ccaa2a67013d253ba3488447c2db3843b3f988653fdf8d7c7268862ca9", + "derived_key": [ + 94,249,142,243,210,28,27,218,191,10,139,245,66,107,2,111,153,125,18,238,76,249,208,69,34,173,165,21,177,18,82,239 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10e" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x4792eb7ea10cfac9f83a8d12d965c903854b51c5cb0783e082741ecf0c20dcfe", + "derived_key": [ + 217,248,73,34,237,151,158,186,178,226,225,234,58,186,218,7,175,174,60,185,248,248,25,28,51,154,61,168,213,77,242,169 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x52f431aa35dd80982e3c66614112503ca2c6e344745f4a2bcfc9bd7e09c75584" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x5bad0400c1a2cec7acfd85c5c5c25108540c42f405d3ae6ea01209dfbcc63c29", + "derived_key": [ + 194,179,227,24,162,84,191,59,100,89,207,244,98,199,135,44,91,35,210,22,182,249,66,219,89,32,250,61,112,54,16,141 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x71d2d9399f0017d99e02441b51c782e6f5613748934c615622bc6f2327b79b8d", + "derived_key": [ + 60,170,232,127,53,17,58,173,142,247,89,247,207,149,119,134,64,14,158,82,18,231,188,179,163,89,11,174,81,43,46,153 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x9" + }, + { + "address": 
"0x0000000000000000000000000000000000008005", + "key": "0x75579ad6152f71bd465c7f980c773c6df73f53d82aebf8b69c1173f678af2d81", + "derived_key": [ + 132,119,216,135,171,185,255,65,210,32,46,77,152,229,32,71,244,141,39,140,188,245,22,25,184,28,198,202,132,222,174,160 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10002" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x87c06ae8fd6d2ee9919bb86c39ee03f70b0d87028d77b914408152f07043c769", + "derived_key": [ + 119,228,107,44,53,217,61,182,86,125,189,169,81,109,32,249,139,212,234,72,144,24,135,118,89,121,216,219,24,207,66,168 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10e" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x962d8512b88c87f0272660761794a46a130b867d7d15b38fc1adc33433e4fce8", + "derived_key": [ + 213,5,71,76,80,42,16,77,105,27,101,50,79,76,38,232,167,55,134,79,128,251,113,33,35,116,77,254,28,6,176,0 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x52f431aa35dd80982e3c66614112503ca2c6e344745f4a2bcfc9bd7e09c75584" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0x9f4693a69c182083198dd36e2803bc42bbe3f851aa03cb0f0de7687a2171336b", + "derived_key": [ + 110,177,116,132,158,116,7,177,77,240,138,82,212,212,241,43,54,8,1,75,42,104,243,5,241,73,226,60,72,169,2,41 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10003" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xb35ae26426d210bd3178c283cdcb50ce0cdbff27177eb0786fc3fe0f45083b1d", + "derived_key": [ + 73,127,150,33,212,30,131,171,90,28,221,170,53,22,176,210,81,154,146,160,81,67,188,184,7,13,240,169,97,51,230,181 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x64" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xb62ada1fb8084bc5425b2aea59d59080ac3d0a10a1cc368978230741dca77a19", + "derived_key": [ + 71,111,167,179,223,229,107,45,223,184,100,207,103,16,106,234,217,25,120,51,156,12,142,28,186,4,134,110,182,28,191,11 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x9" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xcb5ca2f778293159761b941dc7b8f7fd374e3632c39b35a0fd4b1aa20ed4a091", + "derived_key": [ + 62,169,255,238,205,94,99,210,162,31,213,85,158,233,223,231,174,18,241,77,26,133,255,75,40,190,65,163,26,48,53,196 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10002" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xd8fc94fc3444dd0233f4f4f74b08d69d0079035017309fa37c5b30a7cabb729b", + "derived_key": [ + 232,83,248,233,232,89,11,170,74,117,125,224,222,189,198,137,244,49,205,228,155,200,97,42,160,89,8,63,109,25,91,168 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x9" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xd9ba5de301f3948ee34a04905cc32b778b54dac455410e096889003b0770d47c", + "derived_key": [ + 255,134,239,235,78,5,0,110,98,20,109,14,192,231,250,72,49,145,191,114,177,51,38,242,67,121,217,71,114,50,124,171 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x9" + }, + { + "address": "0x0000000000000000000000000000000000008005", + "key": "0xf7fa34f014959c990f8cabd865f6012c5ad2ae9390bd21dc8ab2c3ee9c340257", + "derived_key": [ + 
209,167,69,145,2,139,203,92,187,46,4,30,218,0,85,77,176,3,253,201,73,229,148,92,229,57,32,59,244,12,109,96 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x9" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x1", + "derived_key": [ + 113,233,23,33,249,145,133,118,215,96,240,47,3,202,196,124,111,64,3,49,96,49,132,142,60,29,153,230,232,58,71,67 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xe29e64ae9c38000" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x1b458e5ab877fea2e4abf98d12b31ec3f7c93fd4856e807f684322e8cf11fdf7", + "derived_key": [ + 49,87,111,239,58,195,179,2,237,163,15,66,168,74,199,52,200,236,175,1,55,3,126,248,127,239,193,246,133,27,151,79 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8603151b10a4a0" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x810ca1ae825b138452fb743e9948f909b6286cbfadd5a899190fcb21a75443ab", + "derived_key": [ + 185,211,150,83,253,116,26,253,56,22,83,204,70,30,122,203,221,134,84,251,39,141,138,17,246,159,212,31,236,239,75,201 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x53c1850fab698c0" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0xafe379b9510a75924647deef7e3d3d3ebf948699c9f84eda83c07c71414098b8", + "derived_key": [ + 250,91,168,183,69,6,78,180,185,147,215,10,134,34,96,243,26,77,158,213,121,211,188,200,73,204,177,205,8,52,178,106 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x4a88ebbafe2b20" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0xeaa2b2fbf0b42c559059e5e9510edc15755f1c1883f0e41d5ba5f9aea4ac201a", + "derived_key": [ + 141,97,126,192,90,203,191,95,226,69,41,166,75,35,133,169,106,173,67,240,155,225,173,169,44,112,64,49,220,193,72,27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x81d41f918fe1780" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x0", + "derived_key": [ + 150,46,36,83,88,148,64,235,173,169,107,3,33,223,255,240,191,103,10,254,52,186,74,130,141,51,66,227,241,78,210,217 + ], + "enumeration_index": 60, + "initial_value": "0x10e", + "final_value": "0x1f9" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x6", + "derived_key": [ + 162,84,32,193,217,215,5,53,140,19,76,198,1,217,209,132,203,77,253,222,126,28,172,43,195,212,211,139,249,236,68,230 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x5f5e100" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x7", + "derived_key": [ + 18,59,175,197,134,247,119,100,72,140,210,76,106,119,84,110,90,15,232,189,251,79,162,3,207,175,252,54,204,228,221,91 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100000000000000000000000066fc0241" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x9", + "derived_key": [ + 142,125,208,106,197,183,59,71,59,230,188,90,81,3,15,76,116,55,101,124,183,178,155,243,118,197,100,184,209,103,90,94 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x200000000000000000000000066fc0242" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xb", + "derived_key": [ + 75,168,78,31,55,208,65,188,110,85,186,57,104,38,204,73,78,132,212,129,91,109,181,38,144,66,46,234,115,134,49,79 + ], + "enumeration_index": 0, + "initial_value": "0x0", + 
"final_value": "0xe8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244c" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xc", + "derived_key": [ + 61,226,32,44,203,98,106,211,135,215,7,34,230,79,190,68,86,46,47,35,26,41,12,8,83,43,141,106,186,64,47,245 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xf2aaeb06c0e556434ac93c232f37dd8be0a7bf9f430f76eb564df9fcb770c45f" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x10c", + "derived_key": [ + 121,9,53,136,208,232,71,239,167,58,16,206,32,228,121,159,177,228,102,66,214,86,23,199,229,33,63,160,73,137,217,45 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x200000000000000000000000066fc0242" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x10d", + "derived_key": [ + 135,222,210,71,225,102,15,130,112,113,199,241,55,25,52,88,151,81,8,83,132,252,159,68,98,193,241,137,124,92,62,239 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x100000000000000000000000000000001" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x10f", + "derived_key": [ + 134,36,129,147,235,77,210,168,206,129,95,135,108,18,77,72,53,149,34,240,133,77,149,216,7,46,175,240,211,125,85,189 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x320" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x110", + "derived_key": [ + 62,137,13,108,44,59,173,166,238,204,150,3,169,156,28,98,89,237,90,100,2,241,199,108,193,139,86,140,58,239,186,15 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x22a9" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x5eff886ea0ce6ca488a3d6e336d6c0f75f46d19b42c06ce5ee98e42c96d256c7", + "derived_key": [ + 145,29,210,173,116,63,242,55,212,17,100,138,15,227,44,109,116,238,192,96,113,106,42,116,53,47,107,28,67,91,93,103 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xf9030b78c5bf5ac997a76962aa32c90a6d8e8ebce9838c8eeb388d73e1f7659a" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0x0", + "derived_key": [ + 214,116,246,54,163,90,111,26,81,86,78,195,55,27,156,77,163,18,109,90,208,186,227,80,207,199,250,234,199,99,99,184 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0x33", + "derived_key": [ + 104,98,246,102,108,85,7,252,51,21,214,132,35,158,2,38,112,107,69,195,65,114,145,245,183,172,194,211,57,80,82,17 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8ad65915f42c06ad06f23e33feee4f57060cd249" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0xc9", + "derived_key": [ + 180,68,244,119,54,206,136,162,78,107,80,251,5,29,192,174,93,179,175,68,217,8,246,220,217,160,21,208,74,126,225,227 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10003" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0xd3", + "derived_key": [ + 59,172,224,22,174,10,65,231,169,237,9,168,91,33,85,109,38,187,242,242,75,76,32,165,75,187,165,27,95,83,162,158 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10005" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0xd5", + "derived_key": [ + 
237,36,132,158,202,168,131,171,106,32,214,79,172,224,148,150,15,71,73,102,217,162,19,183,2,117,192,112,196,76,181,34 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x3db94d72a25f64874243af776e06f449d55ba9dd" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0x4d7101ab951ded1d6f6a567c6e539f8f6a2a675fe1d5eba86fefe5192175b131", + "derived_key": [ + 83,157,81,206,111,89,151,62,178,167,63,16,226,11,189,169,125,149,14,110,8,62,221,87,116,233,142,217,139,253,153,16 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4", + "derived_key": [ + 186,154,222,88,248,170,108,168,43,242,42,43,72,15,245,221,236,232,166,232,99,81,164,123,16,213,143,51,128,251,219,183 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0x99d6a8ff20aa8acdd49c8fb0cc74f2b2b57e0fa371d5aadb8e266a8cf9157ef5", + "derived_key": [ + 103,185,128,156,225,233,200,126,96,129,32,179,163,131,84,200,153,155,236,34,245,43,19,243,165,109,226,10,22,113,50,131 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010002", + "key": "0xca59cc8f90e9fd91e0bc61c0c980b4b130ad1217252dd3bc209e6dfa57a05f63", + "derived_key": [ + 203,224,11,159,230,121,179,34,119,46,123,13,250,7,202,214,183,18,124,144,172,158,237,255,172,53,228,144,236,81,142,168 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0" + }, + { + "address": "0x0000000000000000000000000000000000010003", + "key": "0x0", + "derived_key": [ + 183,144,135,107,116,64,90,68,61,138,52,178,244,96,20,237,96,5,52,90,158,129,172,204,39,175,55,18,74,73,29,222 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0x0000000000000000000000000000000000010003", + "key": "0x33", + "derived_key": [ + 36,49,6,86,93,12,79,206,248,151,94,121,177,178,35,12,1,159,78,58,178,122,63,78,124,169,48,107,159,98,153,132 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8ad65915f42c06ad06f23e33feee4f57060cd249" + }, + { + "address": "0x0000000000000000000000000000000000010003", + "key": "0xfa", + "derived_key": [ + 208,111,9,167,39,114,89,78,179,210,171,35,115,181,211,197,236,176,132,184,74,77,237,45,48,18,241,69,222,221,138,25 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x5de08929a3c0835232a7cd201d338317caabd873" + }, + { + "address": "0x0000000000000000000000000000000000010003", + "key": "0x56ca7d7fc0d180f3d83f99276f19310b5c00992edd8618fb359971a7ecb99ab3", + "derived_key": [ + 199,117,193,115,55,85,171,245,34,173,219,214,151,226,215,10,91,97,70,75,209,104,27,41,137,81,196,246,13,142,199,12 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10004" + }, + { + "address": "0x0000000000000000000000000000000000010003", + "key": "0x635799b36cb7719b903c111d5790821f9e51e29061bc47a57c7988be806aff32", + "derived_key": [ + 113,24,36,136,41,22,138,100,28,59,149,105,31,231,215,27,33,193,211,238,215,254,44,202,236,107,125,180,46,38,146,200 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10002" + }, + { + "address": "0x0000000000000000000000000000000000010004", + "key": "0x0", + "derived_key": [ + 
38,182,122,48,1,162,205,218,95,52,172,146,222,81,199,193,42,178,228,105,133,88,214,83,137,237,66,230,119,250,5,85 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0x0000000000000000000000000000000000010004", + "key": "0x33", + "derived_key": [ + 253,39,5,238,88,207,60,229,238,92,28,224,63,70,109,126,152,54,188,71,18,186,162,153,21,61,132,71,202,121,113,207 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8ad65915f42c06ad06f23e33feee4f57060cd249" + }, + { + "address": "0x0000000000000000000000000000000000010004", + "key": "0xc9", + "derived_key": [ + 191,14,175,103,174,227,73,235,177,89,118,163,111,237,172,26,74,6,101,80,58,211,113,127,243,254,106,224,120,179,101,156 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x4c48bdb0145a89bd87f8884f3fa8c1e42d585e75" + }, + { + "address": "0x0000000000000000000000000000000000010004", + "key": "0xfb", + "derived_key": [ + 184,2,226,247,10,56,240,40,136,100,146,139,246,198,195,194,209,164,217,45,202,10,147,86,175,254,198,249,92,64,121,164 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10000fd53068fc35c6a23fee067bcfd3fc0d880ba4f79d8b65aafbff02f305d" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x0", + "derived_key": [ + 168,74,2,141,105,206,208,33,164,189,140,91,38,36,168,242,80,217,18,184,248,245,157,129,85,249,94,94,229,138,101,38 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x4", + "derived_key": [ + 47,241,202,110,153,206,246,252,92,214,119,95,159,94,245,61,243,40,240,8,26,143,180,81,247,55,255,244,73,12,229,83 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x5", + "derived_key": [ + 95,192,113,95,216,242,21,235,124,16,227,245,80,217,178,9,241,140,170,135,64,175,84,27,211,70,239,73,100,139,20,245 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x6", + "derived_key": [ + 201,169,20,123,206,251,168,141,33,64,175,106,246,185,19,185,53,101,125,53,5,87,5,184,7,21,91,61,208,130,42,131 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x36b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0", + "derived_key": [ + 183,44,52,109,222,204,99,77,172,182,15,29,40,214,131,168,39,33,227,213,36,163,61,162,168,47,3,62,136,241,101,126 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x16db2e4b9f8dc120de98f8491964203ba76de27b27b29c2d25f85a325cd37477", + "derived_key": [ + 133,79,165,174,6,191,41,30,209,5,109,104,28,93,197,246,247,13,23,242,234,3,204,110,233,229,198,255,131,62,203,105 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x7bbeda1ca523343d5e888708327d45f8c743f6cb29e139a7e03dc5068543e6c4", + "derived_key": [ + 211,102,235,93,180,24,37,200,29,129,191,72,73,93,114,116,50,181,244,253,225,248,223,46,101,251,180,223,113,77,242,139 + ], + "enumeration_index": 0, + 
"initial_value": "0x0", + "final_value": "0x46700b4d40ac5c35af2c22dda2787a91eb567b06c924a8fb8ae9a05b20c08c21" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4", + "derived_key": [ + 228,96,52,194,207,181,181,131,126,57,95,233,204,152,190,4,82,34,235,53,200,202,40,109,252,73,189,213,239,94,126,130 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0xac33ff75c19e70fe83507db0d683fd3465c996598dc972688b7ace676c89077b", + "derived_key": [ + 27,8,240,147,212,200,27,37,231,124,191,110,45,189,91,214,149,171,253,138,221,47,115,230,14,214,92,143,87,109,114,128 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1f9" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0xe14c171e271191dbbbddd568a762a4325466b12116e776c3243375f110708d73", + "derived_key": [ + 248,63,134,138,17,62,7,250,227,100,52,50,139,214,30,153,110,204,16,117,222,9,119,59,220,202,187,15,30,237,162,217 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0xe14c171e271191dbbbddd568a762a4325466b12116e776c3243375f110708d74", + "derived_key": [ + 207,50,50,89,38,214,97,46,146,127,167,239,70,37,230,216,37,111,63,130,63,184,65,242,102,240,65,120,90,218,241,226 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x0000000000000000000000000000000000010005", + "key": "0xf652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f", + "derived_key": [ + 247,124,190,104,95,142,126,239,68,219,69,165,161,237,129,135,165,5,236,239,227,84,140,240,18,4,129,67,95,125,116,254 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x46700b4d40ac5c35af2c22dda2787a91eb567b06c924a8fb8ae9a05b20c08c21" + }, + { + "address": "0x03e013dc3eb10bbd48a8f9c94758f04d081563b6", + "key": "0x0", + "derived_key": [ + 64,31,45,164,25,35,131,214,111,103,185,66,123,36,77,209,130,54,238,77,124,250,76,42,126,68,137,156,53,223,112,84 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x03e013dc3eb10bbd48a8f9c94758f04d081563b6", + "key": "0x33", + "derived_key": [ + 172,109,18,138,162,172,98,227,191,233,228,200,186,6,38,31,205,90,238,83,85,200,140,40,95,174,70,100,236,184,92,217 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xf42d59d064dc3efa5c6984103f5354a4578f9d38" + }, + { + "address": "0x03e013dc3eb10bbd48a8f9c94758f04d081563b6", + "key": "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc", + "derived_key": [ + 238,206,71,73,61,115,66,84,21,49,226,43,98,209,124,67,230,245,74,241,47,105,36,12,239,120,5,217,170,54,156,84 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xf0e50c6be2ab13f852259d99317e0fa1511ed273" + }, + { + "address": "0x03e013dc3eb10bbd48a8f9c94758f04d081563b6", + "key": "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "derived_key": [ + 133,159,227,100,212,172,149,230,75,232,210,108,56,145,60,23,37,166,185,84,193,191,193,253,113,198,103,19,58,1,211,88 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8ad65915f42c06ad06f23e33feee4f57060cd249" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x33", + "derived_key": [ + 
244,204,176,178,59,175,3,143,139,2,242,240,21,87,122,194,191,65,151,96,89,50,229,228,174,155,172,240,102,252,221,88 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x36615cf349d7f6344891b1e7ca7c72883f5dc049" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x9a", + "derived_key": [ + 97,131,206,41,240,31,150,151,163,154,135,110,104,89,178,252,214,86,245,40,217,82,157,194,186,14,137,246,116,87,3,221 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x58d1e441af5027fe6e4c6b749dde48721cef3bb31a08d3d64b9c6e9c725abc72" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x9b", + "derived_key": [ + 36,198,234,1,57,238,186,239,33,185,70,78,68,110,74,247,188,177,180,151,164,216,15,115,133,254,13,247,190,87,17,67 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xe8949117b1d97ac18a8d1224be1e4045c67b698392d4edc03db3ed46b9e94673" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x9c", + "derived_key": [ + 24,243,123,134,88,68,215,142,127,133,2,149,85,85,176,160,214,111,3,112,143,142,78,137,211,79,138,29,6,66,209,69 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xfa1e2d910cfa3e5c465f7d7f69c224d542e0a598" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x9d", + "derived_key": [ + 168,81,60,218,234,227,67,148,118,29,59,147,53,55,78,40,8,227,39,217,122,18,110,222,78,162,140,204,238,55,6,95 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1900000000" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x9f", + "derived_key": [ + 144,32,12,17,126,234,225,99,200,138,138,108,231,51,212,1,171,8,94,147,139,188,115,131,162,159,107,192,34,19,171,180 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x97b2219068b5104fd0de0a2a4666b3f6f397aca7" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0xa3", + "derived_key": [ + 86,83,107,175,210,186,181,198,128,36,151,56,29,81,196,245,31,168,78,1,68,190,24,94,31,195,247,20,122,219,85,214 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xace084d9e79dcdae011409ea9454d78218092e0b189cd6a5173b44667400d511" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc", + "derived_key": [ + 230,13,150,23,44,152,198,242,109,118,74,176,93,184,102,158,85,104,138,31,78,160,214,75,29,223,239,114,74,97,156,59 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x5e45123c75ae6b22222a2ef40afcf080e84f5af5" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4", + "derived_key": [ + 105,207,185,63,169,21,50,218,22,157,164,50,155,208,136,29,218,73,246,12,13,209,254,95,239,141,35,221,253,207,221,212 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0xa2493ae5fceab9e59e3829df3da317d9a236c9b8b11dc1da94cb0e047a357cad", + "derived_key": [ + 248,135,166,243,99,75,238,65,244,69,142,255,92,110,93,81,203,0,116,149,135,131,96,149,14,49,60,161,204,107,128,214 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + }, + { + "address": 
"0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0", + "key": "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "derived_key": [ + 164,102,179,128,123,95,250,209,85,215,47,223,202,234,9,179,137,135,46,66,252,162,20,55,210,106,243,173,46,40,178,89 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x36615cf349d7f6344891b1e7ca7c72883f5dc049" + }, + { + "address": "0x338bd2ded4569c568f21da174acec70b826e550c", + "key": "0x0", + "derived_key": [ + 156,179,78,101,253,32,134,94,34,150,203,47,196,201,136,120,12,142,64,149,165,101,204,29,186,80,109,39,8,84,79,232 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8002cd98cfb563492a6fb3e7c8243b7b9ad4cc92" + }, + { + "address": "0x338bd2ded4569c568f21da174acec70b826e550c", + "key": "0x1", + "derived_key": [ + 140,190,129,206,40,197,26,241,53,110,14,80,134,5,37,204,178,45,166,159,129,221,136,86,105,96,102,217,243,16,89,249 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x683cfc9cb6230b80899dbcb6181591d69089d8a4" + }, + { + "address": "0x4c48bdb0145a89bd87f8884f3fa8c1e42d585e75", + "key": "0x0", + "derived_key": [ + 177,52,180,226,24,99,117,67,64,64,241,12,41,231,167,74,209,204,218,129,255,34,102,39,251,93,142,41,145,92,203,50 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8ad65915f42c06ad06f23e33feee4f57060cd249" + }, + { + "address": "0x4c48bdb0145a89bd87f8884f3fa8c1e42d585e75", + "key": "0x1", + "derived_key": [ + 164,143,172,103,50,238,187,229,211,197,117,198,212,234,78,82,14,197,48,46,66,117,89,74,193,245,113,83,209,177,183,217 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x517d9f66c564d2edf43cb657424f72f1c20d3bff" + }, + { + "address": "0x517d9f66c564d2edf43cb657424f72f1c20d3bff", + "key": "0x0", + "derived_key": [ + 230,154,81,238,249,118,81,47,67,167,51,81,39,137,145,76,187,230,155,155,70,102,153,242,27,165,84,224,218,253,106,44 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0x5e45123c75ae6b22222a2ef40afcf080e84f5af5", + "key": "0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4", + "derived_key": [ + 60,175,19,171,24,127,90,244,102,215,109,52,163,108,87,248,78,75,87,18,254,157,225,108,251,34,166,46,135,226,155,114 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + }, + { + "address": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "key": "0x0", + "derived_key": [ + 137,66,124,98,185,206,118,62,215,166,225,249,150,252,161,203,0,217,115,193,56,251,207,17,2,102,50,45,221,187,226,50 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x5de08929a3c0835232a7cd201d338317caabd8730002" + }, + { + "address": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "key": "0x1", + "derived_key": [ + 86,146,226,162,251,7,31,172,102,81,196,200,22,126,55,247,235,106,113,202,76,139,128,246,85,114,151,178,101,95,159,191 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x338bd2ded4569c568f21da174acec70b826e550c" + }, + { + "address": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "key": "0x2", + "derived_key": [ + 238,250,55,221,42,57,86,177,201,103,75,239,3,21,25,161,11,112,13,117,46,113,1,71,100,17,102,72,32,125,98,78 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x10000fd53068fc35c6a23fee067bcfd3fc0d880ba4f79d8b65aafbff02f305d" + }, + { + "address": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "key": 
"0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc", + "derived_key": [ + 22,155,114,188,54,101,62,87,11,121,52,68,25,197,106,219,1,155,20,238,224,223,45,229,125,209,123,117,113,166,132,12 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xdc0ed6fcbfe51c9f84a662e64fd1347736aa7486" + }, + { + "address": "0x62af78a1a86d5fa8aa7b75ee1884d36ca3c3193a", + "key": "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "derived_key": [ + 81,122,205,57,136,220,234,247,156,218,204,189,130,97,114,48,48,160,227,48,48,64,28,58,212,244,195,137,255,228,64,41 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x8002cd98cfb563492a6fb3e7c8243b7b9ad4cc92" + }, + { + "address": "0x683cfc9cb6230b80899dbcb6181591d69089d8a4", + "key": "0x0", + "derived_key": [ + 237,236,215,194,80,85,137,94,24,27,243,41,116,140,187,70,33,135,209,48,214,17,9,198,53,117,79,21,244,235,240,208 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0x97b2219068b5104fd0de0a2a4666b3f6f397aca7", + "key": "0x0", + "derived_key": [ + 219,215,135,89,179,97,218,60,174,210,247,236,175,60,97,114,55,143,26,104,199,6,53,175,153,170,254,4,26,49,33,168 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x36615cf349d7f6344891b1e7ca7c72883f5dc049" + }, + { + "address": "0x97b2219068b5104fd0de0a2a4666b3f6f397aca7", + "key": "0x2", + "derived_key": [ + 131,75,158,95,145,124,241,215,162,81,17,8,190,214,110,90,15,123,1,214,244,31,200,40,196,119,19,72,17,44,27,219 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x2c3c5fb909767b6af655bad72f5d6638f5f5d0a0" + }, + { + "address": "0xdc0ed6fcbfe51c9f84a662e64fd1347736aa7486", + "key": "0x0", + "derived_key": [ + 62,110,21,209,63,190,73,44,77,121,157,143,198,176,46,157,199,11,251,128,18,248,171,99,94,148,201,218,67,21,70,232 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xff" + }, + { + "address": "0xe024f9e4e8fa2f08f768c1cb56bc4a6e3cbd8834", + "key": "0x8e94fed44239eb2314ab7a406345e6c5a8f0ccedf3b600de3d004e672c33abf4", + "derived_key": [ + 16,46,124,130,188,155,165,96,66,61,124,176,157,94,180,222,164,199,68,147,148,121,54,59,60,181,162,4,74,28,114,103 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x1" + } + ], + "aux_commitments": { + "events_queue_commitment": "0xec82208c87a937d88768a0067b2a80f0525eca8288dad2cf96cf8bbe6a1aa565", + "bootloader_initial_content_commitment": "0x97df88dcecbcd29b49773c042cdee7a44c57a741e64913fff5aa1b3484232f28" + }, + "blob_hashes": [ + { + "commitment": "0xf840cf3f6b7dc92729b2b9ef3b399e7b896d553b746362fe81c4eb911013570d", + "linear_hash": "0xff4feb4bef9401731ab9db3626c2e015baa6880d7b1c4382d03b30da3a0fd75e" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1" + } + } +} diff --git a/core/lib/types/src/contract_verification_api.rs b/core/lib/types/src/contract_verification_api.rs index 8ee1d3ec6491..cca5ae5a83a0 100644 --- a/core/lib/types/src/contract_verification_api.rs +++ b/core/lib/types/src/contract_verification_api.rs @@ -137,6 +137,8 @@ pub struct VerificationIncomingRequest { #[serde(flatten)] pub compiler_versions: CompilerVersions, pub optimization_used: bool, + /// Optimization mode used for the contract. Semantics depends on the compiler used; e.g., for `vyper`, + /// allowed values are `gas` (default), `codesize` or `none`. 
     pub optimizer_mode: Option<String>,
     #[serde(default)]
     pub constructor_arguments: Bytes,
@@ -152,17 +154,19 @@ pub enum CompilerType {
     Vyper,
 }
 
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(untagged)]
 pub enum CompilerVersions {
     #[serde(rename_all = "camelCase")]
     Solc {
-        compiler_zksolc_version: String,
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        compiler_zksolc_version: Option<String>,
         compiler_solc_version: String,
     },
     #[serde(rename_all = "camelCase")]
     Vyper {
-        compiler_zkvyper_version: String,
+        #[serde(default, skip_serializing_if = "Option::is_none")]
+        compiler_zkvyper_version: Option<String>,
         compiler_vyper_version: String,
     },
 }
@@ -175,29 +179,29 @@ impl CompilerVersions {
         }
     }
 
-    pub fn zk_compiler_version(&self) -> String {
+    pub fn zk_compiler_version(&self) -> Option<&str> {
         match self {
-            CompilerVersions::Solc {
+            Self::Solc {
                 compiler_zksolc_version,
                 ..
-            } => compiler_zksolc_version.clone(),
-            CompilerVersions::Vyper {
+            } => compiler_zksolc_version.as_deref(),
+            Self::Vyper {
                 compiler_zkvyper_version,
                 ..
-            } => compiler_zkvyper_version.clone(),
+            } => compiler_zkvyper_version.as_deref(),
         }
     }
 
-    pub fn compiler_version(&self) -> String {
+    pub fn compiler_version(&self) -> &str {
         match self {
-            CompilerVersions::Solc {
+            Self::Solc {
                 compiler_solc_version,
                 ..
-            } => compiler_solc_version.clone(),
-            CompilerVersions::Vyper {
+            } => compiler_solc_version,
+            Self::Vyper {
                 compiler_vyper_version,
                 ..
-            } => compiler_vyper_version.clone(),
+            } => compiler_vyper_version,
         }
     }
 }
@@ -213,10 +217,21 @@ pub struct VerificationRequest {
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct CompilationArtifacts {
+    /// In case of EVM contracts, this is the creation bytecode (`bytecode` in `solc` output).
     pub bytecode: Vec<u8>,
+    /// Deployed bytecode (`deployedBytecode` in `solc` output). Only set for EVM contracts; for EraVM contracts, the deployed bytecode
+    /// is always `bytecode` (i.e., there's no distinction between creation and deployed bytecodes).
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub deployed_bytecode: Option<Vec<u8>>,
     pub abi: serde_json::Value,
 }
 
+impl CompilationArtifacts {
+    pub fn deployed_bytecode(&self) -> &[u8] {
+        self.deployed_bytecode.as_deref().unwrap_or(&self.bytecode)
+    }
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct VerificationInfo {
@@ -235,12 +250,6 @@ pub struct VerificationRequestStatus {
     pub compilation_errors: Option<Vec<String>>,
 }
 
-#[derive(Debug)]
-pub enum DeployContractCalldata {
-    Deploy(Vec<u8>),
-    Ignore,
-}
-
 #[cfg(test)]
 mod tests {
     use assert_matches::assert_matches;
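The `#[serde(default, skip_serializing_if = "Option::is_none")]` attributes above make the zk-compiler version optional on both ends of the wire. A minimal, self-contained sketch of that behavior (a pared-down stand-in for `CompilerVersions`, not the crate's actual tests):

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
enum Versions {
    #[serde(rename_all = "camelCase")]
    Solc {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        compiler_zksolc_version: Option<String>,
        compiler_solc_version: String,
    },
}

fn main() {
    // `compilerZksolcVersion` may be omitted entirely; `default` fills in `None`.
    let req: Versions = serde_json::from_str(r#"{"compilerSolcVersion": "0.8.24"}"#).unwrap();
    assert_eq!(
        req,
        Versions::Solc {
            compiler_zksolc_version: None,
            compiler_solc_version: "0.8.24".into(),
        }
    );
    // ...and `skip_serializing_if` keeps the absent version out of the output.
    assert_eq!(
        serde_json::to_string(&req).unwrap(),
        r#"{"compilerSolcVersion":"0.8.24"}"#
    );
}
```

The same fallback idea drives `CompilationArtifacts::deployed_bytecode()`: when `deployed_bytecode` is `None` (EraVM), the creation bytecode doubles as the deployed one.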
diff --git a/core/lib/types/src/fee.rs b/core/lib/types/src/fee.rs
index 9dc2cda9e62b..f302c51cd4a9 100644
--- a/core/lib/types/src/fee.rs
+++ b/core/lib/types/src/fee.rs
@@ -1,5 +1,4 @@
 use serde::{Deserialize, Serialize};
-use zksync_utils::ceil_div;
 
 use crate::U256;
 
@@ -43,10 +42,10 @@ pub fn encoding_len(
     // All of the fields are encoded as `bytes`, so their encoding takes ceil(len, 32) slots.
     // For factory deps we only provide hashes, which are encoded as an array of bytes32.
-    let dynamic_len = ceil_div(data_len, 32)
-        + ceil_div(signature_len, 32)
-        + ceil_div(paymaster_input_len, 32)
-        + ceil_div(reserved_dynamic_len, 32)
+    let dynamic_len = data_len.div_ceil(32)
+        + signature_len.div_ceil(32)
+        + paymaster_input_len.div_ceil(32)
+        + reserved_dynamic_len.div_ceil(32)
         + factory_deps_len;
 
     BASE_LEN + dynamic_len as usize
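The removed `zksync_utils::ceil_div` helper is replaced by the standard library's `div_ceil` (stable for unsigned integers since Rust 1.73); both compute the ceil(len / 32) slot count mentioned in the comment above:

```rust
fn main() {
    // Each `bytes` field occupies ceil(len / 32) 32-byte slots in the ABI encoding.
    let data_len: u64 = 100;
    assert_eq!(data_len.div_ceil(32), 4); // 100 bytes -> 4 slots
    // Equivalent to the removed helper's formula: (len + 31) / 32.
    assert_eq!((data_len + 31) / 32, 4);
}
```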
diff --git a/core/lib/types/src/fee_model.rs b/core/lib/types/src/fee_model.rs
index b59aa65b04e0..79515e6f63a9 100644
--- a/core/lib/types/src/fee_model.rs
+++ b/core/lib/types/src/fee_model.rs
@@ -2,10 +2,9 @@ use std::num::NonZeroU64;
 
 use bigdecimal::{BigDecimal, ToPrimitive};
 use serde::{Deserialize, Serialize};
-use zksync_config::configs::chain::{FeeModelVersion, StateKeeperConfig};
 use zksync_system_constants::L1_GAS_PER_PUBDATA_BYTE;
 
-use crate::ProtocolVersionId;
+use crate::{ceil_div_u256, ProtocolVersionId, U256};
 
 /// Fee input to be provided into the VM. It contains two options:
 /// - `L1Pegged`: L1 gas price is provided to the VM, and the pubdata price is derived from it. Using this option is required for the
@@ -203,6 +202,7 @@ pub struct FeeModelConfigV2 {
     /// The maximum amount of pubdata that can be used by the batch. Note that if the calldata is used as pubdata, this variable should not exceed 128kb.
     pub max_pubdata_per_batch: u64,
 }
+
 impl Default for FeeModelConfig {
     /// Config with all zeroes is not a valid config (since for instance having 0 max gas per batch may incur division by zero),
     /// so we implement a sensible default config here.
@@ -213,24 +213,6 @@
     }
 }
 
-impl FeeModelConfig {
-    pub fn from_state_keeper_config(state_keeper_config: &StateKeeperConfig) -> Self {
-        match state_keeper_config.fee_model_version {
-            FeeModelVersion::V1 => Self::V1(FeeModelConfigV1 {
-                minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price,
-            }),
-            FeeModelVersion::V2 => Self::V2(FeeModelConfigV2 {
-                minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price,
-                compute_overhead_part: state_keeper_config.compute_overhead_part,
-                pubdata_overhead_part: state_keeper_config.pubdata_overhead_part,
-                batch_overhead_l1_gas: state_keeper_config.batch_overhead_l1_gas,
-                max_gas_per_batch: state_keeper_config.max_gas_per_batch,
-                max_pubdata_per_batch: state_keeper_config.max_pubdata_per_batch,
-            }),
-        }
-    }
-}
-
 #[derive(Debug, Clone, Copy, Serialize, Deserialize)]
 pub struct FeeParamsV1 {
     pub config: FeeModelConfigV1,
@@ -337,4 +319,442 @@ impl FeeParams {
             l1_gas_price: 1_000_000_000,
         })
     }
+
+    /// Provides scaled [`BatchFeeInput`] based on these parameters.
+    pub fn scale(
+        self,
+        l1_gas_price_scale_factor: f64,
+        l1_pubdata_price_scale_factor: f64,
+    ) -> BatchFeeInput {
+        match self {
+            Self::V1(params) => BatchFeeInput::L1Pegged(compute_batch_fee_model_input_v1(
+                params,
+                l1_gas_price_scale_factor,
+            )),
+            Self::V2(params) => BatchFeeInput::PubdataIndependent(clip_batch_fee_model_input_v2(
+                compute_batch_fee_model_input_v2(
+                    params,
+                    l1_gas_price_scale_factor,
+                    l1_pubdata_price_scale_factor,
+                ),
+            )),
+        }
+    }
+}
+
+/// Calculates the batch fee input based on the main node parameters.
+/// This function uses the `V1` fee model, i.e. where the pubdata price does not include the proving costs.
+fn compute_batch_fee_model_input_v1(
+    params: FeeParamsV1,
+    l1_gas_price_scale_factor: f64,
+) -> L1PeggedBatchFeeModelInput {
+    let l1_gas_price = (params.l1_gas_price as f64 * l1_gas_price_scale_factor) as u64;
+
+    L1PeggedBatchFeeModelInput {
+        l1_gas_price,
+        fair_l2_gas_price: params.config.minimal_l2_gas_price,
+    }
+}
+
+/// Calculates the batch fee input based on the main node parameters.
+/// This function uses the `V2` fee model, i.e. where the pubdata price does not include the proving costs.
+fn compute_batch_fee_model_input_v2(
+    params: FeeParamsV2,
+    l1_gas_price_scale_factor: f64,
+    l1_pubdata_price_scale_factor: f64,
+) -> PubdataIndependentBatchFeeModelInput {
+    let config = params.config();
+    let l1_gas_price = params.l1_gas_price();
+    let l1_pubdata_price = params.l1_pubdata_price();
+
+    let FeeModelConfigV2 {
+        minimal_l2_gas_price,
+        compute_overhead_part,
+        pubdata_overhead_part,
+        batch_overhead_l1_gas,
+        max_gas_per_batch,
+        max_pubdata_per_batch,
+    } = config;
+
+    // Firstly, we scale the gas price and pubdata price in case it is needed.
+    let l1_gas_price = (l1_gas_price as f64 * l1_gas_price_scale_factor) as u64;
+    let l1_pubdata_price = (l1_pubdata_price as f64 * l1_pubdata_price_scale_factor) as u64;
+
+    // While the final results of the calculations are not expected to have any overflows, the intermediate computations
+    // might, so we use U256 for them.
+    let l1_batch_overhead_wei = U256::from(l1_gas_price) * U256::from(batch_overhead_l1_gas);
+
+    let fair_l2_gas_price = {
+        // Firstly, we calculate which part of the overall overhead each unit of L2 gas should cover.
+        let l1_batch_overhead_per_gas =
+            ceil_div_u256(l1_batch_overhead_wei, U256::from(max_gas_per_batch));
+
+        // Then, we multiply by the `compute_overhead_part` to get the overhead for the computation for each gas.
+        // Also, this means that if we almost never close batches because of compute, the `compute_overhead_part` should be zero and so
+        // it is possible that the computation costs include no overhead.
+        let gas_overhead_wei =
+            (l1_batch_overhead_per_gas.as_u64() as f64 * compute_overhead_part) as u64;
+
+        // We sum up the minimal L2 gas price (i.e. the raw prover/compute cost of a single L2 gas) and the overhead for the batch being closed.
+        minimal_l2_gas_price + gas_overhead_wei
+    };
+
+    let fair_pubdata_price = {
+        // Firstly, we calculate which part of the overall overhead each pubdata byte should cover.
+        let l1_batch_overhead_per_pubdata =
+            ceil_div_u256(l1_batch_overhead_wei, U256::from(max_pubdata_per_batch));
+
+        // Then, we multiply by the `pubdata_overhead_part` to get the overhead for each pubdata byte.
+        // Also, this means that if we almost never close batches because of pubdata, the `pubdata_overhead_part` should be zero and so
+        // it is possible that the pubdata costs include no overhead.
+        let pubdata_overhead_wei =
+            (l1_batch_overhead_per_pubdata.as_u64() as f64 * pubdata_overhead_part) as u64;
+
+        // We sum up the raw L1 pubdata price (i.e. the expected price of publishing a single pubdata byte) and the overhead for the batch being closed.
+        l1_pubdata_price + pubdata_overhead_wei
+    };
+
+    PubdataIndependentBatchFeeModelInput {
+        l1_gas_price,
+        fair_l2_gas_price,
+        fair_pubdata_price,
+    }
+}
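The fair-price formulas above can be sanity-checked by hand. A standalone re-derivation of the expected values from the `giant_numbers` test further down, using plain `u128` arithmetic in place of the `U256` helper:

```rust
fn main() {
    // Constants from the "giant numbers" test below, with scale factor 3.0 applied.
    let gwei: u128 = 1_000_000_000;
    let minimal_l2_gas_price: u128 = 100_000 * gwei;
    let l1_gas_price: u128 = 3 * 100_000 * gwei; // GIANT_L1_GAS_PRICE after scaling
    let l1_pubdata_price: u128 = l1_gas_price;
    let (compute_overhead_part, pubdata_overhead_part) = (5.0_f64, 5.0_f64);
    let (batch_overhead_l1_gas, max_gas_per_batch, max_pubdata_per_batch): (u128, u128, u128) =
        (1_000_000, 50_000_000, 100_000);

    // Total L1 cost of closing a batch, in wei.
    let batch_overhead_wei = l1_gas_price * batch_overhead_l1_gas;
    // Spread it over the batch's gas / pubdata capacity (rounding up), then weight it.
    let gas_overhead_wei =
        (batch_overhead_wei.div_ceil(max_gas_per_batch) as f64 * compute_overhead_part) as u128;
    let pubdata_overhead_wei = (batch_overhead_wei.div_ceil(max_pubdata_per_batch) as f64
        * pubdata_overhead_part) as u128;

    assert_eq!(minimal_l2_gas_price + gas_overhead_wei, 130_000_000_000_000);
    assert_eq!(l1_pubdata_price + pubdata_overhead_wei, 15_300_000_000_000_000);
}
```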
+/// Bootloader places limitations on fair_l2_gas_price and fair_pubdata_price.
+/// (MAX_ALLOWED_FAIR_L2_GAS_PRICE and MAX_ALLOWED_FAIR_PUBDATA_PRICE in bootloader code respectively)
+/// Server needs to clip these prices in order to allow the chain to continue operating at a loss. The alternative
+/// would be to stop accepting the transactions until the conditions improve.
+/// TODO (PE-153): to be removed when bootloader limitation is removed
+fn clip_batch_fee_model_input_v2(
+    fee_model: PubdataIndependentBatchFeeModelInput,
+) -> PubdataIndependentBatchFeeModelInput {
+    /// MAX_ALLOWED_FAIR_L2_GAS_PRICE
+    const MAXIMUM_L2_GAS_PRICE: u64 = 10_000_000_000_000;
+    /// MAX_ALLOWED_FAIR_PUBDATA_PRICE
+    const MAXIMUM_PUBDATA_PRICE: u64 = 1_000_000_000_000_000;
+    PubdataIndependentBatchFeeModelInput {
+        l1_gas_price: fee_model.l1_gas_price,
+        fair_l2_gas_price: if fee_model.fair_l2_gas_price < MAXIMUM_L2_GAS_PRICE {
+            fee_model.fair_l2_gas_price
+        } else {
+            tracing::warn!(
+                "Fair l2 gas price {} exceeds maximum. Limiting to {}",
+                fee_model.fair_l2_gas_price,
+                MAXIMUM_L2_GAS_PRICE
+            );
+            MAXIMUM_L2_GAS_PRICE
+        },
+        fair_pubdata_price: if fee_model.fair_pubdata_price < MAXIMUM_PUBDATA_PRICE {
+            fee_model.fair_pubdata_price
+        } else {
+            tracing::warn!(
+                "Fair pubdata price {} exceeds maximum. Limiting to {}",
+                fee_model.fair_pubdata_price,
+                MAXIMUM_PUBDATA_PRICE
+            );
+            MAXIMUM_PUBDATA_PRICE
+        },
+    }
+}
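Aside from the warning logs, the clipping above is just a componentwise minimum. A compact sketch of the equivalent logic, with a simplified struct and hypothetical constants mirroring the bootloader limits quoted above:

```rust
#[derive(Debug, PartialEq)]
struct FeeInput {
    fair_l2_gas_price: u64,
    fair_pubdata_price: u64,
}

// Constants copied from the diff's clip function; the names are stand-ins.
const MAX_L2_GAS_PRICE: u64 = 10_000_000_000_000;
const MAX_PUBDATA_PRICE: u64 = 1_000_000_000_000_000;

fn clip(input: FeeInput) -> FeeInput {
    FeeInput {
        fair_l2_gas_price: input.fair_l2_gas_price.min(MAX_L2_GAS_PRICE),
        fair_pubdata_price: input.fair_pubdata_price.min(MAX_PUBDATA_PRICE),
    }
}

fn main() {
    let clipped = clip(FeeInput {
        fair_l2_gas_price: u64::MAX,
        fair_pubdata_price: 1,
    });
    assert_eq!(clipped.fair_l2_gas_price, MAX_L2_GAS_PRICE); // clamped
    assert_eq!(clipped.fair_pubdata_price, 1); // untouched
}
```

Keeping the branchy form in the diff is a deliberate design choice: it allows emitting a warning only on the clipped component.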
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // To test that overflow never happens, we'll use giant L1 gas price, i.e.
+    // almost realistic very large value of 100k gwei. Since it is so large, we'll also
+    // use it for the L1 pubdata price.
+    const GWEI: u64 = 1_000_000_000;
+    const GIANT_L1_GAS_PRICE: u64 = 100_000 * GWEI;
+
+    // As a small L2 gas price we'll use the value of 1 wei.
+    const SMALL_L1_GAS_PRICE: u64 = 1;
+
+    #[test]
+    fn test_compute_batch_fee_model_input_v2_giant_numbers() {
+        let config = FeeModelConfigV2 {
+            minimal_l2_gas_price: GIANT_L1_GAS_PRICE,
+            // We generally don't expect those values to be larger than 1. Still, in theory the operator
+            // may need to set higher values in extreme cases.
+            compute_overhead_part: 5.0,
+            pubdata_overhead_part: 5.0,
+            // The batch overhead would likely never grow beyond that
+            batch_overhead_l1_gas: 1_000_000,
+            // Let's imagine that for some reason the limit is relatively small
+            max_gas_per_batch: 50_000_000,
+            // The pubdata will likely never go below that
+            max_pubdata_per_batch: 100_000,
+        };
+
+        let params = FeeParamsV2::new(
+            config,
+            GIANT_L1_GAS_PRICE,
+            GIANT_L1_GAS_PRICE,
+            BaseTokenConversionRatio::default(),
+        );
+
+        // We'll use scale factor of 3.0
+        let input = compute_batch_fee_model_input_v2(params, 3.0, 3.0);
+
+        assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE * 3);
+        assert_eq!(input.fair_l2_gas_price, 130_000_000_000_000);
+        assert_eq!(input.fair_pubdata_price, 15_300_000_000_000_000);
+    }
+
+    #[test]
+    fn test_compute_batch_fee_model_input_v2_small_numbers() {
+        // Here we assume that the operator wants to make transactions as cheap as possible for users.
+        let config = FeeModelConfigV2 {
+            minimal_l2_gas_price: SMALL_L1_GAS_PRICE,
+            compute_overhead_part: 0.0,
+            pubdata_overhead_part: 0.0,
+            batch_overhead_l1_gas: 0,
+            max_gas_per_batch: 50_000_000,
+            max_pubdata_per_batch: 100_000,
+        };
+
+        let params = FeeParamsV2::new(
+            config,
+            SMALL_L1_GAS_PRICE,
+            SMALL_L1_GAS_PRICE,
+            BaseTokenConversionRatio::default(),
+        );
+
+        let input =
+            clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0));
+
+        assert_eq!(input.l1_gas_price, SMALL_L1_GAS_PRICE);
+        assert_eq!(input.fair_l2_gas_price, SMALL_L1_GAS_PRICE);
+        assert_eq!(input.fair_pubdata_price, SMALL_L1_GAS_PRICE);
+    }
+
+    #[test]
+    fn test_compute_batch_fee_model_input_v2_only_pubdata_overhead() {
+        // Here we use sensible config, but when only pubdata is used to close the batch
+        let config = FeeModelConfigV2 {
+            minimal_l2_gas_price: 100_000_000_000,
+            compute_overhead_part: 0.0,
+            pubdata_overhead_part: 1.0,
+            batch_overhead_l1_gas: 700_000,
+            max_gas_per_batch: 500_000_000,
+            max_pubdata_per_batch: 100_000,
+        };
+
+        let params = FeeParamsV2::new(
+            config,
+            GIANT_L1_GAS_PRICE,
+            GIANT_L1_GAS_PRICE,
+            BaseTokenConversionRatio::default(),
+        );
+
+        let input =
+            clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0));
+        assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE);
+        // The fair L2 gas price is identical to the minimal one.
+        assert_eq!(input.fair_l2_gas_price, 100_000_000_000);
+        // The fair pubdata price is the minimal one plus the overhead.
+        assert_eq!(input.fair_pubdata_price, 800_000_000_000_000);
+    }
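The expected values in the pubdata-overhead test fall directly out of the V2 formula; a standalone re-derivation with the same constants:

```rust
fn main() {
    let gwei: u128 = 1_000_000_000;
    let l1_gas_price: u128 = 100_000 * gwei; // GIANT_L1_GAS_PRICE
    let batch_overhead_l1_gas: u128 = 700_000;
    let max_pubdata_per_batch: u128 = 100_000;

    // Overhead of closing one batch, attributed entirely to pubdata (pubdata_overhead_part = 1.0).
    let batch_overhead_wei = l1_gas_price * batch_overhead_l1_gas;
    let overhead_per_pubdata_byte = batch_overhead_wei.div_ceil(max_pubdata_per_batch);

    // fair_pubdata_price = raw L1 pubdata price + per-byte overhead.
    let fair_pubdata_price = l1_gas_price + overhead_per_pubdata_byte;
    assert_eq!(fair_pubdata_price, 800_000_000_000_000); // 100k gwei + 700k gwei per byte
}
```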
+
+    #[test]
+    fn test_compute_batch_fee_model_input_v2_only_compute_overhead() {
+        // Here we use sensible config, but when only compute is used to close the batch
+        let config = FeeModelConfigV2 {
+            minimal_l2_gas_price: 100_000_000_000,
+            compute_overhead_part: 1.0,
+            pubdata_overhead_part: 0.0,
+            batch_overhead_l1_gas: 700_000,
+            max_gas_per_batch: 500_000_000,
+            max_pubdata_per_batch: 100_000,
+        };
+
+        let params = FeeParamsV2::new(
+            config,
+            GIANT_L1_GAS_PRICE,
+            GIANT_L1_GAS_PRICE,
+            BaseTokenConversionRatio::default(),
+        );
+
+        let input = compute_batch_fee_model_input_v2(params, 1.0, 1.0);
+        assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE);
+        // The fair L2 gas price is identical to the minimal one, plus the overhead
+        assert_eq!(input.fair_l2_gas_price, 240_000_000_000);
+        // The fair pubdata price is equal to the original one.
+        assert_eq!(input.fair_pubdata_price, GIANT_L1_GAS_PRICE);
+    }
+
+    #[test]
+    fn test_compute_batch_fee_model_input_v2_param_tweaking() {
+        // In this test we check that each param behaves as expected
+        let base_config = FeeModelConfigV2 {
+            minimal_l2_gas_price: 100_000_000_000,
+            compute_overhead_part: 0.5,
+            pubdata_overhead_part: 0.5,
+            batch_overhead_l1_gas: 700_000,
+            max_gas_per_batch: 500_000_000,
+            max_pubdata_per_batch: 100_000,
+        };
+
+        let base_params = FeeParamsV2::new(
+            base_config,
+            1_000_000_000,
+            1_000_000_000,
+            BaseTokenConversionRatio::default(),
+        );
+
+        let base_input = compute_batch_fee_model_input_v2(base_params, 1.0, 1.0);
+
+        let base_input_larger_l1_gas_price = compute_batch_fee_model_input_v2(
+            FeeParamsV2::new(
+                base_config,
+                2_000_000_000, // double the L1 gas price
+                1_000_000_000,
+                BaseTokenConversionRatio::default(),
+            ),
+            1.0,
+            1.0,
+        );
+        let base_input_scaled_l1_gas_price =
+            compute_batch_fee_model_input_v2(base_params, 2.0, 1.0);
+        assert_eq!(
+            base_input_larger_l1_gas_price, base_input_scaled_l1_gas_price,
+            "Scaling has the correct effect for the L1 gas price"
+        );
+        assert!(
+            base_input.fair_l2_gas_price < base_input_larger_l1_gas_price.fair_l2_gas_price,
+            "L1 gas price increase raises L2 gas price"
+        );
+        assert!(
+            base_input.fair_pubdata_price < base_input_larger_l1_gas_price.fair_pubdata_price,
+            "L1 gas price increase raises pubdata price"
+        );
+
+        let base_input_larger_pubdata_price = compute_batch_fee_model_input_v2(
+            FeeParamsV2::new(
+                base_config,
+                1_000_000_000,
+                2_000_000_000, // double the L1 pubdata price
+                BaseTokenConversionRatio::default(),
+            ),
+            1.0,
+            1.0,
+        );
+        let base_input_scaled_pubdata_price =
+            compute_batch_fee_model_input_v2(base_params, 1.0, 2.0);
+        assert_eq!(
+            base_input_larger_pubdata_price, base_input_scaled_pubdata_price,
+            "Scaling has the correct effect for the pubdata price"
+        );
+        assert_eq!(
+            base_input.fair_l2_gas_price, base_input_larger_pubdata_price.fair_l2_gas_price,
+            "L1 pubdata increase has no effect on L2 gas price"
+        );
+        assert!(
+            base_input.fair_pubdata_price < base_input_larger_pubdata_price.fair_pubdata_price,
+            "Pubdata price increase raises pubdata price"
+        );
+
+        let base_input_larger_max_gas = compute_batch_fee_model_input_v2(
+            FeeParamsV2::new(
+                FeeModelConfigV2 {
+                    max_gas_per_batch: base_config.max_gas_per_batch * 2,
+                    ..base_config
+                },
+                base_params.l1_gas_price(),
+                base_params.l1_pubdata_price(),
+                BaseTokenConversionRatio::default(),
+            ),
+            1.0,
+            1.0,
+        );
+        assert!(
+            base_input.fair_l2_gas_price > base_input_larger_max_gas.fair_l2_gas_price,
+            "Max gas increase lowers L2 gas price"
+        );
+        assert_eq!(
+            base_input.fair_pubdata_price, base_input_larger_max_gas.fair_pubdata_price,
+            "Max gas increase has no effect on pubdata price"
+        );
+
+        let base_input_larger_max_pubdata = compute_batch_fee_model_input_v2(
+            FeeParamsV2::new(
+                FeeModelConfigV2 {
+                    max_pubdata_per_batch: base_config.max_pubdata_per_batch * 2,
+                    ..base_config
+                },
+                base_params.l1_gas_price(),
+                base_params.l1_pubdata_price(),
+                BaseTokenConversionRatio::default(),
+            ),
+            1.0,
+            1.0,
+        );
+        assert_eq!(
+            base_input.fair_l2_gas_price, base_input_larger_max_pubdata.fair_l2_gas_price,
+            "Max pubdata increase has no effect on L2 gas price"
+        );
+        assert!(
+            base_input.fair_pubdata_price > base_input_larger_max_pubdata.fair_pubdata_price,
+            "Max pubdata increase lowers pubdata price"
+        );
+    }
+
+    #[test]
+    fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_l1_gas() {
+        // In this test we check that the gas price limit works as expected
+        let config = FeeModelConfigV2 {
+            minimal_l2_gas_price: 100 * GWEI,
+            compute_overhead_part: 0.5,
+            pubdata_overhead_part: 0.5,
+            batch_overhead_l1_gas: 700_000,
+            max_gas_per_batch: 500_000_000,
+            max_pubdata_per_batch: 100_000,
+        };
+
+        let l1_gas_price = 1_000_000_000 * GWEI;
+        let params = FeeParamsV2::new(
+            config,
+            l1_gas_price,
+            GIANT_L1_GAS_PRICE,
+            BaseTokenConversionRatio::default(),
+        );
+
+        let input =
+            clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0));
+        assert_eq!(input.l1_gas_price, l1_gas_price);
+        // The fair L2 gas price is identical to the maximum
+        assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI);
+        assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI);
+    }
+
+    #[test]
+    fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_conversion_rate() {
+        // In this test we check that the gas price limit works as expected
+        let config = FeeModelConfigV2 {
+            minimal_l2_gas_price: GWEI,
+            compute_overhead_part: 0.5,
+            pubdata_overhead_part: 0.5,
+            batch_overhead_l1_gas: 700_000,
+            max_gas_per_batch: 500_000_000,
+            max_pubdata_per_batch: 100_000,
+        };
+
+        let params = FeeParamsV2::new(
+            config,
+            GWEI,
+            2 * GWEI,
+            BaseTokenConversionRatio {
+                numerator: NonZeroU64::new(3_000_000).unwrap(),
+                denominator: NonZeroU64::new(1).unwrap(),
+            },
+        );
+
+        let input =
+            clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0));
+        assert_eq!(input.l1_gas_price, 3_000_000 * GWEI);
+        // The fair L2 gas price is identical to the maximum
+        assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI);
+        assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI);
+    }
+}
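The conversion-rate test relies on `FeeParamsV2::l1_gas_price()` denominating the ETH price in the base token. The multiplication below is inferred from the test's expectations rather than quoted from the accessor itself, so treat it as a sketch of the arithmetic only:

```rust
fn main() {
    let gwei: u128 = 1_000_000_000;
    // Assumed conversion: ETH-denominated L1 gas price of 1 gwei, base token worth
    // 1/3_000_000 of ETH (the numerator / denominator pair from the test above).
    let (eth_l1_gas_price, numerator, denominator): (u128, u128, u128) = (gwei, 3_000_000, 1);

    let base_token_l1_gas_price = eth_l1_gas_price * numerator / denominator;
    assert_eq!(base_token_l1_gas_price, 3_000_000 * gwei);

    // At that price even modest overheads blow past the bootloader caps,
    // so both fair prices end up clipped to their maxima.
    assert!(base_token_l1_gas_price > 10_000 * gwei);
}
```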
diff --git a/core/lib/types/src/l1/mod.rs b/core/lib/types/src/l1/mod.rs
index e8144c75db2e..33225dd6b0c9 100644
--- a/core/lib/types/src/l1/mod.rs
+++ b/core/lib/types/src/l1/mod.rs
@@ -1,22 +1,21 @@
 //! Definition of ZKsync network priority operations: operations initiated from the L1.
 
-use std::convert::TryFrom;
-
 use serde::{Deserialize, Serialize};
-use zksync_basic_types::{web3::Log, Address, L1BlockNumber, PriorityOpId, H256, U256};
-use zksync_utils::{
-    address_to_u256, bytecode::hash_bytecode, h256_to_u256, u256_to_account_address,
-};
 
 use super::Transaction;
 use crate::{
-    abi, ethabi,
+    abi, address_to_u256,
+    bytecode::BytecodeHash,
+    ethabi,
     helpers::unix_timestamp_ms,
     l1::error::L1TxParseError,
     l2::TransactionType,
     priority_op_onchain_data::{PriorityOpOnchainData, PriorityOpOnchainMetadata},
     tx::Execute,
-    ExecuteTransactionCommon, PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE,
+    u256_to_address,
+    web3::Log,
+    Address, ExecuteTransactionCommon, L1BlockNumber, PriorityOpId, H256,
+    PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, U256,
 };
 
 pub mod error;
@@ -294,7 +293,7 @@ impl From<L1Tx> for abi::NewPriorityRequest {
             signature: vec![],
             factory_deps: factory_deps
                 .iter()
-                .map(|b| h256_to_u256(hash_bytecode(b)))
+                .map(|b| BytecodeHash::for_bytecode(b).value_u256())
                 .collect(),
             paymaster_input: vec![],
             reserved_dynamic: vec![],
@@ -319,7 +318,7 @@ impl TryFrom<abi::NewPriorityRequest> for L1Tx {
         let factory_deps_hashes: Vec<_> = req
             .factory_deps
             .iter()
-            .map(|b| h256_to_u256(hash_bytecode(b)))
+            .map(|b| BytecodeHash::for_bytecode(b).value_u256())
             .collect();
         anyhow::ensure!(req.transaction.factory_deps == factory_deps_hashes);
         for item in &req.transaction.reserved[2..] {
@@ -332,10 +331,10 @@ impl TryFrom<abi::NewPriorityRequest> for L1Tx {
         let common_data = L1TxCommonData {
             serial_id: PriorityOpId(req.transaction.nonce.try_into().unwrap()),
             canonical_tx_hash: H256::from_slice(&req.tx_hash),
-            sender: u256_to_account_address(&req.transaction.from),
+            sender: u256_to_address(&req.transaction.from),
             layer_2_tip_fee: U256::zero(),
             to_mint: req.transaction.reserved[0],
-            refund_recipient: u256_to_account_address(&req.transaction.reserved[1]),
+            refund_recipient: u256_to_address(&req.transaction.reserved[1]),
             full_fee: U256::zero(),
             gas_limit: req.transaction.gas_limit,
             max_fee_per_gas: req.transaction.max_fee_per_gas,
@@ -347,7 +346,7 @@ impl TryFrom<abi::NewPriorityRequest> for L1Tx {
         };
 
         let execute = Execute {
-            contract_address: Some(u256_to_account_address(&req.transaction.to)),
+            contract_address: Some(u256_to_address(&req.transaction.to)),
             calldata: req.transaction.data,
             factory_deps: req.factory_deps,
             value: req.transaction.value,
diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs
index 48e813e571d2..e7d582ab17a1 100644
--- a/core/lib/types/src/l2/mod.rs
+++ b/core/lib/types/src/l2/mod.rs
@@ -396,6 +396,13 @@ impl From<L2Tx> for api::Transaction {
         } else {
             (None, None, None)
         };
+        // Legacy transactions are not supposed to have `yParity` and are reliant on `v` instead.
+        // Other transactions are required to have `yParity` which replaces the deprecated `v` value
+        // (still included for backwards compatibility).
+        let y_parity = match tx.common_data.transaction_type {
+            TransactionType::LegacyTransaction => None,
+            _ => v,
+        };
 
         Self {
             hash: tx.hash(),
@@ -409,6 +416,7 @@ impl From<L2Tx> for api::Transaction {
             max_fee_per_gas: Some(tx.common_data.fee.max_fee_per_gas),
             gas: tx.common_data.fee.gas_limit,
             input: Bytes(tx.execute.calldata),
+            y_parity,
             v,
             r,
             s,
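The `yParity` rule added above is small but easy to get wrong; a minimal standalone model of it, with a simplified `TxType` enum standing in for `TransactionType`:

```rust
#[derive(Clone, Copy)]
enum TxType {
    Legacy,
    Eip1559,
}

fn y_parity(tx_type: TxType, v: Option<u64>) -> Option<u64> {
    match tx_type {
        // Legacy transactions keep using `v` (possibly EIP-155 encoded) and omit `yParity`.
        TxType::Legacy => None,
        // Typed transactions expose `yParity`; `v` is kept only for backwards compatibility.
        _ => v,
    }
}

fn main() {
    assert_eq!(y_parity(TxType::Legacy, Some(27)), None);
    assert_eq!(y_parity(TxType::Eip1559, Some(1)), Some(1));
}
```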
diff --git a/core/lib/types/src/l2_to_l1_log.rs b/core/lib/types/src/l2_to_l1_log.rs
index 59ade8873cd1..1b84a79024c7 100644
--- a/core/lib/types/src/l2_to_l1_log.rs
+++ b/core/lib/types/src/l2_to_l1_log.rs
@@ -1,5 +1,5 @@
 use serde::{Deserialize, Serialize};
-use zksync_system_constants::{BLOB1_LINEAR_HASH_KEY, PUBDATA_CHUNK_PUBLISHER_ADDRESS};
+use zksync_system_constants::{BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY, PUBDATA_CHUNK_PUBLISHER_ADDRESS};
 
 use crate::{
     blob::{num_blobs_created, num_blobs_required},
@@ -80,10 +80,15 @@ pub fn l2_to_l1_logs_tree_size(protocol_version: ProtocolVersionId) -> usize {
 }
 
 /// Returns the blob hashes parsed out from the system logs
-pub fn parse_system_logs_for_blob_hashes(
+pub fn parse_system_logs_for_blob_hashes_pre_gateway(
     protocol_version: &ProtocolVersionId,
     system_logs: &[SystemL2ToL1Log],
 ) -> Vec<H256> {
+    assert!(
+        protocol_version.is_pre_gateway(),
+        "Cannot parse blob linear hashes from system logs for post gateway"
+    );
+
     let num_required_blobs = num_blobs_required(protocol_version) as u32;
     let num_created_blobs = num_blobs_created(protocol_version) as u32;
 
@@ -95,9 +100,11 @@
         .iter()
         .filter(|log| {
             log.0.sender == PUBDATA_CHUNK_PUBLISHER_ADDRESS
-                && log.0.key >= H256::from_low_u64_be(BLOB1_LINEAR_HASH_KEY as u64)
+                && log.0.key >= H256::from_low_u64_be(BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY as u64)
                 && log.0.key
-                    < H256::from_low_u64_be((BLOB1_LINEAR_HASH_KEY + num_created_blobs) as u64)
+                    < H256::from_low_u64_be(
+                        (BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY + num_created_blobs) as u64,
+                    )
         })
         .map(|log| (log.0.key, log.0.value))
         .collect::<HashMap<_, _>>();
@@ -110,11 +117,10 @@
 
 #[cfg(test)]
 mod tests {
-    use zksync_basic_types::U256;
     use zksync_system_constants::L1_MESSENGER_ADDRESS;
-    use zksync_utils::u256_to_h256;
 
-    use super::L2ToL1Log;
+    use super::*;
+    use crate::{u256_to_h256, U256};
 
     #[test]
     fn l2_to_l1_log_to_bytes() {
diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs
index a50fc8a655b7..8ec98ec0571e 100644
--- a/core/lib/types/src/lib.rs
+++ b/core/lib/types/src/lib.rs
@@ -5,7 +5,7 @@
 
 #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)]
 
-use std::fmt;
+use std::{fmt, ops::Range};
 
 use anyhow::Context as _;
 use fee::encoding_len;
@@ -15,11 +15,9 @@ pub use protocol_upgrade::{ProtocolUpgrade, ProtocolVersion};
 use serde::{Deserialize, Serialize};
 pub use storage::*;
 pub use tx::Execute;
+use zksync_basic_types::bytecode::BytecodeHash;
 pub use zksync_basic_types::{protocol_version::ProtocolVersionId, vm, *};
 pub use zksync_crypto_primitives::*;
-use zksync_utils::{
-    address_to_u256, bytecode::hash_bytecode, h256_to_u256, u256_to_account_address,
-};
 
 use crate::{
     l2::{L2Tx, TransactionType},
@@ -43,7 +41,6 @@ pub mod l2;
 pub mod l2_to_l1_log;
 pub mod priority_op_onchain_data;
 pub mod protocol_upgrade;
-pub mod pubdata_da;
 pub mod snapshots;
 pub mod storage;
 pub mod system_contracts;
@@ -287,7 +284,7 @@ impl TryFrom<Transaction> for abi::Transaction {
             signature: vec![],
             factory_deps: factory_deps
                 .iter()
-                .map(|b| h256_to_u256(hash_bytecode(b)))
+                .map(|b| BytecodeHash::for_bytecode(b).value_u256())
                 .collect(),
             paymaster_input: vec![],
             reserved_dynamic: vec![],
@@ -318,7 +315,7 @@
             signature: vec![],
             factory_deps: factory_deps
                 .iter()
-                .map(|b| h256_to_u256(hash_bytecode(b)))
+                .map(|b| BytecodeHash::for_bytecode(b).value_u256())
                 .collect(),
             paymaster_input: vec![],
             reserved_dynamic: vec![],
@@ -347,7 +344,7 @@ impl Transaction {
             } => {
                 let factory_deps_hashes: Vec<_> = factory_deps
                     .iter()
-                    .map(|b| h256_to_u256(hash_bytecode(b)))
+                    .map(|b| BytecodeHash::for_bytecode(b).value_u256())
                     .collect();
                 anyhow::ensure!(tx.factory_deps == factory_deps_hashes);
                 for item in &tx.reserved[2..] {
@@ -369,10 +366,10 @@
                         .map_err(|err| anyhow::format_err!("{err}"))?,
                 ),
                 canonical_tx_hash: hash,
-                sender: u256_to_account_address(&tx.from),
+                sender: u256_to_address(&tx.from),
                 layer_2_tip_fee: U256::zero(),
                 to_mint: tx.reserved[0],
-                refund_recipient: u256_to_account_address(&tx.reserved[1]),
+                refund_recipient: u256_to_address(&tx.reserved[1]),
                 full_fee: U256::zero(),
                 gas_limit: tx.gas_limit,
                 max_fee_per_gas: tx.max_fee_per_gas,
@@ -386,9 +383,9 @@
             ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData {
                 upgrade_id: tx.nonce.try_into().unwrap(),
                 canonical_tx_hash: hash,
-                sender: u256_to_account_address(&tx.from),
+                sender: u256_to_address(&tx.from),
                 to_mint: tx.reserved[0],
-                refund_recipient: u256_to_account_address(&tx.reserved[1]),
+                refund_recipient: u256_to_address(&tx.reserved[1]),
                 gas_limit: tx.gas_limit,
                 max_fee_per_gas: tx.max_fee_per_gas,
                 gas_per_pubdata_limit: tx.gas_per_pubdata_byte_limit,
@@ -398,7 +395,7 @@
             unknown_type => anyhow::bail!("unknown tx type {unknown_type}"),
         },
         execute: Execute {
-            contract_address: Some(u256_to_address(&tx.to)),
+            contract_address: Some(u256_to_address(&tx.to)),
             calldata: tx.data,
             factory_deps,
             value: tx.value,
@@ -417,3 +414,8 @@
         })
     }
 }
+
+#[derive(Clone, Serialize, Debug, Default, Eq, PartialEq, Hash)]
+pub struct TransactionTimeRangeConstraint {
+    pub timestamp_asserter_range: Option<Range<u64>>,
+}
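The new `TransactionTimeRangeConstraint` is an optional half-open timestamp window. A sketch of how a caller might check a timestamp against it; the `matches` helper is illustrative and not part of this diff, and the serde derive is omitted for brevity:

```rust
use std::ops::Range;

#[derive(Clone, Debug, Default, Eq, PartialEq, Hash)]
pub struct TransactionTimeRangeConstraint {
    pub timestamp_asserter_range: Option<Range<u64>>,
}

// Hypothetical helper: `None` means "no constraint", i.e. any timestamp is acceptable.
fn matches(constraint: &TransactionTimeRangeConstraint, timestamp: u64) -> bool {
    constraint
        .timestamp_asserter_range
        .as_ref()
        .map_or(true, |range| range.contains(&timestamp))
}

fn main() {
    let unconstrained = TransactionTimeRangeConstraint::default();
    assert!(matches(&unconstrained, 0));

    let constrained = TransactionTimeRangeConstraint {
        timestamp_asserter_range: Some(1_700_000_000..1_700_000_600),
    };
    assert!(matches(&constrained, 1_700_000_300));
    assert!(!matches(&constrained, 1_700_000_600)); // end is exclusive
}
```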
diff --git a/core/lib/types/src/protocol_upgrade.rs b/core/lib/types/src/protocol_upgrade.rs
index 48f26dfd5c7f..7d8f678fa851 100644
--- a/core/lib/types/src/protocol_upgrade.rs
+++ b/core/lib/types/src/protocol_upgrade.rs
@@ -12,11 +12,10 @@ use zksync_contracts::{
     BaseSystemContractsHashes, ADMIN_EXECUTE_UPGRADE_FUNCTION,
     ADMIN_UPGRADE_CHAIN_FROM_VERSION_FUNCTION, DIAMOND_CUT,
 };
-use zksync_utils::h256_to_u256;
 
 use crate::{
-    abi, ethabi::ParamType, web3::Log, Address, Execute, ExecuteTransactionCommon, Transaction,
-    TransactionType, H256, U256,
+    abi, ethabi::ParamType, h256_to_u256, web3::Log, Address, Execute, ExecuteTransactionCommon,
+    Transaction, TransactionType, H256, U256,
 };
 
 /// Represents a call to be made during governance operation.
diff --git a/core/lib/types/src/snapshots.rs b/core/lib/types/src/snapshots.rs
index 156d1e4723dd..b9ee62ab24ec 100644
--- a/core/lib/types/src/snapshots.rs
+++ b/core/lib/types/src/snapshots.rs
@@ -5,9 +5,8 @@ use num_enum::{IntoPrimitive, TryFromPrimitive};
 use serde::{Deserialize, Serialize};
 use zksync_basic_types::{AccountTreeId, L1BatchNumber, L2BlockNumber, H256};
 use zksync_protobuf::{required, ProtoFmt};
-use zksync_utils::u256_to_h256;
 
-use crate::{utils, web3::Bytes, ProtocolVersionId, StorageKey, StorageValue, U256};
+use crate::{u256_to_h256, utils, web3::Bytes, ProtocolVersionId, StorageKey, StorageValue, U256};
 
 /// Information about all snapshots persisted by the node.
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -331,9 +330,8 @@ pub fn uniform_hashed_keys_chunk(chunk_id: u64, chunk_count: u64) -> ops::RangeInclusive<U256>
 
 #[cfg(test)]
 mod tests {
-    use zksync_utils::h256_to_u256;
-
     use super::*;
+    use crate::h256_to_u256;
 
     #[test]
     fn chunking_is_correct() {
diff --git a/core/lib/types/src/storage/log.rs b/core/lib/types/src/storage/log.rs
index a05e25abccb5..075a05781b67 100644
--- a/core/lib/types/src/storage/log.rs
+++ b/core/lib/types/src/storage/log.rs
@@ -2,10 +2,10 @@ use std::mem;
 
 use serde::{Deserialize, Serialize};
 use zksync_basic_types::AccountTreeId;
-use zksync_utils::{h256_to_u256, u256_to_h256};
 
 use crate::{
     api::ApiStorageLog,
+    h256_to_u256, u256_to_h256,
     zk_evm_types::{self, LogQuery, Timestamp},
     StorageKey, StorageValue, U256,
 };
diff --git a/core/lib/types/src/storage/mod.rs b/core/lib/types/src/storage/mod.rs
index 9ef037dc29b2..84a29ed8c039 100644
--- a/core/lib/types/src/storage/mod.rs
+++ b/core/lib/types/src/storage/mod.rs
@@ -5,9 +5,8 @@ pub use log::*;
 use serde::{Deserialize, Serialize};
 use zksync_basic_types::{web3::keccak256, L2ChainId};
 pub use zksync_system_constants::*;
-use zksync_utils::{address_to_h256, u256_to_h256};
 
-use crate::{AccountTreeId, Address, H160, H256, U256};
+use crate::{address_to_h256, u256_to_h256, AccountTreeId, Address, H160, H256, U256};
 
 pub mod log;
 pub mod witness_block_state;
diff --git a/core/lib/types/src/storage/witness_block_state.rs b/core/lib/types/src/storage/witness_block_state.rs
index bce9cc9034d7..7f3195af873f 100644
--- a/core/lib/types/src/storage/witness_block_state.rs
+++ b/core/lib/types/src/storage/witness_block_state.rs
@@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize};
 use crate::{StorageKey, StorageValue};
 
 /// Storage data used during Witness Generation.
-#[derive(Debug, Default, Clone)]
+#[derive(Debug, Default, Clone, PartialEq)]
 pub struct WitnessStorageState {
     pub read_storage_key: HashMap<StorageKey, StorageValue>,
     pub is_write_initial: HashMap<StorageKey, bool>,
diff --git a/core/lib/types/src/system_contracts.rs b/core/lib/types/src/system_contracts.rs
index 4329680991c8..4d1ff9b554ea 100644
--- a/core/lib/types/src/system_contracts.rs
+++ b/core/lib/types/src/system_contracts.rs
@@ -151,7 +151,7 @@ static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 26] = [
         "",
         "EvmGasManager",
         EVM_GAS_MANAGER_ADDRESS,
-        ContractLanguage::Sol,
+        ContractLanguage::Yul,
     ),
     // For now, only zero address and the bootloader address have empty bytecode at the init
     // In the future, we might want to set all of the system contracts this way.
diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index a8713f301ba6..db66c6955bda 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -3,21 +3,18 @@ use std::convert::{TryFrom, TryInto}; use rlp::{DecoderError, Rlp, RlpStream}; use serde::{Deserialize, Serialize}; use thiserror::Error; -use zksync_basic_types::H256; use zksync_system_constants::{DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE, MAX_ENCODED_TX_SIZE}; -use zksync_utils::{ - bytecode::{hash_bytecode, validate_bytecode, InvalidBytecodeError}, - concat_and_hash, u256_to_h256, -}; use super::{EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, EIP_712_TX_TYPE}; use crate::{ + bytecode::{validate_bytecode, BytecodeHash, InvalidBytecodeError}, fee::Fee, l1::L1Tx, l2::{L2Tx, TransactionType}, - web3::{keccak256, AccessList, Bytes}, + u256_to_h256, + web3::{keccak256, keccak256_concat, AccessList, Bytes}, Address, EIP712TypedStructure, Eip712Domain, L1TxCommonData, L2ChainId, Nonce, - PackedEthSignature, StructBuilder, LEGACY_TX_TYPE, U256, U64, + PackedEthSignature, StructBuilder, H256, LEGACY_TX_TYPE, U256, U64, }; /// Call contract request (eth_call / eth_estimateGas) @@ -176,7 +173,7 @@ impl CallRequestBuilder { } } -#[derive(Debug, Error, PartialEq)] +#[derive(Debug, Error)] pub enum SerializationTransactionError { #[error("transaction type is not supported")] UnknownTransactionFormat, @@ -355,7 +352,7 @@ impl EIP712TypedStructure for TransactionRequest { let factory_dep_hashes: Vec<_> = self .get_factory_deps() .into_iter() - .map(|dep| hash_bytecode(&dep)) + .map(|dep| BytecodeHash::for_bytecode(&dep).value()) .collect(); builder.add_member("factoryDeps", &factory_dep_hashes.as_slice()); @@ -732,7 +729,7 @@ impl TransactionRequest { signed_message: H256, ) -> Result<Option<H256>, SerializationTransactionError> { if self.is_eip712_tx() { - return Ok(Some(concat_and_hash( + return Ok(Some(keccak256_concat( signed_message, H256(keccak256(&self.get_signature()?)), ))); @@ -1160,9 +1157,9 @@ mod tests { let decoded_tx = TransactionRequest::from_bytes(encoded_tx.as_slice(), L2ChainId::from(272)); - assert_eq!( - decoded_tx, - Err(SerializationTransactionError::WrongChainId(Some(270))) + assert_matches!( + decoded_tx.unwrap_err(), + SerializationTransactionError::WrongChainId(Some(270)) ); } @@ -1238,9 +1235,9 @@ mod tests { data.insert(0, EIP_1559_TX_TYPE); let decoded_tx = TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)); - assert_eq!( - decoded_tx, - Err(SerializationTransactionError::WrongChainId(Some(272))) + assert_matches!( - decoded_tx.unwrap_err(), + SerializationTransactionError::WrongChainId(Some(272)) ); } @@ -1278,9 +1275,9 @@ mod tests { data.insert(0, EIP_1559_TX_TYPE); let res = TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)); - assert_eq!( - res, - Err(SerializationTransactionError::AccessListsNotSupported) + assert_matches!( + res.unwrap_err(), + SerializationTransactionError::AccessListsNotSupported ); } @@ -1315,9 +1312,9 @@ mod tests { data.insert(0, EIP_2930_TX_TYPE); let res = TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)); - assert_eq!( - res, - Err(SerializationTransactionError::AccessListsNotSupported) + assert_matches!( + res.unwrap_err(), + SerializationTransactionError::AccessListsNotSupported ); } @@ -1343,7 +1340,7 @@ mod tests { }; let execute_tx2: Result<L2Tx, SerializationTransactionError> = L2Tx::from_request(tx2, usize::MAX, true); - assert_eq!( + assert_matches!( execute_tx2.unwrap_err(),
SerializationTransactionError::TooBigNonce ); } @@ -1360,7 +1357,7 @@ mod tests { }; let execute_tx1: Result<L2Tx, SerializationTransactionError> = L2Tx::from_request(tx1, usize::MAX, true); - assert_eq!( + assert_matches!( execute_tx1.unwrap_err(), SerializationTransactionError::MaxFeePerGasNotU64 ); @@ -1374,7 +1371,7 @@ mod tests { }; let execute_tx2: Result<L2Tx, SerializationTransactionError> = L2Tx::from_request(tx2, usize::MAX, true); - assert_eq!( + assert_matches!( execute_tx2.unwrap_err(), SerializationTransactionError::MaxPriorityFeePerGasNotU64 ); @@ -1392,7 +1389,7 @@ mod tests { let execute_tx3: Result<L2Tx, SerializationTransactionError> = L2Tx::from_request(tx3, usize::MAX, true); - assert_eq!( + assert_matches!( execute_tx3.unwrap_err(), SerializationTransactionError::MaxFeePerPubdataByteNotU64 ); diff --git a/core/lib/types/src/tx/execute.rs b/core/lib/types/src/tx/execute.rs index 0edece9e46b4..d36f4b6521ee 100644 --- a/core/lib/types/src/tx/execute.rs +++ b/core/lib/types/src/tx/execute.rs @@ -1,9 +1,12 @@ use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; +use zksync_basic_types::bytecode::BytecodeHash; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_utils::{bytecode::hash_bytecode, ZeroPrefixHexSerde}; -use crate::{ethabi, Address, EIP712TypedStructure, StructBuilder, H256, U256}; +use crate::{ + ethabi, serde_wrappers::ZeroPrefixHexSerde, Address, EIP712TypedStructure, StructBuilder, H256, + U256, +}; /// This struct is the `serde` schema for the `Execute` struct. /// It allows us to modify `Execute` struct without worrying @@ -124,7 +127,7 @@ impl Execute { contract_bytecode: Vec<u8>, constructor_input: &[ethabi::Token], ) -> Self { - let bytecode_hash = hash_bytecode(&contract_bytecode); + let bytecode_hash = BytecodeHash::for_bytecode(&contract_bytecode).value(); Self { contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata: Self::encode_deploy_params_create( @@ -136,4 +139,14 @@ impl Execute { factory_deps: vec![contract_bytecode], } } + + /// Creates an instance for transferring base token to the specified recipient. + pub fn transfer(to: Address, value: U256) -> Self { + Self { + contract_address: Some(to), + calldata: vec![], + value, + factory_deps: vec![], + } + } } diff --git a/core/lib/types/src/utils.rs b/core/lib/types/src/utils.rs index bf086d6cdcd4..56a8ccf9fe9f 100644 --- a/core/lib/types/src/utils.rs +++ b/core/lib/types/src/utils.rs @@ -2,11 +2,10 @@ use std::fmt; use chrono::{DateTime, TimeZone, Utc}; use zksync_basic_types::{Address, H256}; -use zksync_utils::{address_to_h256, u256_to_h256}; use crate::{ - system_contracts::DEPLOYMENT_NONCE_INCREMENT, web3::keccak256, AccountTreeId, StorageKey, - L2_BASE_TOKEN_ADDRESS, U256, + address_to_h256, system_contracts::DEPLOYMENT_NONCE_INCREMENT, u256_to_h256, web3::keccak256, + AccountTreeId, StorageKey, L2_BASE_TOKEN_ADDRESS, U256, }; /// Displays a Unix timestamp (seconds since epoch) in human-readable form. Useful for logging.
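The `assert_eq!` → `assert_matches!` migration above is forced by `SerializationTransactionError` dropping its `PartialEq` derive: errors can no longer be compared for equality, only pattern-matched. A hedged illustration of the pattern (hypothetical helper; `assert_matches` is already a dev-dependency of these tests):

    use assert_matches::assert_matches;

    // With `PartialEq` gone, `assert_eq!(err, SerializationTransactionError::TooBigNonce)`
    // no longer compiles; matching on the variant sidesteps the comparison.
    fn expect_too_big_nonce(err: SerializationTransactionError) {
        assert_matches!(err, SerializationTransactionError::TooBigNonce);
    }

The new `Execute::transfer` constructor follows the same spirit of small, explicit helpers: a plain base-token transfer is just an `Execute` with empty calldata and no factory deps, e.g. `Execute::transfer(recipient, U256::from(1_000_000))`.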
diff --git a/core/lib/utils/Cargo.toml b/core/lib/utils/Cargo.toml index b87b2ad98964..216f3b12d426 100644 --- a/core/lib/utils/Cargo.toml +++ b/core/lib/utils/Cargo.toml @@ -11,25 +11,16 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_basic_types.workspace = true -zk_evm.workspace = true zksync_vlog.workspace = true -bigdecimal.workspace = true -num = { workspace = true, features = ["serde"] } -serde = { workspace = true, features = ["derive"] } tokio = { workspace = true, features = ["time"] } tracing.workspace = true anyhow.workspace = true -thiserror.workspace = true futures.workspace = true -hex.workspace = true reqwest = { workspace = true, features = ["blocking"] } serde_json.workspace = true once_cell.workspace = true [dev-dependencies] -rand.workspace = true tokio = { workspace = true, features = ["macros", "rt"] } -bincode.workspace = true assert_matches.workspace = true diff --git a/core/lib/utils/src/bytecode.rs b/core/lib/utils/src/bytecode.rs deleted file mode 100644 index 01cce5bc34d0..000000000000 --- a/core/lib/utils/src/bytecode.rs +++ /dev/null @@ -1,118 +0,0 @@ -// FIXME: move to basic_types? - -use zk_evm::k256::sha2::{Digest, Sha256}; -use zksync_basic_types::H256; - -use crate::bytes_to_chunks; - -const MAX_BYTECODE_LENGTH_IN_WORDS: usize = (1 << 16) - 1; -const MAX_BYTECODE_LENGTH_BYTES: usize = MAX_BYTECODE_LENGTH_IN_WORDS * 32; - -#[derive(Debug, thiserror::Error, PartialEq)] -pub enum InvalidBytecodeError { - #[error("Bytecode too long: {0} bytes, while max {1} allowed")] - BytecodeTooLong(usize, usize), - #[error("Bytecode has even number of 32-byte words")] - BytecodeLengthInWordsIsEven, - #[error("Bytecode length is not divisible by 32")] - BytecodeLengthIsNotDivisibleBy32, -} - -pub fn validate_bytecode(code: &[u8]) -> Result<(), InvalidBytecodeError> { - let bytecode_len = code.len(); - - if bytecode_len > MAX_BYTECODE_LENGTH_BYTES { - return Err(InvalidBytecodeError::BytecodeTooLong( - bytecode_len, - MAX_BYTECODE_LENGTH_BYTES, - )); - } - - if bytecode_len % 32 != 0 { - return Err(InvalidBytecodeError::BytecodeLengthIsNotDivisibleBy32); - } - - let bytecode_len_words = bytecode_len / 32; - - if bytecode_len_words % 2 == 0 { - return Err(InvalidBytecodeError::BytecodeLengthInWordsIsEven); - } - - Ok(()) -} - -/// Hashes the provided EraVM bytecode. -pub fn hash_bytecode(code: &[u8]) -> H256 { - let chunked_code = bytes_to_chunks(code); - let hash = zk_evm::zkevm_opcode_defs::utils::bytecode_to_code_hash(&chunked_code) - .expect("Invalid bytecode"); - - H256(hash) -} - -pub fn bytecode_len_in_words(bytecodehash: &H256) -> u16 { - u16::from_be_bytes([bytecodehash[2], bytecodehash[3]]) -} - -pub fn bytecode_len_in_bytes(bytecodehash: H256) -> usize { - bytecode_len_in_words(&bytecodehash) as usize * 32 -} - -/// Bytecode marker encoded in the first byte of the bytecode hash. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -#[repr(u8)] -pub enum BytecodeMarker { - /// EraVM bytecode marker (1). - EraVm = 1, - /// EVM bytecode marker (2). - Evm = 2, -} - -impl BytecodeMarker { - /// Parses a marker from the bytecode hash. - pub fn new(bytecode_hash: H256) -> Option<Self> { - Some(match bytecode_hash.as_bytes()[0] { - val if val == Self::EraVm as u8 => Self::EraVm, - val if val == Self::Evm as u8 => Self::Evm, - _ => return None, - }) - } -} - -/// Hashes the provided EVM bytecode. The bytecode must be padded to an odd number of 32-byte words; -/// bytecodes stored in the known codes storage satisfy this requirement automatically.
-pub fn hash_evm_bytecode(bytecode: &[u8]) -> H256 { - validate_bytecode(bytecode).expect("invalid EVM bytecode"); - - let mut hasher = Sha256::new(); - let len = bytecode.len() as u16; - hasher.update(bytecode); - let result = hasher.finalize(); - - let mut output = [0u8; 32]; - output[..].copy_from_slice(result.as_slice()); - output[0] = BytecodeMarker::Evm as u8; - output[1] = 0; - output[2..4].copy_from_slice(&len.to_be_bytes()); - - H256(output) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn bytecode_markers_are_valid() { - let bytecode_hash = hash_bytecode(&[0; 32]); - assert_eq!( - BytecodeMarker::new(bytecode_hash), - Some(BytecodeMarker::EraVm) - ); - let bytecode_hash = hash_evm_bytecode(&[0; 32]); - assert_eq!( - BytecodeMarker::new(bytecode_hash), - Some(BytecodeMarker::Evm) - ); - } -} diff --git a/core/lib/utils/src/convert.rs b/core/lib/utils/src/convert.rs deleted file mode 100644 index e086e385c8ef..000000000000 --- a/core/lib/utils/src/convert.rs +++ /dev/null @@ -1,185 +0,0 @@ -use std::convert::TryInto; - -use bigdecimal::BigDecimal; -use num::BigUint; -use zksync_basic_types::{Address, H256, U256}; - -pub fn u256_to_big_decimal(value: U256) -> BigDecimal { - let mut u32_digits = vec![0_u32; 8]; - // `u64_digit`s from `U256` are little-endian - for (i, &u64_digit) in value.0.iter().enumerate() { - u32_digits[2 * i] = u64_digit as u32; - u32_digits[2 * i + 1] = (u64_digit >> 32) as u32; - } - let value = BigUint::new(u32_digits); - BigDecimal::new(value.into(), 0) -} - -/// Converts `BigUint` value into the corresponding `U256` value. -fn biguint_to_u256(value: BigUint) -> U256 { - let bytes = value.to_bytes_le(); - U256::from_little_endian(&bytes) -} - -/// Converts `BigDecimal` value into the corresponding `U256` value. 
-pub fn bigdecimal_to_u256(value: BigDecimal) -> U256 { - let bigint = value.with_scale(0).into_bigint_and_exponent().0; - biguint_to_u256(bigint.to_biguint().unwrap()) -} - -fn ensure_chunkable(bytes: &[u8]) { - assert!( - bytes.len() % 32 == 0, - "Bytes must be divisible by 32 to split into chunks" - ); -} - -pub fn h256_to_u256(num: H256) -> U256 { - U256::from_big_endian(num.as_bytes()) -} - -pub fn address_to_h256(address: &Address) -> H256 { - let mut buffer = [0u8; 32]; - buffer[12..].copy_from_slice(address.as_bytes()); - H256(buffer) -} - -pub fn address_to_u256(address: &Address) -> U256 { - h256_to_u256(address_to_h256(address)) -} - -pub fn bytes_to_chunks(bytes: &[u8]) -> Vec<[u8; 32]> { - ensure_chunkable(bytes); - bytes - .chunks(32) - .map(|el| { - let mut chunk = [0u8; 32]; - chunk.copy_from_slice(el); - chunk - }) - .collect() -} - -pub fn be_chunks_to_h256_words(chunks: Vec<[u8; 32]>) -> Vec<H256> { - chunks.into_iter().map(|el| H256::from_slice(&el)).collect() -} - -pub fn bytes_to_be_words(vec: Vec<u8>) -> Vec<U256> { - ensure_chunkable(&vec); - vec.chunks(32).map(U256::from_big_endian).collect() -} - -pub fn be_words_to_bytes(words: &[U256]) -> Vec<u8> { - words - .iter() - .flat_map(|w| { - let mut bytes = [0u8; 32]; - w.to_big_endian(&mut bytes); - bytes - }) - .collect() -} - -pub fn u256_to_h256(num: U256) -> H256 { - let mut bytes = [0u8; 32]; - num.to_big_endian(&mut bytes); - H256::from_slice(&bytes) -} - -/// Converts `U256` value into the Address -pub fn u256_to_account_address(value: &U256) -> Address { - let mut bytes = [0u8; 32]; - value.to_big_endian(&mut bytes); - - Address::from_slice(&bytes[12..]) -} - -/// Converts `H256` value into the Address -pub fn h256_to_account_address(value: &H256) -> Address { - Address::from_slice(&value.as_bytes()[12..]) -} - -pub fn be_bytes_to_safe_address(bytes: &[u8]) -> Option<Address>
{ - if bytes.len() < 20 { - return None; - } - - let (zero_bytes, address_bytes) = bytes.split_at(bytes.len() - 20); - - if zero_bytes.iter().any(|b| *b != 0) { - None - } else { - Some(Address::from_slice(address_bytes)) - } -} - -/// Converts `h256` value as BE into the u32 -pub fn h256_to_u32(value: H256) -> u32 { - let be_u32_bytes: [u8; 4] = value[28..].try_into().unwrap(); - u32::from_be_bytes(be_u32_bytes) -} - -/// Converts u32 into the H256 as BE bytes -pub fn u32_to_h256(value: u32) -> H256 { - let mut result = [0u8; 32]; - result[28..].copy_from_slice(&value.to_be_bytes()); - H256(result) -} - -/// Converts `U256` value into bytes array -pub fn u256_to_bytes_be(value: &U256) -> Vec<u8> { - let mut bytes = vec![0u8; 32]; - value.to_big_endian(bytes.as_mut_slice()); - bytes -} - -#[cfg(test)] -mod test { - use num::BigInt; - use rand::{rngs::StdRng, Rng, SeedableRng}; - - use super::*; - - #[test] - fn test_u256_to_bigdecimal() { - const RNG_SEED: u64 = 123; - - let mut rng = StdRng::seed_from_u64(RNG_SEED); - // Small values. - for _ in 0..10_000 { - let value: u64 = rng.gen(); - let expected = BigDecimal::from(value); - assert_eq!(u256_to_big_decimal(value.into()), expected); - } - - // Arbitrary values - for _ in 0..10_000 { - let u64_digits: [u64; 4] = rng.gen(); - let value = u64_digits - .iter() - .enumerate() - .map(|(i, &digit)| U256::from(digit) << (i * 64)) - .fold(U256::zero(), |acc, x| acc + x); - let expected_value = u64_digits - .iter() - .enumerate() - .map(|(i, &digit)| BigInt::from(digit) << (i * 64)) - .fold(BigInt::from(0), |acc, x| acc + x); - assert_eq!( - u256_to_big_decimal(value), - BigDecimal::new(expected_value, 0) - ); - } - } - - #[test] - fn test_bigdecimal_to_u256() { - let value = BigDecimal::from(100u32); - let expected = U256::from(100u32); - assert_eq!(bigdecimal_to_u256(value), expected); - - let value = BigDecimal::new(BigInt::from(100), -2); - let expected = U256::from(10000u32); - assert_eq!(bigdecimal_to_u256(value), expected); - } -} diff --git a/core/lib/utils/src/format.rs b/core/lib/utils/src/format.rs deleted file mode 100644 index 9d15d4c358e7..000000000000 --- a/core/lib/utils/src/format.rs +++ /dev/null @@ -1,78 +0,0 @@ -// Built-in deps -use std::collections::VecDeque; -use std::string::ToString; -// External deps -// Workspace deps - -/// Formats amount in wei to tokens with precision. -/// Behaves just like ethers.utils.formatUnits -pub fn format_units(wei: impl ToString, units: u8) -> String { - let mut chars: VecDeque<char> = wei.to_string().chars().collect(); - - while chars.len() < units as usize { - chars.push_front('0'); - } - chars.insert(chars.len() - units as usize, '.'); - if *chars.front().unwrap() == '.' { - chars.push_front('0'); - } - while *chars.back().unwrap() == '0' { - chars.pop_back(); - } - if *chars.back().unwrap() == '.' { - chars.push_back('0'); - } - chars.iter().collect() -} - -/// Formats amount in wei to tokens.
-/// Behaves just like js ethers.utils.formatEther -pub fn format_ether(wei: impl ToString) -> String { - format_units(wei, 18) -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn test_format_units() { - // Test vector of (decimals, wei input, expected output) - let vals = vec![ - (0, "1000000000000000100000", "1000000000000000100000.0"), - (1, "0", "0.0"), - (1, "11000000000000000000", "1100000000000000000.0"), - (2, "0", "0.0"), - (2, "1000000000000000100000", "10000000000000001000.0"), - (4, "10001000000", "1000100.0"), - (4, "10100000000000000000000", "1010000000000000000.0"), - (4, "110", "0.011"), - (6, "1000000000000000100000", "1000000000000000.1"), - (8, "0", "0.0"), - (8, "10100000000000000000000", "101000000000000.0"), - (8, "110", "0.0000011"), - (9, "10000000000000000001", "10000000000.000000001"), - (9, "11000000", "0.011"), - (9, "11000000000000000000", "11000000000.0"), - (10, "10001000000", "1.0001"), - (10, "20000000000000000000000", "2000000000000.0"), - (11, "0", "0.0"), - (11, "10100000000000000000000", "101000000000.0"), - (12, "1000000000000000100000", "1000000000.0000001"), - (12, "10001000000", "0.010001"), - (12, "10010000000", "0.01001"), - (12, "110", "0.00000000011"), - (13, "10010000000", "0.001001"), - (14, "10010000000", "0.0001001"), - (14, "110", "0.0000000000011"), - (15, "0", "0.0"), - (17, "1000000000000000100000", "10000.000000000001"), - (17, "10001000000", "0.00000010001"), - (18, "1000000000000000100000", "1000.0000000000001"), - ]; - - for (dec, input, output) in vals { - assert_eq!(format_units(&input, dec), output); - } - } -} diff --git a/core/lib/utils/src/lib.rs b/core/lib/utils/src/lib.rs index 92a1d7a0c470..85618a2e61ef 100644 --- a/core/lib/utils/src/lib.rs +++ b/core/lib/utils/src/lib.rs @@ -1,13 +1,6 @@ //! Various helpers used in the ZKsync stack. 
-pub mod bytecode; -mod convert; pub mod env; pub mod http_with_retries; -pub mod misc; pub mod panic_extractor; -mod serde_wrappers; -pub mod time; pub mod wait_for_tasks; - -pub use self::{convert::*, misc::*, serde_wrappers::*}; diff --git a/core/lib/utils/src/misc.rs b/core/lib/utils/src/misc.rs deleted file mode 100644 index 52bd7657c4e1..000000000000 --- a/core/lib/utils/src/misc.rs +++ /dev/null @@ -1,55 +0,0 @@ -use zksync_basic_types::{web3::keccak256, H256, U256}; - -pub const fn ceil_div(a: u64, b: u64) -> u64 { - if a == 0 { - a - } else { - (a - 1) / b + 1 - } -} - -pub fn ceil_div_u256(a: U256, b: U256) -> U256 { - (a + b - U256::from(1)) / b -} - -pub fn concat_and_hash(hash1: H256, hash2: H256) -> H256 { - let mut bytes = [0_u8; 64]; - bytes[..32].copy_from_slice(&hash1.0); - bytes[32..].copy_from_slice(&hash2.0); - H256(keccak256(&bytes)) -} - -pub fn expand_memory_contents(packed: &[(usize, U256)], memory_size_bytes: usize) -> Vec<u8> { - let mut result: Vec<u8> = vec![0; memory_size_bytes]; - - for (offset, value) in packed { - value.to_big_endian(&mut result[(offset * 32)..(offset + 1) * 32]); - } - - result -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_ceil_div_u64_max() { - assert_eq!(0, ceil_div(u64::MIN, u64::MAX)); - assert_eq!(1, ceil_div(u64::MAX, u64::MAX)); - } - - #[test] - fn test_ceil_div_roundup_required() { - assert_eq!(3, ceil_div(5, 2)); - assert_eq!(4, ceil_div(10, 3)); - assert_eq!(3, ceil_div(15, 7)); - } - - #[test] - fn test_ceil_div_no_roundup_required() { - assert_eq!(2, ceil_div(4, 2)); - assert_eq!(2, ceil_div(6, 3)); - assert_eq!(2, ceil_div(14, 7)); - } -} diff --git a/core/lib/utils/src/time.rs b/core/lib/utils/src/time.rs deleted file mode 100644 index 70372db34f49..000000000000 --- a/core/lib/utils/src/time.rs +++ /dev/null @@ -1,19 +0,0 @@ -use std::time::{Duration, SystemTime, UNIX_EPOCH}; - -pub fn seconds_since_epoch() -> u64 { - duration_since_epoch().as_secs() -} - -pub fn millis_since(since: u64) -> u64 { - (millis_since_epoch() - since as u128 * 1000) as u64 -} - -pub fn millis_since_epoch() -> u128 { - duration_since_epoch().as_millis() -} - -fn duration_since_epoch() -> Duration { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("Incorrect system time") -} diff --git a/core/lib/vm_executor/Cargo.toml b/core/lib/vm_executor/Cargo.toml index a967aaa969ad..0402b7828e58 100644 --- a/core/lib/vm_executor/Cargo.toml +++ b/core/lib/vm_executor/Cargo.toml @@ -15,7 +15,6 @@ zksync_contracts.workspace = true zksync_dal.workspace = true zksync_types.workspace = true zksync_multivm.workspace = true -zksync_utils.workspace = true async-trait.workspace = true once_cell.workspace = true @@ -26,3 +25,4 @@ vise.workspace = true [dev-dependencies] assert_matches.workspace = true +test-casing.workspace = true diff --git a/core/lib/vm_executor/src/batch/executor.rs b/core/lib/vm_executor/src/batch/executor.rs index 6dc9354fd7db..12b0718a4a56 100644 --- a/core/lib/vm_executor/src/batch/executor.rs +++ b/core/lib/vm_executor/src/batch/executor.rs @@ -99,11 +99,13 @@ where let elapsed = latency.observe(); if !res.tx_result.result.is_failed() { - let gas_per_nanosecond = - res.tx_result.statistics.computational_gas_used as f64 / elapsed.as_nanos() as f64; + let gas_used = res.tx_result.statistics.computational_gas_used; EXECUTOR_METRICS .computational_gas_per_nanosecond - .observe(gas_per_nanosecond); + .observe(gas_used as f64 / elapsed.as_nanos() as f64); + EXECUTOR_METRICS + .computational_gas_used + 
.observe(gas_used.into()); } else { // The amount of computational gas paid for failed transactions is hard to get // but comparing to the gas limit makes sense, since we can burn all gas @@ -111,6 +113,7 @@ where EXECUTOR_METRICS .failed_tx_gas_limit_per_nanosecond .observe(tx_gas_limit as f64 / elapsed.as_nanos() as f64); + EXECUTOR_METRICS.failed_tx_gas_limit.observe(tx_gas_limit); } Ok(res) } diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index bc19086c9692..76ef244401bd 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -6,6 +6,7 @@ use tokio::sync::mpsc; use zksync_multivm::{ interface::{ executor::{BatchExecutor, BatchExecutorFactory}, + pubdata::PubdataBuilder, storage::{ReadStorage, StoragePtr, StorageView, StorageViewStats}, utils::DivergenceHandler, BatchTransactionExecutionResult, BytecodeCompressionError, CompressedBytecodeInfo, @@ -13,12 +14,13 @@ use zksync_multivm::{ VmInterface, VmInterfaceHistoryEnabled, }, is_supported_by_fast_vm, + pubdata_builders::pubdata_params_to_builder, tracers::CallTracer, vm_fast, vm_latest::HistoryEnabled, - FastVmInstance, LegacyVmInstance, MultiVMTracer, + FastVmInstance, LegacyVmInstance, MultiVmTracer, }; -use zksync_types::{vm::FastVmMode, Transaction}; +use zksync_types::{commitment::PubdataParams, vm::FastVmMode, Transaction}; use super::{ executor::{Command, MainBatchExecutor}, @@ -35,7 +37,7 @@ pub trait BatchTracer: fmt::Debug + 'static + Send + Sealed { const TRACE_CALLS: bool; /// Tracer for the fast VM. #[doc(hidden)] - type Fast: vm_fast::Tracer + Default + 'static; + type Fast: vm_fast::interface::Tracer + Default + 'static; } impl Sealed for () {} @@ -116,6 +118,7 @@ impl BatchExecutorFactory storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Box<dyn BatchExecutor<S>> { // Since we process `BatchExecutor` commands one-by-one (the next command is never enqueued // until a previous command is processed), capacity 1 is enough for the commands channel.
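With the hunk above, batch executor callers must now thread `PubdataParams` into `init_batch`, which converts them into a pubdata builder via `pubdata_params_to_builder` before spawning the blocking VM task. A rough sketch of the new call-site shape (hypothetical variable names; assumes `PubdataParams` implements `Default`, which this diff does not show):

    use zksync_types::commitment::PubdataParams;

    // `factory`, `storage`, `l1_batch_env`, and `system_env` come from the caller;
    // the only new argument is the pubdata params for the batch being executed.
    let executor = factory.init_batch(storage, l1_batch_env, system_env, PubdataParams::default());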
@@ -130,8 +133,14 @@ impl BatchExecutorFactory _tracer: PhantomData::<Tr>, }; - let handle = - tokio::task::spawn_blocking(move || executor.run(storage, l1_batch_params, system_env)); + let handle = tokio::task::spawn_blocking(move || { + executor.run( + storage, + l1_batch_params, + system_env, + pubdata_params_to_builder(pubdata_params), + ) + }); Box::new(MainBatchExecutor::new(handle, commands_sender)) } } @@ -183,8 +192,8 @@ impl BatchVm { dispatch_batch_vm!(self.start_new_l2_block(l2_block)); } - fn finish_batch(&mut self) -> FinishedL1Batch { - dispatch_batch_vm!(self.finish_batch()) + fn finish_batch(&mut self, pubdata_builder: Rc<dyn PubdataBuilder>) -> FinishedL1Batch { + dispatch_batch_vm!(self.finish_batch(pubdata_builder)) } fn make_snapshot(&mut self) { @@ -260,6 +269,7 @@ impl CommandReceiver { storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, + pubdata_builder: Rc<dyn PubdataBuilder>, ) -> anyhow::Result<StorageView<S>> { tracing::info!("Starting executing L1 batch #{}", &l1_batch_params.number); @@ -310,7 +320,7 @@ impl CommandReceiver { } } Command::FinishBatch(resp) => { - let vm_block_result = self.finish_batch(&mut vm)?; + let vm_block_result = self.finish_batch(&mut vm, pubdata_builder)?; if resp.send(vm_block_result).is_err() { break; } @@ -365,10 +375,14 @@ impl CommandReceiver { latency.observe(); } - fn finish_batch(&self, vm: &mut BatchVm<S, Tr>) -> anyhow::Result<FinishedL1Batch> { + fn finish_batch( + &self, + vm: &mut BatchVm<S, Tr>, + pubdata_builder: Rc<dyn PubdataBuilder>, + ) -> anyhow::Result<FinishedL1Batch> { // The vm execution was paused right after the last transaction was executed. // There is some post-processing work that the VM needs to do before the block is fully processed. - let result = vm.finish_batch(); + let result = vm.finish_batch(pubdata_builder); anyhow::ensure!( !result.block_tip_execution_result.result.is_failed(), "VM must not fail when finalizing block: {:#?}", diff --git a/core/lib/vm_executor/src/batch/metrics.rs b/core/lib/vm_executor/src/batch/metrics.rs index 6851193e9be9..37f7997c31fd 100644 --- a/core/lib/vm_executor/src/batch/metrics.rs +++ b/core/lib/vm_executor/src/batch/metrics.rs @@ -21,6 +21,10 @@ const GAS_PER_NANOSECOND_BUCKETS: Buckets = Buckets::values(&[ 0.01, 0.03, 0.1, 0.3, 0.5, 0.75, 1., 1.5, 3., 5., 10., 20., 50., ]); +const GAS_USED_BUCKETS: Buckets = Buckets::values(&[ + 10000., 25000., 45000., 70000., 100000., 150000., 225000., 350000., 500000., +]); + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "stage", rename_all = "snake_case")] pub(super) enum TxExecutionStage { @@ -37,8 +41,14 @@ pub(super) struct ExecutorMetrics { pub batch_executor_command_response_time: Family>, #[metrics(buckets = GAS_PER_NANOSECOND_BUCKETS)] pub computational_gas_per_nanosecond: Histogram<f64>, + /// Computational gas used, per transaction. + #[metrics(buckets = GAS_USED_BUCKETS)] + pub computational_gas_used: Histogram<u64>, #[metrics(buckets = GAS_PER_NANOSECOND_BUCKETS)] pub failed_tx_gas_limit_per_nanosecond: Histogram<f64>, + /// Gas limit, per failed transaction. + #[metrics(buckets = GAS_USED_BUCKETS)] + pub failed_tx_gas_limit: Histogram<u64>, /// Cumulative latency of interacting with the storage when executing a transaction /// in the batch executor.
#[metrics(buckets = Buckets::LATENCIES)] diff --git a/core/lib/vm_executor/src/oneshot/block.rs b/core/lib/vm_executor/src/oneshot/block.rs index cc759c032fc1..66bdd30e40ea 100644 --- a/core/lib/vm_executor/src/oneshot/block.rs +++ b/core/lib/vm_executor/src/oneshot/block.rs @@ -1,3 +1,5 @@ +use std::time::SystemTime; + use anyhow::Context; use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_multivm::{ @@ -8,11 +10,10 @@ use zksync_types::{ api, block::{unpack_block_info, L2BlockHasher}, fee_model::BatchFeeInput, - AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, H256, + h256_to_u256, AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, H256, SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, ZKPORTER_IS_AVAILABLE, }; -use zksync_utils::{h256_to_u256, time::seconds_since_epoch}; use super::{env::OneshotEnvParameters, ContractsKind}; @@ -124,7 +125,11 @@ impl BlockInfo { state_l2_block_number = sealed_l2_block_header.number; // Timestamp of the next L1 batch must be greater than the timestamp of the last L2 block. - l1_batch_timestamp = seconds_since_epoch().max(sealed_l2_block_header.timestamp + 1); + let current_timestamp = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .context("incorrect system time")? + .as_secs(); + l1_batch_timestamp = current_timestamp.max(sealed_l2_block_header.timestamp + 1); sealed_l2_block_header }; @@ -203,6 +208,7 @@ impl OneshotEnvParameters { enforced_base_fee, ) .await?; + Ok(OneshotEnv { system, l1_batch, diff --git a/core/lib/vm_executor/src/oneshot/contracts.rs b/core/lib/vm_executor/src/oneshot/contracts.rs index dc9ef0c0e8df..cacab36cb1c2 100644 --- a/core/lib/vm_executor/src/oneshot/contracts.rs +++ b/core/lib/vm_executor/src/oneshot/contracts.rs @@ -26,7 +26,7 @@ impl ContractsKind for CallOrExecute {} /// Provider of [`BaseSystemContracts`] for oneshot execution. /// -/// The main implementation of this trait is [`MultiVMBaseSystemContracts`], which selects contracts +/// The main implementation of this trait is [`MultiVmBaseSystemContracts`], which selects contracts /// based on [`ProtocolVersionId`]. #[async_trait] pub trait BaseSystemContractsProvider<C: ContractsKind>: fmt::Debug + Send + Sync { @@ -46,7 +46,7 @@ pub trait BaseSystemContractsProvider: fmt::Debug + Send + Syn /// System contracts (bootloader and default account abstraction) for all supported VM versions. #[derive(Debug)] -pub struct MultiVMBaseSystemContracts<C> { +pub struct MultiVmBaseSystemContracts<C> { /// Contracts to be used for pre-virtual-blocks protocol versions. pre_virtual_blocks: BaseSystemContracts, /// Contracts to be used for post-virtual-blocks protocol versions. post_virtual_blocks: BaseSystemContracts, @@ -67,11 +69,13 @@ pub struct MultiVMBaseSystemContracts { vm_1_5_0_increased_memory: BaseSystemContracts, /// Contracts to be used after the protocol defense upgrade vm_protocol_defense: BaseSystemContracts, - // We use `fn() -> C` marker so that the `MultiVMBaseSystemContracts` unconditionally implements `Send + Sync`. + /// Contracts to be used after the gateway upgrade + gateway: BaseSystemContracts, + // We use `fn() -> C` marker so that the `MultiVmBaseSystemContracts` unconditionally implements `Send + Sync`.
_contracts_kind: PhantomData<fn() -> C>, } -impl<C: ContractsKind> MultiVMBaseSystemContracts<C> { +impl<C: ContractsKind> MultiVmBaseSystemContracts<C> { fn get_by_protocol_version( &self, version: ProtocolVersionId, @@ -105,6 +107,7 @@ impl MultiVMBaseSystemContracts { ProtocolVersionId::Version25 | ProtocolVersionId::Version26 => { &self.vm_protocol_defense } + ProtocolVersionId::Version27 => &self.gateway, }; let base = base.clone(); @@ -117,7 +120,7 @@ impl MultiVMBaseSystemContracts { } } -impl MultiVMBaseSystemContracts<EstimateGas> { +impl MultiVmBaseSystemContracts<EstimateGas> { /// Returned system contracts (mainly the bootloader) are tuned to provide accurate execution metrics. pub fn load_estimate_gas_blocking() -> Self { Self { @@ -133,12 +136,13 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_increased_memory: BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), vm_protocol_defense: BaseSystemContracts::estimate_gas_post_protocol_defense(), + gateway: BaseSystemContracts::estimate_gas_gateway(), _contracts_kind: PhantomData, } } } -impl MultiVMBaseSystemContracts<CallOrExecute> { +impl MultiVmBaseSystemContracts<CallOrExecute> { /// Returned system contracts (mainly the bootloader) are tuned to provide better UX (e.g. revert messages). pub fn load_eth_call_blocking() -> Self { Self { @@ -154,13 +158,14 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory( ), vm_protocol_defense: BaseSystemContracts::playground_post_protocol_defense(), + gateway: BaseSystemContracts::playground_gateway(), _contracts_kind: PhantomData, } } } #[async_trait] -impl<C: ContractsKind> BaseSystemContractsProvider<C> for MultiVMBaseSystemContracts<C> { +impl<C: ContractsKind> BaseSystemContractsProvider<C> for MultiVmBaseSystemContracts<C> { async fn base_system_contracts( &self, block_info: &ResolvedBlockInfo, diff --git a/core/lib/vm_executor/src/oneshot/metrics.rs b/core/lib/vm_executor/src/oneshot/metrics.rs index 475463300f16..13a832ee3c89 100644 --- a/core/lib/vm_executor/src/oneshot/metrics.rs +++ b/core/lib/vm_executor/src/oneshot/metrics.rs @@ -50,7 +50,7 @@ pub(super) fn report_vm_memory_metrics( tx_id: &str, memory_metrics: &VmMemoryMetrics, vm_execution_took: Duration, - storage_metrics: &StorageViewStats, + storage_stats: &StorageViewStats, ) { MEMORY_METRICS.event_sink_size[&SizeType::Inner].observe(memory_metrics.event_sink_inner); MEMORY_METRICS.event_sink_size[&SizeType::History].observe(memory_metrics.event_sink_history); @@ -65,10 +65,18 @@ pub(super) fn report_vm_memory_metrics( MEMORY_METRICS .storage_view_cache_size - .observe(storage_metrics.cache_size); + .observe(storage_stats.cache_size); MEMORY_METRICS .full - .observe(memory_metrics.full_size() + storage_metrics.cache_size); + .observe(memory_metrics.full_size() + storage_stats.cache_size); - STORAGE_METRICS.observe(&format!("Tx {tx_id}"), vm_execution_took, storage_metrics); + report_vm_storage_metrics(tx_id, vm_execution_took, storage_stats); +} + +pub(super) fn report_vm_storage_metrics( + tx_id: &str, + vm_execution_took: Duration, + storage_stats: &StorageViewStats, +) { + STORAGE_METRICS.observe(&format!("Tx {tx_id}"), vm_execution_took, storage_stats); } diff --git a/core/lib/vm_executor/src/oneshot/mock.rs b/core/lib/vm_executor/src/oneshot/mock.rs index a7363c633c6c..89eaf3c75e29 100644 --- a/core/lib/vm_executor/src/oneshot/mock.rs +++ b/core/lib/vm_executor/src/oneshot/mock.rs @@ -4,18 +4,21 @@ use async_trait::async_trait; use zksync_multivm::interface::{ executor::{OneshotExecutor, TransactionValidator}, storage::ReadStorage, - 
tracer::{ValidationError, ValidationParams, ValidationTraces}, ExecutionResult, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, TxExecutionArgs, TxExecutionMode, VmExecutionResultAndLogs, }; use zksync_types::{l2::L2Tx, Transaction}; type TxResponseFn = dyn Fn(&Transaction, &OneshotEnv) -> VmExecutionResultAndLogs + Send + Sync; +type TxValidationTracesResponseFn = + dyn Fn(&Transaction, &OneshotEnv) -> ValidationTraces + Send + Sync; /// Mock [`OneshotExecutor`] implementation. pub struct MockOneshotExecutor { call_responses: Box<TxResponseFn>, tx_responses: Box<TxResponseFn>, + tx_validation_traces_responses: Box<TxValidationTracesResponseFn>, } impl fmt::Debug for MockOneshotExecutor { @@ -35,6 +38,7 @@ impl Default for MockOneshotExecutor { tx_responses: Box::new(|tx, _| { panic!("Unexpect transaction call: {tx:?}"); }), + tx_validation_traces_responses: Box::new(|_, _| ValidationTraces::default()), } } } @@ -57,19 +61,20 @@ impl MockOneshotExecutor { self.tx_responses = self.wrap_responses(responses); } + pub fn set_tx_validation_traces_responses<F>(&mut self, responses: F) + where + F: Fn(&Transaction, &OneshotEnv) -> ValidationTraces + 'static + Send + Sync, + { + self.tx_validation_traces_responses = Box::new(responses); + } + fn wrap_responses<F>(&mut self, responses: F) -> Box<TxResponseFn> where F: Fn(&Transaction, &OneshotEnv) -> ExecutionResult + 'static + Send + Sync, { Box::new( move |tx: &Transaction, env: &OneshotEnv| -> VmExecutionResultAndLogs { - VmExecutionResultAndLogs { - result: responses(tx, env), - logs: Default::default(), - statistics: Default::default(), - refunds: Default::default(), - new_known_factory_deps: None, - } + VmExecutionResultAndLogs::mock(responses(tx, env)) }, ) } @@ -82,11 +87,11 @@ impl MockOneshotExecutor { self.tx_responses = Box::new(responses); } - fn mock_inspect(&self, env: OneshotEnv, args: TxExecutionArgs) -> VmExecutionResultAndLogs { + fn mock_inspect(&self, env: &OneshotEnv, args: TxExecutionArgs) -> VmExecutionResultAndLogs { match env.system.execution_mode { - TxExecutionMode::EthCall => (self.call_responses)(&args.transaction, &env), + TxExecutionMode::EthCall => (self.call_responses)(&args.transaction, env), TxExecutionMode::VerifyExecute | TxExecutionMode::EstimateFee => { - (self.tx_responses)(&args.transaction, &env) + (self.tx_responses)(&args.transaction, env) } } } @@ -105,7 +110,7 @@ where _params: OneshotTracingParams, ) -> anyhow::Result<OneshotTransactionExecutionResult> { Ok(OneshotTransactionExecutionResult { - tx_result: Box::new(self.mock_inspect(env, args)), + tx_result: Box::new(self.mock_inspect(&env, args)), compression_result: Ok(()), call_traces: vec![], }) @@ -123,14 +128,16 @@ where env: OneshotEnv, tx: L2Tx, _validation_params: ValidationParams, - ) -> anyhow::Result<Result<(), ValidationError>> { + ) -> anyhow::Result<Result<ValidationTraces, ValidationError>> { Ok( match self - .mock_inspect(env, TxExecutionArgs::for_validation(tx)) + .mock_inspect(&env, TxExecutionArgs::for_validation(tx.clone())) .result { ExecutionResult::Halt { reason } => Err(ValidationError::FailedTx(reason)), - ExecutionResult::Success { .. } | ExecutionResult::Revert { .. } => Ok(()), + ExecutionResult::Success { .. } | ExecutionResult::Revert { .. 
} => { + Ok((self.tx_validation_traces_responses)(&tx.into(), &env)) } }, ) } diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs index 018e5abded6f..0dfdb67bff52 100644 --- a/core/lib/vm_executor/src/oneshot/mod.rs +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -17,32 +17,36 @@ use once_cell::sync::OnceCell; use zksync_multivm::{ interface::{ executor::{OneshotExecutor, TransactionValidator}, - storage::{ReadStorage, StoragePtr, StorageView, WriteStorage}, - tracer::{ValidationError, ValidationParams}, - ExecutionResult, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, - StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, VmExecutionMode, VmInterface, + storage::{ReadStorage, StorageView, StorageWithOverrides}, + tracer::{ValidationError, ValidationParams, ValidationTraces}, + utils::{DivergenceHandler, ShadowVm}, + Call, ExecutionResult, InspectExecutionMode, OneshotEnv, OneshotTracingParams, + OneshotTransactionExecutionResult, StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, + VmFactory, VmInterface, }, - tracers::{CallTracer, StorageInvocations, ValidationTracer}, + is_supported_by_fast_vm, + tracers::{CallTracer, StorageInvocations, TracerDispatcher, ValidationTracer}, utils::adjust_pubdata_price_for_tx, - vm_latest::HistoryDisabled, + vm_latest::{HistoryDisabled, HistoryEnabled}, zk_evm_latest::ethereum_types::U256, - LegacyVmInstance, MultiVMTracer, + FastVmInstance, HistoryMode, LegacyVmInstance, MultiVmTracer, }; use zksync_types::{ block::pack_block_info, - get_nonce_key, + get_nonce_key, h256_to_u256, l2::L2Tx, + u256_to_h256, utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, + vm::FastVmMode, AccountTreeId, Nonce, StorageKey, Transaction, SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, }; -use zksync_utils::{h256_to_u256, u256_to_h256}; pub use self::{ block::{BlockInfo, ResolvedBlockInfo}, contracts::{ BaseSystemContractsProvider, CallOrExecute, ContractsKind, EstimateGas, - MultiVMBaseSystemContracts, + MultiVmBaseSystemContracts, }, env::OneshotEnvParameters, mock::MockOneshotExecutor, @@ -53,10 +57,14 @@ mod contracts; mod env; mod metrics; mod mock; +#[cfg(test)] +mod tests; /// Main [`OneshotExecutor`] implementation used by the API server. -#[derive(Debug, Default)] +#[derive(Debug)] pub struct MainOneshotExecutor { + fast_vm_mode: FastVmMode, + panic_on_divergence: bool, missed_storage_invocation_limit: usize, execution_latency_histogram: Option<&'static vise::Histogram<Duration>>, } @@ -66,11 +74,28 @@ impl MainOneshotExecutor { /// The limit is applied for calls and gas estimations, but not during transaction validation. pub fn new(missed_storage_invocation_limit: usize) -> Self { Self { + fast_vm_mode: FastVmMode::Old, + panic_on_divergence: false, missed_storage_invocation_limit, execution_latency_histogram: None, } } + /// Sets the fast VM mode used by this executor. + pub fn set_fast_vm_mode(&mut self, fast_vm_mode: FastVmMode) { + if !matches!(fast_vm_mode, FastVmMode::Old) { + tracing::warn!( + "Running new VM with modes {fast_vm_mode:?}; this can lead to incorrect node behavior" + ); + } + self.fast_vm_mode = fast_vm_mode; + } + + /// Causes the VM to panic on divergence whenever it executes in the shadow mode. By default, a divergence is logged on `ERROR` level.
+ pub fn panic_on_divergence(&mut self) { + self.panic_on_divergence = true; + } + /// Sets a histogram for measuring VM execution latency. pub fn set_execution_latency_histogram( &mut self, @@ -78,19 +103,31 @@ impl MainOneshotExecutor { ) { self.execution_latency_histogram = Some(histogram); } + + fn select_fast_vm_mode( + &self, + env: &OneshotEnv, + tracing_params: &OneshotTracingParams, + ) -> FastVmMode { + if tracing_params.trace_calls || !is_supported_by_fast_vm(env.system.version) { + FastVmMode::Old // the fast VM doesn't support call tracing or old protocol versions + } else { + self.fast_vm_mode + } + } } #[async_trait] -impl<S> OneshotExecutor<S> for MainOneshotExecutor +impl<S> OneshotExecutor<StorageWithOverrides<S>> for MainOneshotExecutor where S: ReadStorage + Send + 'static, { async fn inspect_transaction_with_bytecode_compression( &self, - storage: S, + storage: StorageWithOverrides<S>, env: OneshotEnv, args: TxExecutionArgs, - params: OneshotTracingParams, + tracing_params: OneshotTracingParams, ) -> anyhow::Result<OneshotTransactionExecutionResult> { let missed_storage_invocation_limit = match env.system.execution_mode { // storage accesses are not limited for tx validation @@ -99,35 +136,24 @@ where self.missed_storage_invocation_limit } }; - let execution_latency_histogram = self.execution_latency_histogram; + let sandbox = VmSandbox { + fast_vm_mode: self.select_fast_vm_mode(&env, &tracing_params), + panic_on_divergence: self.panic_on_divergence, + storage, + env, + execution_args: args, + execution_latency_histogram: self.execution_latency_histogram, + }; tokio::task::spawn_blocking(move || { - let mut tracers = vec![]; - let mut calls_result = Arc::<OnceCell<_>>::default(); - if params.trace_calls { - tracers.push(CallTracer::new(calls_result.clone()).into_tracer_pointer()); - } - tracers.push( - StorageInvocations::new(missed_storage_invocation_limit).into_tracer_pointer(), - ); - - let executor = VmSandbox::new(storage, env, args, execution_latency_histogram); - let mut result = executor.apply(|vm, transaction| { - let (compression_result, tx_result) = vm - .inspect_transaction_with_bytecode_compression( - &mut tracers.into(), - transaction, - true, - ); - OneshotTransactionExecutionResult { - tx_result: Box::new(tx_result), - compression_result: compression_result.map(drop), - call_traces: vec![], - } - }); - - result.call_traces = Arc::make_mut(&mut calls_result).take().unwrap_or_default(); - result + sandbox.execute_in_vm(|vm, transaction| { + vm.inspect_transaction_with_bytecode_compression( + missed_storage_invocation_limit, + tracing_params, + transaction, + true, + ) + }) }) .await .context("VM execution panicked") @@ -135,41 +161,49 @@ where } #[async_trait] -impl<S> TransactionValidator<S> for MainOneshotExecutor +impl<S> TransactionValidator<StorageWithOverrides<S>> for MainOneshotExecutor where S: ReadStorage + Send + 'static, { async fn validate_transaction( &self, - storage: S, + storage: StorageWithOverrides<S>, env: OneshotEnv, tx: L2Tx, validation_params: ValidationParams, - ) -> anyhow::Result<Result<(), ValidationError>> { + ) -> anyhow::Result<Result<ValidationTraces, ValidationError>> { anyhow::ensure!( env.system.execution_mode == TxExecutionMode::VerifyExecute, "Unexpected execution mode for tx validation: {:?} (expected `VerifyExecute`)", env.system.execution_mode ); - let execution_latency_histogram = self.execution_latency_histogram; + + let l1_batch_env = env.l1_batch.clone(); + let sandbox = VmSandbox { + fast_vm_mode: FastVmMode::Old, + panic_on_divergence: self.panic_on_divergence, + storage, + env, + execution_args: TxExecutionArgs::for_validation(tx), + execution_latency_histogram: self.execution_latency_histogram,
+ }; tokio::task::spawn_blocking(move || { - let (validation_tracer, mut validation_result) = - ValidationTracer::<HistoryDisabled>::new( - validation_params, - env.system.version.into(), - ); + let validation_tracer = ValidationTracer::<HistoryDisabled>::new( + validation_params, + sandbox.env.system.version.into(), + l1_batch_env, + ); + let mut validation_result = validation_tracer.get_result(); + let validation_traces = validation_tracer.get_traces(); let tracers = vec![validation_tracer.into_tracer_pointer()]; - let executor = VmSandbox::new( - storage, - env, - TxExecutionArgs::for_validation(tx), - execution_latency_histogram, - ); - let exec_result = executor.apply(|vm, transaction| { + let exec_result = sandbox.execute_in_vm(|vm, transaction| { + let Vm::Legacy(vm) = vm else { + unreachable!("Fast VM is never used for validation yet"); + }; vm.push_transaction(transaction); - vm.inspect(&mut tracers.into(), VmExecutionMode::OneTx) + vm.inspect(&mut tracers.into(), InspectExecutionMode::OneTx) }); let validation_result = Arc::make_mut(&mut validation_result) .take() @@ -178,7 +212,7 @@ where match (exec_result.result, validation_result) { (_, Err(violated_rule)) => Err(ValidationError::ViolatedRule(violated_rule)), (ExecutionResult::Halt { reason }, _) => Err(ValidationError::FailedTx(reason)), - _ => Ok(()), + _ => Ok(validation_traces.lock().unwrap().clone()), } }) .await @@ -187,70 +221,99 @@ where } #[derive(Debug)] -struct VmSandbox<S: ReadStorage> { - vm: Box<LegacyVmInstance<S, HistoryDisabled>>, - storage_view: StoragePtr<StorageView<S>>, - transaction: Transaction, - execution_latency_histogram: Option<&'static vise::Histogram<Duration>>, +enum Vm<S: ReadStorage> { + Legacy(LegacyVmInstance<S, HistoryDisabled>), + Fast(FastVmInstance<S>), } -impl<S: ReadStorage> VmSandbox<S> { - /// This method is blocking. - fn new( - storage: S, - mut env: OneshotEnv, - execution_args: TxExecutionArgs, - execution_latency_histogram: Option<&'static vise::Histogram<Duration>>, - ) -> Self { - let mut storage_view = StorageView::new(storage); - Self::setup_storage_view(&mut storage_view, &execution_args, env.current_block); - - let protocol_version = env.system.version; - if execution_args.adjust_pubdata_price { - env.l1_batch.fee_input = adjust_pubdata_price_for_tx( - env.l1_batch.fee_input, - execution_args.transaction.gas_per_pubdata_byte_limit(), - env.l1_batch.enforced_base_fee.map(U256::from), - protocol_version.into(), - ); +impl<S: ReadStorage> Vm<S> { + fn inspect_transaction_with_bytecode_compression( + &mut self, + missed_storage_invocation_limit: usize, + params: OneshotTracingParams, + tx: Transaction, + with_compression: bool, + ) -> OneshotTransactionExecutionResult { + let mut calls_result = Arc::<OnceCell<_>>::default(); + let (compression_result, tx_result) = match self { + Self::Legacy(vm) => { + let mut tracers = Self::create_legacy_tracers( + missed_storage_invocation_limit, + params.trace_calls.then(|| calls_result.clone()), + ); + vm.inspect_transaction_with_bytecode_compression(&mut tracers, tx, with_compression) + } + Self::Fast(vm) => { + assert!( + !params.trace_calls, + "Call tracing is not supported by fast VM yet" + ); + let legacy_tracers = Self::create_legacy_tracers::<HistoryEnabled>( + missed_storage_invocation_limit, + None, + ); + let mut full_tracer = (legacy_tracers.into(), ()); + vm.inspect_transaction_with_bytecode_compression( + &mut full_tracer, + tx, + with_compression, + ) + } }; - let storage_view = storage_view.to_rc_ptr(); - let vm = Box::new(LegacyVmInstance::new_with_specific_version( - env.l1_batch, - env.system, - storage_view.clone(), - protocol_version.into_api_vm_version(), - )); + OneshotTransactionExecutionResult { + tx_result: Box::new(tx_result), + 
compression_result: compression_result.map(drop), + call_traces: Arc::make_mut(&mut calls_result).take().unwrap_or_default(), + } + } - Self { - vm, - storage_view, - transaction: execution_args.transaction, - execution_latency_histogram, + fn create_legacy_tracers<H: HistoryMode>( + missed_storage_invocation_limit: usize, + calls_result: Option<Arc<OnceCell<Vec<Call>>>>, + ) -> TracerDispatcher<StorageView<S>, H> { + let mut tracers = vec![]; + if let Some(calls_result) = calls_result { + tracers.push(CallTracer::new(calls_result).into_tracer_pointer()); } + tracers + .push(StorageInvocations::new(missed_storage_invocation_limit).into_tracer_pointer()); + tracers.into() } +} +/// Full parameters necessary to instantiate a VM for oneshot execution. +#[derive(Debug)] +struct VmSandbox<S> { + fast_vm_mode: FastVmMode, + panic_on_divergence: bool, + storage: StorageWithOverrides<S>, + env: OneshotEnv, + execution_args: TxExecutionArgs, + execution_latency_histogram: Option<&'static vise::Histogram<Duration>>, +} + +impl<S: ReadStorage> VmSandbox<S> { /// This method is blocking. - fn setup_storage_view( - storage_view: &mut StorageView<S>, + fn setup_storage( + storage: &mut StorageWithOverrides<S>, execution_args: &TxExecutionArgs, current_block: Option<StoredL2BlockEnv>, ) { let storage_view_setup_started_at = Instant::now(); if let Some(nonce) = execution_args.enforced_nonce { let nonce_key = get_nonce_key(&execution_args.transaction.initiator_account()); - let full_nonce = storage_view.read_value(&nonce_key); + let full_nonce = storage.read_value(&nonce_key); let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce); - storage_view.set_value(nonce_key, u256_to_h256(enforced_full_nonce)); + storage.set_value(nonce_key, u256_to_h256(enforced_full_nonce)); } let payer = execution_args.transaction.payer(); let balance_key = storage_key_for_eth_balance(&payer); - let mut current_balance = h256_to_u256(storage_view.read_value(&balance_key)); + let mut current_balance = h256_to_u256(storage.read_value(&balance_key)); current_balance += execution_args.added_balance; - storage_view.set_value(balance_key, u256_to_h256(current_balance)); + storage.set_value(balance_key, u256_to_h256(current_balance)); // Reset L2 block info if necessary. if let Some(current_block) = current_block { @@ -260,13 +323,13 @@ impl VmSandbox { ); let l2_block_info = pack_block_info(current_block.number.into(), current_block.timestamp); - storage_view.set_value(l2_block_info_key, u256_to_h256(l2_block_info)); + storage.set_value(l2_block_info_key, u256_to_h256(l2_block_info)); let l2_block_txs_rolling_hash_key = StorageKey::new( AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, ); - storage_view.set_value( + storage.set_value( l2_block_txs_rolling_hash_key, current_block.txs_rolling_hash, ); @@ -279,30 +342,90 @@ impl VmSandbox { } } - pub(super) fn apply<T, F>(mut self, apply_fn: F) -> T - where - F: FnOnce(&mut LegacyVmInstance<S, HistoryDisabled>, Transaction) -> T, - { + /// This method is blocking.
+ fn execute_in_vm<T>( + mut self, + action: impl FnOnce(&mut Vm<StorageWithOverrides<S>>, Transaction) -> T, + ) -> T { + Self::setup_storage( + &mut self.storage, + &self.execution_args, + self.env.current_block, + ); + + let protocol_version = self.env.system.version; + let mode = self.env.system.execution_mode; + if self.execution_args.adjust_pubdata_price { + self.env.l1_batch.fee_input = adjust_pubdata_price_for_tx( + self.env.l1_batch.fee_input, + self.execution_args.transaction.gas_per_pubdata_byte_limit(), + self.env.l1_batch.enforced_base_fee.map(U256::from), + protocol_version.into(), + ); + }; + + let transaction = self.execution_args.transaction; let tx_id = format!( "{:?}-{}", - self.transaction.initiator_account(), - self.transaction.nonce().unwrap_or(Nonce(0)) + transaction.initiator_account(), + transaction.nonce().unwrap_or(Nonce(0)) ); + let storage_view = StorageView::new(self.storage).to_rc_ptr(); + let mut vm = match self.fast_vm_mode { + FastVmMode::Old => Vm::Legacy(LegacyVmInstance::new_with_specific_version( + self.env.l1_batch, + self.env.system, + storage_view.clone(), + protocol_version.into_api_vm_version(), + )), + FastVmMode::New => Vm::Fast(FastVmInstance::fast( + self.env.l1_batch, + self.env.system, + storage_view.clone(), + )), + FastVmMode::Shadow => { + let mut vm = + ShadowVm::new(self.env.l1_batch, self.env.system, storage_view.clone()); + if !self.panic_on_divergence { + let transaction = format!("{:?}", transaction); + let handler = DivergenceHandler::new(move |errors, _| { + tracing::error!(transaction, ?mode, "{errors}"); + }); + vm.set_divergence_handler(handler); + } + Vm::Fast(FastVmInstance::Shadowed(vm)) + } + }; + let started_at = Instant::now(); - let result = apply_fn(&mut *self.vm, self.transaction); + let result = action(&mut vm, transaction); let vm_execution_took = started_at.elapsed(); if let Some(histogram) = self.execution_latency_histogram { histogram.observe(vm_execution_took); } - let memory_metrics = self.vm.record_vm_memory_metrics(); - metrics::report_vm_memory_metrics( - &tx_id, - &memory_metrics, - vm_execution_took, - &self.storage_view.borrow().stats(), - ); + + match &vm { + Vm::Legacy(vm) => { + let memory_metrics = vm.record_vm_memory_metrics(); + metrics::report_vm_memory_metrics( + &tx_id, + &memory_metrics, + vm_execution_took, + &storage_view.borrow().stats(), + ); + } + Vm::Fast(_) => { + // The new VM implementation doesn't have the same memory model as old ones, so it doesn't report memory metrics, + // only storage-related ones. + metrics::report_vm_storage_metrics( + &format!("Tx {tx_id}"), + vm_execution_took, + &storage_view.borrow().stats(), + ); + } + } result } } diff --git a/core/lib/vm_executor/src/oneshot/tests.rs b/core/lib/vm_executor/src/oneshot/tests.rs new file mode 100644 index 000000000000..9649f5b49905 --- /dev/null +++ b/core/lib/vm_executor/src/oneshot/tests.rs @@ -0,0 +1,106 @@ +//! Oneshot executor tests.
+ +use assert_matches::assert_matches; +use test_casing::{test_casing, Product}; +use zksync_multivm::interface::storage::InMemoryStorage; +use zksync_types::{ProtocolVersionId, H256}; + +use super::*; +use crate::testonly::{ + create_l2_transaction, default_l1_batch_env, default_system_env, FAST_VM_MODES, +}; + +const EXEC_MODES: [TxExecutionMode; 3] = [ + TxExecutionMode::EstimateFee, + TxExecutionMode::EthCall, + TxExecutionMode::VerifyExecute, +]; + +#[test] +fn selecting_vm_for_execution() { + let mut executor = MainOneshotExecutor::new(usize::MAX); + executor.set_fast_vm_mode(FastVmMode::New); + + for exec_mode in EXEC_MODES { + let env = OneshotEnv { + system: default_system_env(exec_mode), + l1_batch: default_l1_batch_env(1), + current_block: None, + }; + let mode = executor.select_fast_vm_mode(&env, &OneshotTracingParams::default()); + assert_matches!(mode, FastVmMode::New); + + // Tracing calls is not supported by the new VM. + let mode = executor.select_fast_vm_mode(&env, &OneshotTracingParams { trace_calls: true }); + assert_matches!(mode, FastVmMode::Old); + + // Old protocol versions are not supported either. + let mut old_env = env.clone(); + old_env.system.version = ProtocolVersionId::Version22; + let mode = executor.select_fast_vm_mode(&old_env, &OneshotTracingParams::default()); + assert_matches!(mode, FastVmMode::Old); + } +} + +#[test] +fn setting_up_nonce_and_balance_in_storage() { + let mut storage = StorageWithOverrides::new(InMemoryStorage::default()); + let tx = create_l2_transaction(1_000_000_000.into(), Nonce(1)); + let execution_args = TxExecutionArgs::for_gas_estimate(tx.clone().into()); + VmSandbox::setup_storage(&mut storage, &execution_args, None); + + // Check the overridden nonce and balance. + let nonce_key = get_nonce_key(&tx.initiator_account()); + assert_eq!(storage.read_value(&nonce_key), H256::from_low_u64_be(1)); + let balance_key = storage_key_for_eth_balance(&tx.initiator_account()); + let expected_added_balance = tx.common_data.fee.gas_limit * tx.common_data.fee.max_fee_per_gas; + assert_eq!( + storage.read_value(&balance_key), + u256_to_h256(expected_added_balance) + ); + + let mut storage = InMemoryStorage::default(); + storage.set_value(balance_key, H256::from_low_u64_be(2_000_000_000)); + let mut storage = StorageWithOverrides::new(storage); + VmSandbox::setup_storage(&mut storage, &execution_args, None); + + assert_eq!( + storage.read_value(&balance_key), + u256_to_h256(expected_added_balance + U256::from(2_000_000_000)) + ); +} + +#[test_casing(9, Product((EXEC_MODES, FAST_VM_MODES)))] +#[tokio::test] +async fn inspecting_transfer(exec_mode: TxExecutionMode, fast_vm_mode: FastVmMode) { + let tx = create_l2_transaction(1_000_000_000.into(), Nonce(0)); + let mut storage = InMemoryStorage::with_system_contracts(); + storage.set_value( + storage_key_for_eth_balance(&tx.initiator_account()), + u256_to_h256(u64::MAX.into()), + ); + let storage = StorageWithOverrides::new(storage); + + let l1_batch = default_l1_batch_env(1); + let env = OneshotEnv { + system: default_system_env(exec_mode), + current_block: Some(StoredL2BlockEnv { + number: l1_batch.first_l2_block.number - 1, + timestamp: l1_batch.first_l2_block.timestamp - 1, + txs_rolling_hash: H256::zero(), + }), + l1_batch, + }; + let args = TxExecutionArgs::for_gas_estimate(tx.into()); + let tracing = OneshotTracingParams::default(); + + let mut executor = MainOneshotExecutor::new(usize::MAX); + executor.set_fast_vm_mode(fast_vm_mode); + let result = executor + 
.inspect_transaction_with_bytecode_compression(storage, env, args, tracing) + .await + .unwrap(); + result.compression_result.unwrap(); + let exec_result = result.tx_result.result; + assert!(!exec_result.is_failed(), "{exec_result:?}"); +} diff --git a/core/lib/vm_executor/src/storage.rs b/core/lib/vm_executor/src/storage.rs index fa0e530c1909..e5a2d404233b 100644 --- a/core/lib/vm_executor/src/storage.rs +++ b/core/lib/vm_executor/src/storage.rs @@ -7,8 +7,9 @@ use zksync_contracts::BaseSystemContracts; use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; use zksync_types::{ - block::L2BlockHeader, fee_model::BatchFeeInput, snapshots::SnapshotRecoveryStatus, Address, - L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, + block::L2BlockHeader, commitment::PubdataParams, fee_model::BatchFeeInput, + snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2BlockNumber, L2ChainId, + ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, }; const BATCH_COMPUTATIONAL_GAS_LIMIT: u32 = u32::MAX; @@ -263,7 +264,7 @@ impl L1BatchParamsProvider { first_l2_block_in_batch: &FirstL2BlockInBatch, validation_computational_gas_limit: u32, chain_id: L2ChainId, - ) -> anyhow::Result<(SystemEnv, L1BatchEnv)> { + ) -> anyhow::Result<(SystemEnv, L1BatchEnv, PubdataParams)> { anyhow::ensure!( first_l2_block_in_batch.l1_batch_number > L1BatchNumber(0), "Loading params for genesis L1 batch not supported" @@ -317,7 +318,7 @@ impl L1BatchParamsProvider { .await .context("failed getting base system contracts")?; - Ok(l1_batch_params( + let (system_env, l1_batch_env) = l1_batch_params( first_l2_block_in_batch.l1_batch_number, first_l2_block_in_batch.header.fee_account_address, l1_batch_timestamp, @@ -333,6 +334,12 @@ impl L1BatchParamsProvider { .context("`protocol_version` must be set for L2 block")?, first_l2_block_in_batch.header.virtual_blocks, chain_id, + ); + + Ok(( + system_env, + l1_batch_env, + first_l2_block_in_batch.header.pubdata_params, )) } @@ -346,7 +353,7 @@ impl L1BatchParamsProvider { number: L1BatchNumber, validation_computational_gas_limit: u32, chain_id: L2ChainId, - ) -> anyhow::Result> { + ) -> anyhow::Result> { let first_l2_block = self .load_first_l2_block_in_batch(storage, number) .await diff --git a/core/lib/vm_executor/src/testonly.rs b/core/lib/vm_executor/src/testonly.rs index 5bcd604a4324..2fa7f075db71 100644 --- a/core/lib/vm_executor/src/testonly.rs +++ b/core/lib/vm_executor/src/testonly.rs @@ -2,11 +2,14 @@ use once_cell::sync::Lazy; use zksync_contracts::BaseSystemContracts; use zksync_multivm::{ interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, + utils::derive_base_fee_and_gas_per_pubdata, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + zk_evm_latest::ethereum_types::U256, }; use zksync_types::{ - block::L2BlockHasher, fee_model::BatchFeeInput, vm::FastVmMode, Address, L1BatchNumber, - L2BlockNumber, L2ChainId, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, + block::L2BlockHasher, fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, + transaction_request::PaymasterParams, vm::FastVmMode, Address, K256PrivateKey, L1BatchNumber, + L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, }; static BASE_SYSTEM_CONTRACTS: Lazy = @@ -43,3 +46,28 @@ pub(crate) fn default_l1_batch_env(number: u32) -> L1BatchEnv { fee_input: BatchFeeInput::sensible_l1_pegged_default(), } } + +pub(crate) fn create_l2_transaction(value: U256, nonce: 
Nonce) -> L2Tx { + let (max_fee_per_gas, gas_per_pubdata_limit) = derive_base_fee_and_gas_per_pubdata( + BatchFeeInput::sensible_l1_pegged_default(), + ProtocolVersionId::latest().into(), + ); + let fee = Fee { + gas_limit: 10_000_000.into(), + max_fee_per_gas: max_fee_per_gas.into(), + max_priority_fee_per_gas: 0_u64.into(), + gas_per_pubdata_limit: gas_per_pubdata_limit.into(), + }; + L2Tx::new_signed( + Some(Address::random()), + vec![], + nonce, + fee, + value, + L2ChainId::default(), + &K256PrivateKey::random(), + vec![], + PaymasterParams::default(), + ) + .unwrap() +} diff --git a/core/lib/vm_interface/src/executor.rs b/core/lib/vm_interface/src/executor.rs index 119f975fecd5..30534b1420cf 100644 --- a/core/lib/vm_interface/src/executor.rs +++ b/core/lib/vm_interface/src/executor.rs @@ -3,11 +3,11 @@ use std::fmt; use async_trait::async_trait; -use zksync_types::{l2::L2Tx, Transaction}; +use zksync_types::{commitment::PubdataParams, l2::L2Tx, Transaction}; use crate::{ storage::{ReadStorage, StorageView}, - tracer::{ValidationError, ValidationParams}, + tracer::{ValidationError, ValidationParams, ValidationTraces}, BatchTransactionExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, SystemEnv, TxExecutionArgs, }; @@ -20,6 +20,7 @@ pub trait BatchExecutorFactory: 'static + Send + fmt::Debug { storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Box>; } @@ -68,5 +69,5 @@ pub trait TransactionValidator: OneshotExecutor { env: OneshotEnv, tx: L2Tx, validation_params: ValidationParams, - ) -> anyhow::Result>; + ) -> anyhow::Result>; } diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs index 645e3e7c856e..39f949e5d8a9 100644 --- a/core/lib/vm_interface/src/lib.rs +++ b/core/lib/vm_interface/src/lib.rs @@ -24,16 +24,16 @@ pub use crate::{ VmRevertReason, VmRevertReasonParsingError, }, inputs::{ - L1BatchEnv, L2BlockEnv, OneshotEnv, OneshotTracingParams, StoredL2BlockEnv, SystemEnv, - TxExecutionArgs, TxExecutionMode, VmExecutionMode, + InspectExecutionMode, L1BatchEnv, L2BlockEnv, OneshotEnv, OneshotTracingParams, + StoredL2BlockEnv, SystemEnv, TxExecutionArgs, TxExecutionMode, VmExecutionMode, }, outputs::{ BatchTransactionExecutionResult, BootloaderMemory, Call, CallType, CircuitStatistic, CompressedBytecodeInfo, CurrentExecutionState, DeduplicatedWritesMetrics, - ExecutionResult, FinishedL1Batch, L2Block, OneshotTransactionExecutionResult, Refunds, - TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent, - VmExecutionLogs, VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, - VmMemoryMetrics, + ExecutionResult, FinishedL1Batch, L2Block, OneshotTransactionExecutionResult, + PushTransactionResult, Refunds, TransactionExecutionMetrics, + TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionLogs, + VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics, }, tracer, }, @@ -41,6 +41,7 @@ pub use crate::{ }; pub mod executor; +pub mod pubdata; pub mod storage; mod types; pub mod utils; diff --git a/core/lib/vm_interface/src/pubdata/mod.rs b/core/lib/vm_interface/src/pubdata/mod.rs new file mode 100644 index 000000000000..f901687b5fa6 --- /dev/null +++ b/core/lib/vm_interface/src/pubdata/mod.rs @@ -0,0 +1,90 @@ +use zksync_types::{ + l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, ProtocolVersionId, H256, U256, +}; + +/// Corresponds to the 
following solidity event:
+/// ```solidity
+/// struct L2ToL1Log {
+///     uint8 l2ShardId;
+///     bool isService;
+///     uint16 txNumberInBlock;
+///     address sender;
+///     bytes32 key;
+///     bytes32 value;
+/// }
+/// ```
+#[derive(Debug, Default, Clone, PartialEq)]
+pub struct L1MessengerL2ToL1Log {
+    pub l2_shard_id: u8,
+    pub is_service: bool,
+    pub tx_number_in_block: u16,
+    pub sender: Address,
+    pub key: U256,
+    pub value: U256,
+}
+
+impl L1MessengerL2ToL1Log {
+    pub fn packed_encoding(&self) -> Vec<u8> {
+        /// Converts a `U256` value into a big-endian bytes array.
+        fn u256_to_bytes_be(value: &U256) -> Vec<u8> {
+            let mut bytes = vec![0u8; 32];
+            value.to_big_endian(bytes.as_mut_slice());
+            bytes
+        }
+
+        let mut res: Vec<u8> = vec![];
+        res.push(self.l2_shard_id);
+        res.push(self.is_service as u8);
+        res.extend_from_slice(&self.tx_number_in_block.to_be_bytes());
+        res.extend_from_slice(self.sender.as_bytes());
+        res.extend(u256_to_bytes_be(&self.key));
+        res.extend(u256_to_bytes_be(&self.value));
+        res
+    }
+}
+
+impl From<L1MessengerL2ToL1Log> for L2ToL1Log {
+    fn from(log: L1MessengerL2ToL1Log) -> Self {
+        fn u256_to_h256(num: U256) -> H256 {
+            let mut bytes = [0u8; 32];
+            num.to_big_endian(&mut bytes);
+            H256::from_slice(&bytes)
+        }
+
+        L2ToL1Log {
+            shard_id: log.l2_shard_id,
+            is_service: log.is_service,
+            tx_number_in_block: log.tx_number_in_block,
+            sender: log.sender,
+            key: u256_to_h256(log.key),
+            value: u256_to_h256(log.value),
+        }
+    }
+}
+
+/// Struct based on which the pubdata blob is formed.
+#[derive(Debug, Clone, Default)]
+pub struct PubdataInput {
+    pub user_logs: Vec<L1MessengerL2ToL1Log>,
+    pub l2_to_l1_messages: Vec<Vec<u8>>,
+    pub published_bytecodes: Vec<Vec<u8>>,
+    pub state_diffs: Vec<StateDiffRecord>,
+}
+
+/// Trait that encapsulates pubdata building logic. It is implemented for the rollup and validium cases.
+/// If a chain needs a custom pubdata format, another implementation should be added.
+pub trait PubdataBuilder: std::fmt::Debug {
+    fn l2_da_validator(&self) -> Address;
+
+    fn l1_messenger_operator_input(
+        &self,
+        input: &PubdataInput,
+        protocol_version: ProtocolVersionId,
+    ) -> Vec<u8>;
+
+    fn settlement_layer_pubdata(
+        &self,
+        input: &PubdataInput,
+        protocol_version: ProtocolVersionId,
+    ) -> Vec<u8>;
+}
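Each user log therefore packs into a fixed 88 bytes: 1 (shard id) + 1 (service flag) + 2 (tx number, big-endian) + 20 (sender address) + 32 (key) + 32 (value). A small self-contained check of that arithmetic, independent of the types above:

```rust
fn main() {
    // Field widths of the packed `L1MessengerL2ToL1Log` encoding, in bytes.
    let l2_shard_id = 1; // u8
    let is_service = 1; // bool, encoded as one byte
    let tx_number_in_block = 2; // u16, big-endian
    let sender = 20; // address
    let key = 32; // U256, big-endian
    let value = 32; // U256, big-endian

    let total = l2_shard_id + is_service + tx_number_in_block + sender + key + value;
    assert_eq!(total, 88);
}
```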
diff --git a/core/lib/vm_interface/src/storage/in_memory.rs b/core/lib/vm_interface/src/storage/in_memory.rs
index d83f675cd54e..f756e7a6d76f 100644
--- a/core/lib/vm_interface/src/storage/in_memory.rs
+++ b/core/lib/vm_interface/src/storage/in_memory.rs
@@ -1,9 +1,9 @@
 use std::collections::{hash_map::Entry, BTreeMap, HashMap};
 
 use zksync_types::{
-    block::DeployedContract, get_code_key, get_known_code_key, get_system_context_init_logs,
-    system_contracts::get_system_smart_contracts, L2ChainId, StorageKey, StorageLog, StorageValue,
-    H256,
+    block::DeployedContract, bytecode::BytecodeHash, get_code_key, get_known_code_key,
+    get_system_context_init_logs, system_contracts::get_system_smart_contracts, L2ChainId,
+    StorageKey, StorageLog, StorageValue, H256,
 };
 
 use super::ReadStorage;
@@ -21,29 +21,20 @@ pub struct InMemoryStorage {
 
 impl InMemoryStorage {
     /// Constructs a storage that contains system smart contracts.
-    pub fn with_system_contracts(bytecode_hasher: impl Fn(&[u8]) -> H256) -> Self {
-        Self::with_system_contracts_and_chain_id(
-            L2ChainId::from(IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID),
-            bytecode_hasher,
-        )
+    pub fn with_system_contracts() -> Self {
+        Self::with_system_contracts_and_chain_id(L2ChainId::from(
+            IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID,
+        ))
     }
 
     /// Constructs a storage that contains system smart contracts (with a given chain id).
-    pub fn with_system_contracts_and_chain_id(
-        chain_id: L2ChainId,
-        bytecode_hasher: impl Fn(&[u8]) -> H256,
-    ) -> Self {
-        Self::with_custom_system_contracts_and_chain_id(
-            chain_id,
-            bytecode_hasher,
-            get_system_smart_contracts(false),
-        )
+    pub fn with_system_contracts_and_chain_id(chain_id: L2ChainId) -> Self {
+        Self::with_custom_system_contracts_and_chain_id(chain_id, get_system_smart_contracts(false))
     }
 
     /// Constructs a storage that contains custom system contracts (provided in a vector).
     pub fn with_custom_system_contracts_and_chain_id(
         chain_id: L2ChainId,
-        bytecode_hasher: impl Fn(&[u8]) -> H256,
         contracts: Vec<DeployedContract>,
     ) -> Self {
         let system_context_init_log = get_system_context_init_logs(chain_id);
@@ -51,7 +42,7 @@ impl InMemoryStorage {
         let state_without_indices: BTreeMap<_, _> = contracts
             .iter()
             .flat_map(|contract| {
-                let bytecode_hash = bytecode_hasher(&contract.bytecode);
+                let bytecode_hash = BytecodeHash::for_bytecode(&contract.bytecode).value();
 
                 let deployer_code_key = get_code_key(contract.account_id.address());
                 let is_known_code_key = get_known_code_key(&bytecode_hash);
@@ -72,7 +63,12 @@ impl InMemoryStorage {
 
         let factory_deps = contracts
             .into_iter()
-            .map(|contract| (bytecode_hasher(&contract.bytecode), contract.bytecode))
+            .map(|contract| {
+                (
+                    BytecodeHash::for_bytecode(&contract.bytecode).value(),
+                    contract.bytecode,
+                )
+            })
             .collect();
 
         let last_enum_index_set = state.len() as u64;
diff --git a/core/lib/vm_interface/src/storage/mod.rs b/core/lib/vm_interface/src/storage/mod.rs
index 6cdcd33db682..aade56ca5d96 100644
--- a/core/lib/vm_interface/src/storage/mod.rs
+++ b/core/lib/vm_interface/src/storage/mod.rs
@@ -5,11 +5,13 @@ use zksync_types::{get_known_code_key, StorageKey, StorageValue, H256};
 pub use self::{
     // Note, that `test_infra` of the bootloader tests relies on this value to be exposed
     in_memory::{InMemoryStorage, IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID},
+    overrides::StorageWithOverrides,
     snapshot::{StorageSnapshot, StorageWithSnapshot},
     view::{ImmutableStorageView, StorageView, StorageViewCache, StorageViewStats},
 };
 
 mod in_memory;
+mod overrides;
 mod snapshot;
 mod view;
 
diff --git a/core/lib/vm_interface/src/storage/overrides.rs b/core/lib/vm_interface/src/storage/overrides.rs
new file mode 100644
index 000000000000..ad5a3d8624f1
--- /dev/null
+++ b/core/lib/vm_interface/src/storage/overrides.rs
@@ -0,0 +1,70 @@
+//! VM storage functionality specifically used in the VM sandbox.
+
+use std::{
+    collections::{HashMap, HashSet},
+    fmt,
+};
+
+use zksync_types::{AccountTreeId, StorageKey, StorageValue, H256};
+
+use super::ReadStorage;
+
+/// A storage view that allows to override some of the storage values.
+#[derive(Debug)]
+pub struct StorageWithOverrides<S> {
+    storage_handle: S,
+    overridden_slots: HashMap<StorageKey, StorageValue>,
+    overridden_factory_deps: HashMap<H256, Vec<u8>>,
+    empty_accounts: HashSet<AccountTreeId>,
+}
+
+impl<S: ReadStorage> StorageWithOverrides<S> {
+    /// Creates a new storage view based on the underlying storage.
+    pub fn new(storage: S) -> Self {
+        Self {
+            storage_handle: storage,
+            overridden_slots: HashMap::new(),
+            overridden_factory_deps: HashMap::new(),
+            empty_accounts: HashSet::new(),
+        }
+    }
+
+    pub fn set_value(&mut self, key: StorageKey, value: StorageValue) {
+        self.overridden_slots.insert(key, value);
+    }
+
+    pub fn store_factory_dep(&mut self, hash: H256, code: Vec<u8>) {
+        self.overridden_factory_deps.insert(hash, code);
+    }
+
+    pub fn insert_erased_account(&mut self, account: AccountTreeId) {
+        self.empty_accounts.insert(account);
+    }
+}
+
+impl<S: ReadStorage> ReadStorage for StorageWithOverrides<S> {
+    fn read_value(&mut self, key: &StorageKey) -> StorageValue {
+        if let Some(value) = self.overridden_slots.get(key) {
+            return *value;
+        }
+        if self.empty_accounts.contains(key.account()) {
+            return H256::zero();
+        }
+        self.storage_handle.read_value(key)
+    }
+
+    fn is_write_initial(&mut self, key: &StorageKey) -> bool {
+        self.storage_handle.is_write_initial(key)
+    }
+
+    fn load_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>> {
+        self.overridden_factory_deps
+            .get(&hash)
+            .cloned()
+            .or_else(|| self.storage_handle.load_factory_dep(hash))
+    }
+
+    fn get_enumeration_index(&mut self, key: &StorageKey) -> Option<u64> {
+        self.storage_handle.get_enumeration_index(key)
+    }
+}
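The now-generic `StorageWithOverrides` shadows a base storage without mutating it: overridden slots win, erased accounts read as zero, and everything else falls through to the wrapped storage. A hedged usage sketch, assuming the re-exports shown in these hunks (the oneshot tests above use the same `zksync_multivm::interface::storage` paths):

```rust
use zksync_multivm::interface::storage::{InMemoryStorage, ReadStorage, StorageWithOverrides};
use zksync_types::{AccountTreeId, Address, StorageKey, H256};

fn main() {
    // Wrap a base storage; overrides shadow it without mutating it.
    let base = InMemoryStorage::with_system_contracts();
    let mut storage = StorageWithOverrides::new(base);

    let account = AccountTreeId::new(Address::repeat_byte(1));
    let key = StorageKey::new(account, H256::zero());

    // Overridden slots take precedence over the underlying storage...
    storage.set_value(key, H256::from_low_u64_be(42));
    assert_eq!(storage.read_value(&key), H256::from_low_u64_be(42));

    // ...and erased accounts read as zero unless explicitly overridden.
    let erased = AccountTreeId::new(Address::repeat_byte(2));
    storage.insert_erased_account(erased);
    let erased_key = StorageKey::new(erased, H256::zero());
    assert_eq!(storage.read_value(&erased_key), H256::zero());
}
```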
diff --git a/core/lib/vm_interface/src/storage/view.rs b/core/lib/vm_interface/src/storage/view.rs
index ec9267609e23..249d584c9f6c 100644
--- a/core/lib/vm_interface/src/storage/view.rs
+++ b/core/lib/vm_interface/src/storage/view.rs
@@ -102,6 +102,16 @@ impl<S: ReadStorage> StorageView<S> {
     pub fn cache(&self) -> StorageViewCache {
         self.cache.clone()
     }
+
+    /// Provides mutable access to the underlying storage.
+    ///
+    /// # Warning
+    ///
+    /// Mutating the underlying storage directly can easily break implied `StorageView` invariants, so use with care.
+    #[doc(hidden)]
+    pub fn inner_mut(&mut self) -> &mut S {
+        &mut self.storage_handle
+    }
 }
 
 impl<S: ReadStorage + ?Sized> ReadStorage for Box<S>
diff --git a/core/lib/vm_interface/src/types/errors/halt.rs b/core/lib/vm_interface/src/types/errors/halt.rs
index 88328e42b812..d24f55ab5044 100644
--- a/core/lib/vm_interface/src/types/errors/halt.rs
+++ b/core/lib/vm_interface/src/types/errors/halt.rs
@@ -42,6 +42,7 @@ pub enum Halt {
     VMPanic,
     TracerCustom(String),
     FailedToPublishCompressedBytecodes,
+    FailedBlockTimestampAssertion,
 }
 
 impl fmt::Display for Halt {
@@ -116,6 +117,9 @@ impl fmt::Display for Halt {
             Halt::FailedToPublishCompressedBytecodes => {
                 write!(f, "Failed to publish compressed bytecodes")
             }
+            Halt::FailedBlockTimestampAssertion => {
+                write!(f, "Transaction failed block.timestamp assertion")
+            }
         }
     }
 }
diff --git a/core/lib/vm_interface/src/types/inputs/execution_mode.rs b/core/lib/vm_interface/src/types/inputs/execution_mode.rs
index 41492af6edc5..f091a259d30d 100644
--- a/core/lib/vm_interface/src/types/inputs/execution_mode.rs
+++ b/core/lib/vm_interface/src/types/inputs/execution_mode.rs
@@ -13,3 +13,22 @@ pub enum VmExecutionMode {
     /// Stop after executing the entire bootloader. But before you exit the bootloader.
     Bootloader,
 }
+
+/// Subset of `VmExecutionMode` variants that do not require any additional input
+/// and can be invoked with the `inspect` method.
+#[derive(Debug, Copy, Clone)]
+pub enum InspectExecutionMode {
+    /// Stop after executing the next transaction.
+    OneTx,
+    /// Stop after executing the entire bootloader. But before you exit the bootloader.
+    Bootloader,
+}
+
+impl From<InspectExecutionMode> for VmExecutionMode {
+    fn from(mode: InspectExecutionMode) -> Self {
+        match mode {
+            InspectExecutionMode::Bootloader => Self::Bootloader,
+            InspectExecutionMode::OneTx => Self::OneTx,
+        }
+    }
+}
diff --git a/core/lib/vm_interface/src/types/inputs/mod.rs b/core/lib/vm_interface/src/types/inputs/mod.rs
index 24f58ae72f16..83f87f0fe1dd 100644
--- a/core/lib/vm_interface/src/types/inputs/mod.rs
+++ b/core/lib/vm_interface/src/types/inputs/mod.rs
@@ -3,7 +3,7 @@ use zksync_types::{
 };
 
 pub use self::{
-    execution_mode::VmExecutionMode,
+    execution_mode::{InspectExecutionMode, VmExecutionMode},
     l1_batch_env::L1BatchEnv,
     l2_block::{L2BlockEnv, StoredL2BlockEnv},
     system_env::{SystemEnv, TxExecutionMode},
@@ -15,7 +15,7 @@ mod l2_block;
 mod system_env;
 
 /// Full environment for oneshot transaction / call execution.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct OneshotEnv {
     /// System environment.
     pub system: SystemEnv,
diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs
index 018ea075db51..9bb784fbf71c 100644
--- a/core/lib/vm_interface/src/types/outputs/execution_result.rs
+++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs
@@ -21,10 +21,6 @@ const L1_MESSAGE_EVENT_SIGNATURE: H256 = H256([
     58, 54, 228, 114, 145, 244, 32, 31, 175, 19, 127, 171, 8, 29, 146, 41, 91, 206, 45, 83, 190,
     44, 108, 166, 139, 168, 44, 127, 170, 156, 226, 65,
 ]);
-const PUBLISHED_BYTECODE_SIGNATURE: H256 = H256([
-    201, 71, 34, 255, 19, 234, 207, 83, 84, 124, 71, 65, 218, 181, 34, 131, 83, 160, 89, 56, 255,
-    205, 213, 212, 162, 213, 51, 174, 14, 97, 130, 135,
-]);
 
 pub fn bytecode_len_in_bytes(bytecodehash: H256) -> usize {
     usize::from(u16::from_be_bytes([bytecodehash[2], bytecodehash[3]])) * 32
@@ -50,6 +46,11 @@ impl VmEvent {
         72, 13, 60, 159, 114, 123, 94, 92, 18, 3, 212, 198, 31, 177, 133, 211, 127, 8, 230, 178,
         220, 94, 155, 191, 152, 89, 27, 26, 122, 221, 245, 124,
     ]);
+    /// Long signature of the known bytecodes storage bytecode publication event (`MarkedAsKnown`).
+    pub const PUBLISHED_BYTECODE_SIGNATURE: H256 = H256([
+        201, 71, 34, 255, 19, 234, 207, 83, 84, 124, 71, 65, 218, 181, 34, 131, 83, 160, 89, 56,
+        255, 205, 213, 212, 162, 213, 51, 174, 14, 97, 130, 135,
+    ]);
 
     /// Extracts all the "long" L2->L1 messages that were submitted by the L1Messenger contract.
     pub fn extract_long_l2_to_l1_messages(events: &[Self]) -> Vec<Vec<u8>> {
@@ -79,12 +80,25 @@ impl VmEvent {
                 // Filter events from the deployer contract that match the expected signature.
                 event.address == KNOWN_CODES_STORAGE_ADDRESS
                     && event.indexed_topics.len() == 3
-                    && event.indexed_topics[0] == PUBLISHED_BYTECODE_SIGNATURE
+                    && event.indexed_topics[0] == Self::PUBLISHED_BYTECODE_SIGNATURE
                     && event.indexed_topics[2] != H256::zero()
             })
             .map(|event| event.indexed_topics[1])
             .collect()
     }
+
+    /// Extracts all bytecodes marked as known on the system contracts.
+    pub fn extract_bytecodes_marked_as_known(events: &[Self]) -> impl Iterator<Item = H256> + '_ {
+        events
+            .iter()
+            .filter(|event| {
+                // Filter events from the deployer contract that match the expected signature.
+                event.address == KNOWN_CODES_STORAGE_ADDRESS
+                    && event.indexed_topics.len() == 3
+                    && event.indexed_topics[0] == Self::PUBLISHED_BYTECODE_SIGNATURE
+            })
+            .map(|event| event.indexed_topics[1])
+    }
 }
 
 /// Refunds produced for the user.
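Both extraction helpers above follow the same shape: keep events emitted by the known-codes system contract whose first indexed topic equals the `MarkedAsKnown` signature, then project the bytecode hash out of the second topic. A self-contained miniature of that filter (simplified stand-in types and placeholder constants, not the crate's `VmEvent`):

```rust
type Hash = [u8; 32];

struct Event {
    address: u8, // stand-in for the emitting contract address
    indexed_topics: Vec<Hash>,
}

const KNOWN_CODES_STORAGE_ADDRESS: u8 = 0x04; // placeholder value
const PUBLISHED_BYTECODE_SIGNATURE: Hash = [0xc9; 32]; // placeholder value

fn extract_marked_as_known(events: &[Event]) -> impl Iterator<Item = Hash> + '_ {
    events
        .iter()
        .filter(|event| {
            event.address == KNOWN_CODES_STORAGE_ADDRESS
                && event.indexed_topics.len() == 3
                && event.indexed_topics[0] == PUBLISHED_BYTECODE_SIGNATURE
        })
        .map(|event| event.indexed_topics[1])
}

fn main() {
    let hit = Event {
        address: KNOWN_CODES_STORAGE_ADDRESS,
        indexed_topics: vec![PUBLISHED_BYTECODE_SIGNATURE, [1; 32], [0; 32]],
    };
    let miss = Event {
        address: 0x05,
        indexed_topics: vec![PUBLISHED_BYTECODE_SIGNATURE, [2; 32], [0; 32]],
    };
    let hashes: Vec<_> = extract_marked_as_known(&[hit, miss]).collect();
    assert_eq!(hashes, vec![[1; 32]]);
}
```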
@@ -120,10 +134,10 @@ pub struct VmExecutionResultAndLogs {
     pub logs: VmExecutionLogs,
     pub statistics: VmExecutionStatistics,
     pub refunds: Refunds,
-    /// Bytecodes decommitted during VM execution. `None` if not computed by the VM.
-    // FIXME: currently, this is only filled up by `vm_latest`; probably makes sense to narrow down
-    // to *dynamic* factory deps, so that `HashMap::new()` is a valid value for VMs not supporting EVM emulation.
-    pub new_known_factory_deps: Option<HashMap<H256, Vec<u8>>>,
+    /// Dynamic bytecodes decommitted during VM execution (i.e., not present in the storage at the start of VM execution
+    /// or in `factory_deps` fields of executed transactions). Currently, the only kind of such codes are EVM bytecodes.
+    /// Correspondingly, they may only be present if supported by the VM version, and if the VM is initialized with the EVM emulator base system contract.
+    pub dynamic_factory_deps: HashMap<H256, Vec<u8>>,
 }
 
 #[derive(Debug, Clone, PartialEq)]
@@ -144,6 +158,22 @@ impl ExecutionResult {
     }
 }
 
 impl VmExecutionResultAndLogs {
+    /// Creates a mock full result based on the provided base result.
+    pub fn mock(result: ExecutionResult) -> Self {
+        Self {
+            result,
+            logs: VmExecutionLogs::default(),
+            statistics: VmExecutionStatistics::default(),
+            refunds: Refunds::default(),
+            dynamic_factory_deps: HashMap::new(),
+        }
+    }
+
+    /// Creates a mock successful result with no payload.
+    pub fn mock_success() -> Self {
+        Self::mock(ExecutionResult::Success { output: vec![] })
+    }
+
     pub fn get_execution_metrics(&self, tx: Option<&Transaction>) -> VmExecutionMetrics {
         let contracts_deployed = tx
             .map(|tx| tx.execute.factory_deps.len() as u16)
@@ -414,6 +444,6 @@ mod tests {
             "MarkedAsKnown",
             &[ethabi::ParamType::FixedBytes(32), ethabi::ParamType::Bool],
         );
-        assert_eq!(PUBLISHED_BYTECODE_SIGNATURE, expected_signature);
+        assert_eq!(VmEvent::PUBLISHED_BYTECODE_SIGNATURE, expected_signature);
     }
 }
diff --git a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs
index 8f7c1d4fb0d6..7e90d425ab15 100644
--- a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs
+++ b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs
@@ -1,7 +1,6 @@
 use zksync_types::writes::StateDiffRecord;
 
 use super::{BootloaderMemory, CurrentExecutionState, VmExecutionResultAndLogs};
-use crate::{ExecutionResult, Refunds, VmExecutionLogs, VmExecutionStatistics};
 
 /// State of the VM after the batch execution.
#[derive(Debug, Clone)] @@ -21,13 +20,7 @@ pub struct FinishedL1Batch { impl FinishedL1Batch { pub fn mock() -> Self { FinishedL1Batch { - block_tip_execution_result: VmExecutionResultAndLogs { - result: ExecutionResult::Success { output: vec![] }, - logs: VmExecutionLogs::default(), - statistics: VmExecutionStatistics::default(), - refunds: Refunds::default(), - new_known_factory_deps: None, - }, + block_tip_execution_result: VmExecutionResultAndLogs::mock_success(), final_execution_state: CurrentExecutionState { events: vec![], deduplicated_storage_logs: vec![], diff --git a/core/lib/vm_interface/src/types/outputs/mod.rs b/core/lib/vm_interface/src/types/outputs/mod.rs index 1fa1cd5d1688..fe25801dd12e 100644 --- a/core/lib/vm_interface/src/types/outputs/mod.rs +++ b/core/lib/vm_interface/src/types/outputs/mod.rs @@ -1,3 +1,5 @@ +use std::borrow::Cow; + pub use self::{ bytecode::CompressedBytecodeInfo, execution_result::{ @@ -20,3 +22,14 @@ mod execution_state; mod finished_l1batch; mod l2_block; mod statistic; + +/// Result of pushing a transaction to the VM state without executing it. +#[derive(Debug)] +pub struct PushTransactionResult<'a> { + /// Compressed bytecodes for the transaction. If the VM doesn't support bytecode compression, returns + /// an empty slice. + /// + /// Importantly, these bytecodes are not guaranteed to be published by the transaction; + /// e.g., it may run out of gas during publication. + pub compressed_bytecodes: Cow<'a, [CompressedBytecodeInfo]>, +} diff --git a/core/lib/vm_interface/src/types/tracer.rs b/core/lib/vm_interface/src/types/tracer.rs index ba07772c7f23..1c3f65f443ef 100644 --- a/core/lib/vm_interface/src/types/tracer.rs +++ b/core/lib/vm_interface/src/types/tracer.rs @@ -1,4 +1,4 @@ -use std::{collections::HashSet, fmt}; +use std::{collections::HashSet, fmt, ops::Range, time}; use zksync_types::{Address, U256}; @@ -57,6 +57,17 @@ pub struct ValidationParams { pub trusted_address_slots: HashSet<(Address, U256)>, /// Number of computational gas that validation step is allowed to use. pub computational_gas_limit: u32, + /// Parameters of the timestamp asserter if configured + pub timestamp_asserter_params: Option, +} + +#[derive(Debug, Clone)] +pub struct TimestampAsserterParams { + /// Address of the timestamp asserter. This contract is allowed to touch block.timestamp regardless + /// of the calling context. + pub address: Address, + /// Minimum time between current block.timestamp and the end of the asserted range + pub min_time_till_end: time::Duration, } /// Rules that can be violated when validating a transaction. @@ -70,6 +81,8 @@ pub enum ViolatedValidationRule { TouchedDisallowedContext, /// The transaction used too much gas during validation. TookTooManyComputationalGas(u32), + /// The transaction failed block.timestamp assertion because the block.timestamp is too close to the range end + TimestampAssertionCloseToRangeEnd, } impl fmt::Display for ViolatedValidationRule { @@ -91,6 +104,9 @@ impl fmt::Display for ViolatedValidationRule { "Took too many computational gas, allowed limit: {gas_limit}" ) } + ViolatedValidationRule::TimestampAssertionCloseToRangeEnd => { + write!(f, "block.timestamp is too close to the range end") + } } } } @@ -104,6 +120,30 @@ pub enum ValidationError { ViolatedRule(ViolatedValidationRule), } +/// Traces the validation of a transaction, providing visibility into the aspects the transaction interacts with. 
+/// For instance, the `timestamp_asserter_range` represents the range within which the transaction might make
+/// assertions on `block.timestamp`. This information is crucial for the caller, as expired transactions should
+/// be excluded from the mempool.
+#[derive(Debug, Clone, Default)]
+pub struct ValidationTraces {
+    pub timestamp_asserter_range: Option<Range<u64>>,
+}
+
+impl ValidationTraces {
+    /// Merges two ranges by selecting the maximum of the start values and the minimum of the end values,
+    /// producing the narrowest possible time window. Note that overlapping ranges are essential;
+    /// a lack of overlap would have triggered an assertion failure in the `TimestampAsserter` contract,
+    /// as `block.timestamp` cannot satisfy two non-overlapping ranges.
+    pub fn apply_timestamp_asserter_range(&mut self, new_range: Range<u64>) {
+        if let Some(range) = &mut self.timestamp_asserter_range {
+            range.start = range.start.max(new_range.start);
+            range.end = range.end.min(new_range.end);
+        } else {
+            self.timestamp_asserter_range = Some(new_range);
+        }
+    }
+}
+
 impl fmt::Display for ValidationError {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
@@ -116,3 +156,36 @@
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_apply_range_when_none() {
+        let mut validation_traces = ValidationTraces {
+            timestamp_asserter_range: None,
+        };
+        let new_range = 10..20;
+        validation_traces.apply_timestamp_asserter_range(new_range.clone());
+        assert_eq!(validation_traces.timestamp_asserter_range, Some(new_range));
+    }
+
+    #[test]
+    fn test_apply_range_with_overlap_narrower_result() {
+        let mut validation_traces = ValidationTraces {
+            timestamp_asserter_range: Some(5..25),
+        };
+        validation_traces.apply_timestamp_asserter_range(10..20);
+        assert_eq!(validation_traces.timestamp_asserter_range, Some(10..20));
+    }
+
+    #[test]
+    fn test_apply_range_with_partial_overlap() {
+        let mut validation_traces = ValidationTraces {
+            timestamp_asserter_range: Some(10..30),
+        };
+        validation_traces.apply_timestamp_asserter_range(20..40);
+        assert_eq!(validation_traces.timestamp_asserter_range, Some(20..30));
+    }
+}
diff --git a/core/lib/vm_interface/src/utils/dump.rs b/core/lib/vm_interface/src/utils/dump.rs
index 288c6445494d..f23d6f307b89 100644
--- a/core/lib/vm_interface/src/utils/dump.rs
+++ b/core/lib/vm_interface/src/utils/dump.rs
@@ -1,13 +1,14 @@
-use std::collections::HashMap;
+use std::{collections::HashMap, rc::Rc};
 
 use serde::{Deserialize, Serialize};
 use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2BlockNumber, Transaction, H256};
 
 use crate::{
+    pubdata::PubdataBuilder,
     storage::{ReadStorage, StoragePtr, StorageSnapshot, StorageView},
-    BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode,
-    VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled,
-    VmTrackingContracts,
+    BytecodeCompressionResult, FinishedL1Batch, InspectExecutionMode, L1BatchEnv, L2BlockEnv,
+    PushTransactionResult, SystemEnv, VmExecutionResultAndLogs, VmFactory, VmInterface,
+    VmInterfaceExt, VmInterfaceHistoryEnabled, VmTrackingContracts,
 };
 
 fn create_storage_snapshot(
@@ -48,6 +49,7 @@ fn create_storage_snapshot(
 }
 
 /// VM dump allowing to re-run the VM on the same inputs. Can be (de)serialized.
+/// Note that the dump is not capable of finishing a batch in terms of VM execution.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct VmDump { pub l1_batch_env: L1BatchEnv, @@ -98,7 +100,6 @@ impl VmDump { } } } - vm.finish_batch(); vm } } @@ -139,18 +140,30 @@ impl DumpingVm { } } +impl AsRef for DumpingVm { + fn as_ref(&self) -> &Vm { + &self.inner + } +} + +impl AsMut for DumpingVm { + fn as_mut(&mut self) -> &mut Vm { + &mut self.inner + } +} + impl VmInterface for DumpingVm { type TracerDispatcher = Vm::TracerDispatcher; - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult { self.record_transaction(tx.clone()); - self.inner.push_transaction(tx); + self.inner.push_transaction(tx) } fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { self.inner.inspect(dispatcher, execution_mode) } @@ -177,8 +190,8 @@ impl VmInterface for DumpingVm { .inspect_transaction_with_bytecode_compression(tracer, tx, with_compression) } - fn finish_batch(&mut self) -> FinishedL1Batch { - self.inner.finish_batch() + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + self.inner.finish_batch(pubdata_builder) } } diff --git a/core/lib/vm_interface/src/utils/mod.rs b/core/lib/vm_interface/src/utils/mod.rs index 80a51c7b144f..394df7fc9a1d 100644 --- a/core/lib/vm_interface/src/utils/mod.rs +++ b/core/lib/vm_interface/src/utils/mod.rs @@ -2,7 +2,9 @@ pub use self::{ dump::VmDump, - shadow::{DivergenceErrors, DivergenceHandler, ShadowVm}, + shadow::{ + CheckDivergence, DivergenceErrors, DivergenceHandler, ShadowMut, ShadowRef, ShadowVm, + }, }; mod dump; diff --git a/core/lib/vm_interface/src/utils/shadow.rs b/core/lib/vm_interface/src/utils/shadow.rs index 92eb65a810f7..0883971f4de8 100644 --- a/core/lib/vm_interface/src/utils/shadow.rs +++ b/core/lib/vm_interface/src/utils/shadow.rs @@ -1,7 +1,9 @@ use std::{ + any, cell::RefCell, collections::{BTreeMap, BTreeSet}, fmt, + rc::Rc, sync::Arc, }; @@ -9,10 +11,11 @@ use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transact use super::dump::{DumpingVm, VmDump}; use crate::{ + pubdata::PubdataBuilder, storage::{ReadStorage, StoragePtr, StorageView}, - BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmTrackingContracts, + BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, InspectExecutionMode, + L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmTrackingContracts, }; /// Handler for VM divergences. @@ -65,6 +68,162 @@ impl VmWithReporting { } } +/// Reference to either the main or shadow VM. +#[derive(Debug)] +pub enum ShadowRef<'a, Main, Shadow> { + /// Reference to the main VM. + Main(&'a Main), + /// Reference to the shadow VM. + Shadow(&'a Shadow), +} + +/// Mutable reference to either the main or shadow VM. +#[derive(Debug)] +pub enum ShadowMut<'a, Main, Shadow> { + /// Reference to the main VM. + Main(&'a mut Main), + /// Reference to the shadow VM. + Shadow(&'a mut Shadow), +} + +/// Type that can check divergence between its instances. +pub trait CheckDivergence { + /// Checks divergences and returns a list of divergence errors, if any. 
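The `check_divergence` method that follows this doc comment returns accumulated `DivergenceErrors`, and per-field checks live in the impls further below. A self-contained miniature of the pattern (simplified stand-ins, not the crate's types; the real `DivergenceErrors` also carries context strings and pretty-printed comparisons):

```rust
use std::fmt;

#[derive(Default)]
struct DivergenceErrors(Vec<String>);

impl DivergenceErrors {
    // Record a labeled mismatch between the main and shadow values.
    fn check_match<T: fmt::Debug + PartialEq>(&mut self, context: &str, main: &T, shadow: &T) {
        if main != shadow {
            self.0.push(format!("{context}: {main:?} != {shadow:?}"));
        }
    }
}

trait CheckDivergence {
    fn check_divergence(&self, other: &Self) -> DivergenceErrors;
}

#[derive(Debug, PartialEq)]
struct BatchSummary {
    tx_count: usize,
    gas_used: u64,
}

impl CheckDivergence for BatchSummary {
    fn check_divergence(&self, other: &Self) -> DivergenceErrors {
        let mut errors = DivergenceErrors::default();
        errors.check_match("tx_count", &self.tx_count, &other.tx_count);
        errors.check_match("gas_used", &self.gas_used, &other.gas_used);
        errors
    }
}

fn main() {
    let main = BatchSummary { tx_count: 3, gas_used: 100 };
    let shadow = BatchSummary { tx_count: 3, gas_used: 90 };
    let errors = main.check_divergence(&shadow);
    assert_eq!(errors.0.len(), 1); // only `gas_used` diverges
}
```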
+ fn check_divergence(&self, other: &Self) -> DivergenceErrors; +} + +#[derive(Debug)] +struct DivergingEq(T); + +impl CheckDivergence for DivergingEq { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.check_match(any::type_name::(), &self.0, &other.0); + errors + } +} + +impl CheckDivergence for CurrentExecutionState { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.check_match("final_state.events", &self.events, &other.events); + errors.check_match( + "final_state.user_l2_to_l1_logs", + &self.user_l2_to_l1_logs, + &other.user_l2_to_l1_logs, + ); + errors.check_match( + "final_state.system_logs", + &self.system_logs, + &other.system_logs, + ); + errors.check_match( + "final_state.storage_refunds", + &self.storage_refunds, + &other.storage_refunds, + ); + errors.check_match( + "final_state.pubdata_costs", + &self.pubdata_costs, + &other.pubdata_costs, + ); + errors.check_match( + "final_state.used_contract_hashes", + &self.used_contract_hashes.iter().collect::>(), + &other.used_contract_hashes.iter().collect::>(), + ); + + let main_deduplicated_logs = DivergenceErrors::gather_logs(&self.deduplicated_storage_logs); + let shadow_deduplicated_logs = + DivergenceErrors::gather_logs(&other.deduplicated_storage_logs); + errors.check_match( + "deduplicated_storage_logs", + &main_deduplicated_logs, + &shadow_deduplicated_logs, + ); + errors + } +} + +impl CheckDivergence for VmExecutionResultAndLogs { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.check_match("result", &self.result, &other.result); + errors.check_match("logs.events", &self.logs.events, &other.logs.events); + errors.check_match( + "logs.system_l2_to_l1_logs", + &self.logs.system_l2_to_l1_logs, + &other.logs.system_l2_to_l1_logs, + ); + errors.check_match( + "logs.user_l2_to_l1_logs", + &self.logs.user_l2_to_l1_logs, + &other.logs.user_l2_to_l1_logs, + ); + let main_logs = UniqueStorageLogs::new(&self.logs.storage_logs); + let shadow_logs = UniqueStorageLogs::new(&other.logs.storage_logs); + errors.check_match("logs.storage_logs", &main_logs, &shadow_logs); + errors.check_match("refunds", &self.refunds, &other.refunds); + errors.check_match( + "statistics.circuit_statistic", + &self.statistics.circuit_statistic, + &other.statistics.circuit_statistic, + ); + errors.check_match( + "statistics.pubdata_published", + &self.statistics.pubdata_published, + &other.statistics.pubdata_published, + ); + errors.check_match( + "statistics.gas_remaining", + &self.statistics.gas_remaining, + &other.statistics.gas_remaining, + ); + errors.check_match( + "statistics.gas_used", + &self.statistics.gas_used, + &other.statistics.gas_used, + ); + errors.check_match( + "statistics.computational_gas_used", + &self.statistics.computational_gas_used, + &other.statistics.computational_gas_used, + ); + + // Order deps to have a more reasonable diff on a mismatch + let these_deps = self.dynamic_factory_deps.iter().collect::>(); + let other_deps = other + .dynamic_factory_deps + .iter() + .collect::>(); + errors.check_match("dynamic_factory_deps", &these_deps, &other_deps); + errors + } +} + +impl CheckDivergence for FinishedL1Batch { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.extend( + self.block_tip_execution_result + .check_divergence(&other.block_tip_execution_result), + ); + 
errors.extend( + self.final_execution_state + .check_divergence(&other.final_execution_state), + ); + + errors.check_match( + "final_bootloader_memory", + &self.final_bootloader_memory, + &other.final_bootloader_memory, + ); + errors.check_match("pubdata_input", &self.pubdata_input, &other.pubdata_input); + errors.check_match("state_diffs", &self.state_diffs, &other.state_diffs); + errors + } +} + /// Shadowed VM that executes 2 VMs for each operation and compares their outputs. /// /// If a divergence is detected, the VM state is dumped using [a pluggable handler](Self::set_dump_handler()), @@ -105,6 +264,66 @@ where pub fn dump_state(&self) -> VmDump { self.main.dump_state() } + + /// Gets the specified value from both the main and shadow VM, checking whether it matches on both. + pub fn get(&self, name: &str, mut action: impl FnMut(ShadowRef<'_, Main, Shadow>) -> R) -> R + where + R: PartialEq + fmt::Debug + 'static, + { + self.get_custom(name, |r| DivergingEq(action(r))).0 + } + + /// Same as [`Self::get()`], but uses custom divergence checks for the type encapsulated in the [`CheckDivergence`] trait. + pub fn get_custom( + &self, + name: &str, + mut action: impl FnMut(ShadowRef<'_, Main, Shadow>) -> R, + ) -> R { + let main_output = action(ShadowRef::Main(self.main.as_ref())); + let borrow = self.shadow.borrow(); + if let Some(shadow) = &*borrow { + let shadow_output = action(ShadowRef::Shadow(&shadow.vm)); + let errors = main_output.check_divergence(&shadow_output); + if let Err(err) = errors.into_result() { + drop(borrow); + self.report_shared(err.context(format!("get({name})"))); + } + } + main_output + } + + /// Gets the specified value from both the main and shadow VM, potentially changing their state + /// and checking whether the returned value matches. + pub fn get_mut( + &mut self, + name: &str, + mut action: impl FnMut(ShadowMut<'_, Main, Shadow>) -> R, + ) -> R + where + R: PartialEq + fmt::Debug + 'static, + { + self.get_custom_mut(name, |r| DivergingEq(action(r))).0 + } + + /// Same as [`Self::get_mut()`], but uses custom divergence checks for the type encapsulated in the [`CheckDivergence`] trait. + pub fn get_custom_mut( + &mut self, + name: &str, + mut action: impl FnMut(ShadowMut<'_, Main, Shadow>) -> R, + ) -> R + where + R: CheckDivergence, + { + let main_output = action(ShadowMut::Main(self.main.as_mut())); + if let Some(shadow) = self.shadow.get_mut() { + let shadow_output = action(ShadowMut::Shadow(&mut shadow.vm)); + let errors = main_output.check_divergence(&shadow_output); + if let Err(err) = errors.into_result() { + self.report_shared(err.context(format!("get_mut({name})"))); + } + } + main_output + } } impl ShadowVm @@ -123,7 +342,7 @@ where where Shadow: VmFactory, { - let main = DumpingVm::new(batch_env.clone(), system_env.clone(), storage.clone()); + let main = DumpingVm::new(batch_env.clone(), system_env.clone(), storage); let shadow = Shadow::new(batch_env.clone(), system_env.clone(), shadow_storage); let shadow = VmWithReporting { vm: shadow, @@ -151,7 +370,6 @@ where } } -/// **Important.** This doesn't properly handle tracers; they are not passed to the shadow VM! impl VmInterface for ShadowVm where S: ReadStorage, @@ -163,24 +381,41 @@ where ::TracerDispatcher, ); - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + let main_result = self.main.push_transaction(tx.clone()); + // Extend lifetime to `'static` so that the result isn't mutably borrowed from the main VM. 
+ // Unfortunately, there's no way to express that this borrow is actually immutable, which would allow not extending the lifetime unless there's a divergence. + let main_result: PushTransactionResult<'static> = PushTransactionResult { + compressed_bytecodes: main_result.compressed_bytecodes.into_owned().into(), + }; + if let Some(shadow) = self.shadow.get_mut() { - shadow.vm.push_transaction(tx.clone()); + let tx_repr = format!("{tx:?}"); // includes little data, so is OK to call proactively + let shadow_result = shadow.vm.push_transaction(tx); + + let mut errors = DivergenceErrors::new(); + errors.check_match( + "bytecodes", + &main_result.compressed_bytecodes, + &shadow_result.compressed_bytecodes, + ); + if let Err(err) = errors.into_result() { + let ctx = format!("pushing transaction {tx_repr}"); + self.report(err.context(ctx)); + } } - self.main.push_transaction(tx); + main_result } fn inspect( &mut self, (main_tracer, shadow_tracer): &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { let main_result = self.main.inspect(main_tracer, execution_mode); if let Some(shadow) = self.shadow.get_mut() { let shadow_result = shadow.vm.inspect(shadow_tracer, execution_mode); - let mut errors = DivergenceErrors::new(); - errors.check_results_match(&main_result, &shadow_result); - + let errors = main_result.check_divergence(&shadow_result); if let Err(err) = errors.into_result() { let ctx = format!("executing VM with mode {execution_mode:?}"); self.report(err.context(ctx)); @@ -221,8 +456,7 @@ where tx, with_compression, ); - let mut errors = DivergenceErrors::new(); - errors.check_results_match(&main_tx_result, &shadow_result.1); + let errors = main_tx_result.check_divergence(&shadow_result.1); if let Err(err) = errors.into_result() { let ctx = format!( "inspecting transaction {tx_repr}, with_compression={with_compression:?}" @@ -233,35 +467,11 @@ where (main_bytecodes_result, main_tx_result) } - fn finish_batch(&mut self) -> FinishedL1Batch { - let main_batch = self.main.finish_batch(); + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + let main_batch = self.main.finish_batch(pubdata_builder.clone()); if let Some(shadow) = self.shadow.get_mut() { - let shadow_batch = shadow.vm.finish_batch(); - let mut errors = DivergenceErrors::new(); - errors.check_results_match( - &main_batch.block_tip_execution_result, - &shadow_batch.block_tip_execution_result, - ); - errors.check_final_states_match( - &main_batch.final_execution_state, - &shadow_batch.final_execution_state, - ); - errors.check_match( - "final_bootloader_memory", - &main_batch.final_bootloader_memory, - &shadow_batch.final_bootloader_memory, - ); - errors.check_match( - "pubdata_input", - &main_batch.pubdata_input, - &shadow_batch.pubdata_input, - ); - errors.check_match( - "state_diffs", - &main_batch.state_diffs, - &shadow_batch.state_diffs, - ); - + let shadow_batch = shadow.vm.finish_batch(pubdata_builder); + let errors = main_batch.check_divergence(&shadow_batch); if let Err(err) = errors.into_result() { self.report(err); } @@ -302,63 +512,15 @@ impl DivergenceErrors { } } + fn extend(&mut self, from: Self) { + self.divergences.extend(from.divergences); + } + fn context(mut self, context: String) -> Self { self.context = Some(context); self } - fn check_results_match( - &mut self, - main_result: &VmExecutionResultAndLogs, - shadow_result: &VmExecutionResultAndLogs, - ) { - self.check_match("result", &main_result.result, 
&shadow_result.result); - self.check_match( - "logs.events", - &main_result.logs.events, - &shadow_result.logs.events, - ); - self.check_match( - "logs.system_l2_to_l1_logs", - &main_result.logs.system_l2_to_l1_logs, - &shadow_result.logs.system_l2_to_l1_logs, - ); - self.check_match( - "logs.user_l2_to_l1_logs", - &main_result.logs.user_l2_to_l1_logs, - &shadow_result.logs.user_l2_to_l1_logs, - ); - let main_logs = UniqueStorageLogs::new(&main_result.logs.storage_logs); - let shadow_logs = UniqueStorageLogs::new(&shadow_result.logs.storage_logs); - self.check_match("logs.storage_logs", &main_logs, &shadow_logs); - self.check_match("refunds", &main_result.refunds, &shadow_result.refunds); - self.check_match( - "statistics.circuit_statistic", - &main_result.statistics.circuit_statistic, - &shadow_result.statistics.circuit_statistic, - ); - self.check_match( - "statistics.pubdata_published", - &main_result.statistics.pubdata_published, - &shadow_result.statistics.pubdata_published, - ); - self.check_match( - "statistics.gas_remaining", - &main_result.statistics.gas_remaining, - &shadow_result.statistics.gas_remaining, - ); - self.check_match( - "statistics.gas_used", - &main_result.statistics.gas_used, - &shadow_result.statistics.gas_used, - ); - self.check_match( - "statistics.computational_gas_used", - &main_result.statistics.computational_gas_used, - &shadow_result.statistics.computational_gas_used, - ); - } - fn check_match(&mut self, context: &str, main: &T, shadow: &T) { if main != shadow { let comparison = pretty_assertions::Comparison::new(main, shadow); @@ -367,47 +529,6 @@ impl DivergenceErrors { } } - fn check_final_states_match( - &mut self, - main: &CurrentExecutionState, - shadow: &CurrentExecutionState, - ) { - self.check_match("final_state.events", &main.events, &shadow.events); - self.check_match( - "final_state.user_l2_to_l1_logs", - &main.user_l2_to_l1_logs, - &shadow.user_l2_to_l1_logs, - ); - self.check_match( - "final_state.system_logs", - &main.system_logs, - &shadow.system_logs, - ); - self.check_match( - "final_state.storage_refunds", - &main.storage_refunds, - &shadow.storage_refunds, - ); - self.check_match( - "final_state.pubdata_costs", - &main.pubdata_costs, - &shadow.pubdata_costs, - ); - self.check_match( - "final_state.used_contract_hashes", - &main.used_contract_hashes.iter().collect::>(), - &shadow.used_contract_hashes.iter().collect::>(), - ); - - let main_deduplicated_logs = Self::gather_logs(&main.deduplicated_storage_logs); - let shadow_deduplicated_logs = Self::gather_logs(&shadow.deduplicated_storage_logs); - self.check_match( - "deduplicated_storage_logs", - &main_deduplicated_logs, - &shadow_deduplicated_logs, - ); - } - fn gather_logs(logs: &[StorageLog]) -> BTreeMap { logs.iter() .filter(|log| log.is_write()) diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs index 37e33a92b509..2c25d729e318 100644 --- a/core/lib/vm_interface/src/vm.rs +++ b/core/lib/vm_interface/src/vm.rs @@ -11,26 +11,34 @@ //! Generally speaking, in most cases, the tracer dispatcher is a wrapper around `Vec>`, //! where `VmTracer` is a trait implemented for a specific VM version. 
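The reshaped `VmInterface` below changes the per-transaction flow: `push_transaction` now returns compression results up front, `inspect` and `execute` take the narrower `InspectExecutionMode`, and `finish_batch` requires a pubdata builder. A hedged sketch of a batch driver against this trait, assuming the crate's own re-exports (these types are also re-exported via `zksync_multivm::interface`); error handling is elided:

```rust
use std::rc::Rc;

use zksync_types::Transaction;
use zksync_vm_interface::{
    pubdata::PubdataBuilder, FinishedL1Batch, InspectExecutionMode, VmInterface, VmInterfaceExt,
};

// Runs a batch of transactions to completion on any VM implementing the reshaped trait.
fn drive_batch<V: VmInterface + VmInterfaceExt>(
    vm: &mut V,
    txs: Vec<Transaction>,
    pubdata_builder: Rc<dyn PubdataBuilder>,
) -> FinishedL1Batch {
    for tx in txs {
        // Compressed bytecodes may borrow from the VM state, so inspect them
        // (or clone what you need) before the next call into the VM.
        let compressed_len = vm.push_transaction(tx).compressed_bytecodes.len();
        assert!(compressed_len < usize::MAX);

        // Execute just the pushed transaction.
        let result = vm.execute(InspectExecutionMode::OneTx);
        assert!(!result.result.is_failed(), "{:?}", result.result);
    }
    // Sealing the batch now needs the pubdata builder to form the pubdata blob.
    vm.finish_batch(pubdata_builder)
}
```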
+use std::rc::Rc; + use zksync_types::{Transaction, H256}; use crate::{ - storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, + pubdata::PubdataBuilder, storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, + InspectExecutionMode, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, + VmExecutionResultAndLogs, }; pub trait VmInterface { /// Lifetime is used to be able to define `Option<&mut _>` as a dispatcher. type TracerDispatcher: Default; - /// Push transaction to bootloader memory. - fn push_transaction(&mut self, tx: Transaction); + /// Pushes a transaction to bootloader memory for future execution with bytecode compression (if it's supported by the VM). + /// + /// # Return value + /// + /// Returns preprocessing results, such as compressed bytecodes. The results may borrow from the VM state, + /// so you may want to inspect results before next operations with the VM, or clone the necessary parts. + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_>; /// Executes the next VM step (either next transaction or bootloader or the whole batch) /// with custom tracers. fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs; /// Start a new L2 block. @@ -46,13 +54,13 @@ pub trait VmInterface { /// Execute batch till the end and return the result, with final execution state /// and bootloader memory. - fn finish_batch(&mut self) -> FinishedL1Batch; + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch; } /// Extension trait for [`VmInterface`] that provides some additional methods. pub trait VmInterfaceExt: VmInterface { /// Executes the next VM step (either next transaction or bootloader or the whole batch). - fn execute(&mut self, execution_mode: VmExecutionMode) -> VmExecutionResultAndLogs { + fn execute(&mut self, execution_mode: InspectExecutionMode) -> VmExecutionResultAndLogs { self.inspect(&mut ::default(), execution_mode) } diff --git a/core/lib/web3_decl/src/namespaces/eth.rs b/core/lib/web3_decl/src/namespaces/eth.rs index 399773b845dc..40cb6300cffa 100644 --- a/core/lib/web3_decl/src/namespaces/eth.rs +++ b/core/lib/web3_decl/src/namespaces/eth.rs @@ -185,6 +185,9 @@ pub trait EthNamespace { newest_block: BlockNumber, reward_percentiles: Vec, ) -> RpcResult; + + #[method(name = "maxPriorityFeePerGas")] + async fn max_priority_fee_per_gas(&self) -> RpcResult; } #[cfg(feature = "server")] diff --git a/core/lib/web3_decl/src/namespaces/zks.rs b/core/lib/web3_decl/src/namespaces/zks.rs index 47aae2a0835e..07a7cc4ff1c2 100644 --- a/core/lib/web3_decl/src/namespaces/zks.rs +++ b/core/lib/web3_decl/src/namespaces/zks.rs @@ -51,6 +51,9 @@ pub trait ZksNamespace { #[method(name = "getTestnetPaymaster")] async fn get_testnet_paymaster(&self) -> RpcResult>; + #[method(name = "getTimestampAsserter")] + async fn get_timestamp_asserter(&self) -> RpcResult>; + #[method(name = "getBridgeContracts")] async fn get_bridge_contracts(&self) -> RpcResult; diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 9d399bdd0aff..87fb7ea28f71 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -43,9 +43,6 @@ pub enum Component { EthTxManager, /// State keeper. StateKeeper, - /// Produces input for the TEE verifier. 
- /// The blob is later used as input for TEE verifier. - TeeVerifierInputProducer, /// Component for housekeeping task such as cleaning blobs from GCS, reporting metrics etc. Housekeeper, /// Component for exposing APIs to prover for providing proof generation data and accepting proofs. @@ -88,9 +85,6 @@ impl FromStr for Components { "tree_api" => Ok(Components(vec![Component::TreeApi])), "state_keeper" => Ok(Components(vec![Component::StateKeeper])), "housekeeper" => Ok(Components(vec![Component::Housekeeper])), - "tee_verifier_input_producer" => { - Ok(Components(vec![Component::TeeVerifierInputProducer])) - } "eth" => Ok(Components(vec![ Component::EthWatcher, Component::EthTxAggregator, diff --git a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs index eb2170bcc848..5faef68507fa 100644 --- a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs +++ b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs @@ -6,7 +6,7 @@ use zksync_config::{ api::{HealthCheckConfig, MerkleTreeApiConfig, Web3JsonRpcConfig}, chain::{ CircuitBreakerConfig, MempoolConfig, NetworkConfig, OperationsManagerConfig, - StateKeeperConfig, + StateKeeperConfig, TimestampAsserterConfig, }, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, @@ -81,6 +81,7 @@ pub struct TempConfigStore { pub external_proof_integration_api_config: Option, pub experimental_vm_config: Option, pub prover_job_monitor_config: Option, + pub timestamp_asserter_config: Option, } impl TempConfigStore { @@ -122,6 +123,7 @@ impl TempConfigStore { .clone(), experimental_vm_config: self.experimental_vm_config.clone(), prover_job_monitor_config: self.prover_job_monitor_config.clone(), + timestamp_asserter_config: self.timestamp_asserter_config.clone(), } } @@ -203,6 +205,7 @@ fn load_env_config() -> anyhow::Result { external_proof_integration_api_config: ExternalProofIntegrationApiConfig::from_env().ok(), experimental_vm_config: ExperimentalVmConfig::from_env().ok(), prover_job_monitor_config: ProverJobMonitorConfig::from_env().ok(), + timestamp_asserter_config: TimestampAsserterConfig::from_env().ok(), }) } diff --git a/core/node/api_server/Cargo.toml b/core/node/api_server/Cargo.toml index 067b9b3e3722..debabb8d3666 100644 --- a/core/node/api_server/Cargo.toml +++ b/core/node/api_server/Cargo.toml @@ -25,7 +25,6 @@ zksync_state.workspace = true zksync_system_constants.workspace = true zksync_metadata_calculator.workspace = true zksync_web3_decl = { workspace = true, features = ["server"] } -zksync_utils.workspace = true zksync_protobuf.workspace = true zksync_mini_merkle_tree.workspace = true zksync_multivm.workspace = true @@ -59,7 +58,7 @@ lru.workspace = true zk_evm_1_5_0.workspace = true zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true +zksync_test_contracts.workspace = true assert_matches.workspace = true -const-decoder.workspace = true test-casing.workspace = true diff --git a/core/node/api_server/src/execution_sandbox/error.rs b/core/node/api_server/src/execution_sandbox/error.rs index 5d63d50a3c85..4523412ae194 100644 --- a/core/node/api_server/src/execution_sandbox/error.rs +++ b/core/node/api_server/src/execution_sandbox/error.rs @@ -26,6 +26,8 @@ pub(crate) enum SandboxExecutionError { that caused this error. 
Error description: {0}" )] UnexpectedVMBehavior(String), + #[error("Transaction failed block.timestamp assertion")] + FailedBlockTimestampAssertion, } impl From for SandboxExecutionError { @@ -67,6 +69,7 @@ impl From for SandboxExecutionError { Halt::FailedToPublishCompressedBytecodes => { Self::UnexpectedVMBehavior("Failed to publish compressed bytecodes".to_string()) } + Halt::FailedBlockTimestampAssertion => Self::FailedBlockTimestampAssertion, } } } diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index bdd574625888..d58bf6ca38f3 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -8,8 +8,8 @@ use tokio::runtime::Handle; use zksync_dal::{Connection, Core}; use zksync_multivm::interface::{ executor::{OneshotExecutor, TransactionValidator}, - storage::ReadStorage, - tracer::{ValidationError, ValidationParams}, + storage::{ReadStorage, StorageWithOverrides}, + tracer::{TimestampAsserterParams, ValidationError, ValidationParams, ValidationTraces}, Call, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, TransactionExecutionMetrics, TxExecutionArgs, VmExecutionResultAndLogs, }; @@ -20,11 +20,10 @@ use zksync_types::{ use zksync_vm_executor::oneshot::{MainOneshotExecutor, MockOneshotExecutor}; use super::{ - storage::StorageWithOverrides, vm_metrics::{self, SandboxStage}, BlockArgs, VmPermit, SANDBOX_METRICS, }; -use crate::tx_sender::SandboxExecutorOptions; +use crate::{execution_sandbox::storage::apply_state_override, tx_sender::SandboxExecutorOptions}; /// Action that can be executed by [`SandboxExecutor`]. #[derive(Debug)] @@ -100,6 +99,7 @@ pub(crate) struct SandboxExecutor { engine: SandboxExecutorEngine, pub(super) options: SandboxExecutorOptions, storage_caches: Option, + pub(super) timestamp_asserter_params: Option, } impl SandboxExecutor { @@ -107,14 +107,19 @@ impl SandboxExecutor { options: SandboxExecutorOptions, caches: PostgresStorageCaches, missed_storage_invocation_limit: usize, + timestamp_asserter_params: Option, ) -> Self { let mut executor = MainOneshotExecutor::new(missed_storage_invocation_limit); + executor.set_fast_vm_mode(options.fast_vm_mode); + #[cfg(test)] + executor.panic_on_divergence(); executor .set_execution_latency_histogram(&SANDBOX_METRICS.sandbox[&SandboxStage::Execution]); Self { engine: SandboxExecutorEngine::Real(executor), options, storage_caches: Some(caches), + timestamp_asserter_params, } } @@ -130,6 +135,7 @@ impl SandboxExecutor { engine: SandboxExecutorEngine::Mock(executor), options, storage_caches: None, + timestamp_asserter_params: None, } } @@ -151,7 +157,7 @@ impl SandboxExecutor { .await?; let state_override = state_override.unwrap_or_default(); - let storage = StorageWithOverrides::new(storage, &state_override); + let storage = apply_state_override(storage, &state_override); let (execution_args, tracing_params) = action.into_parts(); let result = self .inspect_transaction_with_bytecode_compression( @@ -246,13 +252,13 @@ impl SandboxExecutor { } #[async_trait] -impl OneshotExecutor for SandboxExecutor +impl OneshotExecutor> for SandboxExecutor where S: ReadStorage + Send + 'static, { async fn inspect_transaction_with_bytecode_compression( &self, - storage: S, + storage: StorageWithOverrides, env: OneshotEnv, args: TxExecutionArgs, tracing_params: OneshotTracingParams, @@ -283,17 +289,17 @@ where } #[async_trait] -impl TransactionValidator for SandboxExecutor +impl 
TransactionValidator> for SandboxExecutor where S: ReadStorage + Send + 'static, { async fn validate_transaction( &self, - storage: S, + storage: StorageWithOverrides, env: OneshotEnv, tx: L2Tx, validation_params: ValidationParams, - ) -> anyhow::Result> { + ) -> anyhow::Result> { match &self.engine { SandboxExecutorEngine::Real(executor) => { executor diff --git a/core/node/api_server/src/execution_sandbox/storage.rs b/core/node/api_server/src/execution_sandbox/storage.rs index bf775d484906..026ac58733a4 100644 --- a/core/node/api_server/src/execution_sandbox/storage.rs +++ b/core/node/api_server/src/execution_sandbox/storage.rs @@ -1,127 +1,66 @@ //! VM storage functionality specifically used in the VM sandbox. -use std::{ - collections::{HashMap, HashSet}, - fmt, -}; - -use zksync_multivm::interface::storage::ReadStorage; +use zksync_multivm::interface::storage::{ReadStorage, StorageWithOverrides}; use zksync_types::{ api::state_override::{OverrideState, StateOverride}, - get_code_key, get_known_code_key, get_nonce_key, + get_code_key, get_known_code_key, get_nonce_key, h256_to_u256, u256_to_h256, utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, - AccountTreeId, StorageKey, StorageValue, H256, + AccountTreeId, StorageKey, H256, }; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -/// A storage view that allows to override some of the storage values. -#[derive(Debug)] -pub(super) struct StorageWithOverrides { - storage_handle: S, - overridden_slots: HashMap, - overridden_factory_deps: HashMap>, - overridden_accounts: HashSet, -} -impl StorageWithOverrides { - /// Creates a new storage view based on the underlying storage. - pub(super) fn new(storage: S, state_override: &StateOverride) -> Self { - let mut this = Self { - storage_handle: storage, - overridden_slots: HashMap::new(), - overridden_factory_deps: HashMap::new(), - overridden_accounts: HashSet::new(), - }; - this.apply_state_override(state_override); - this - } - - fn apply_state_override(&mut self, state_override: &StateOverride) { - for (account, overrides) in state_override.iter() { - if let Some(balance) = overrides.balance { - let balance_key = storage_key_for_eth_balance(account); - self.overridden_slots - .insert(balance_key, u256_to_h256(balance)); - } +/// This method is blocking. 
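+/// Overrides are applied per account in a fixed order: balance, then nonce (only the account
+/// nonce is replaced; the deployment nonce read from the underlying storage is preserved),
+/// then code (which also marks the bytecode hash as known and registers the factory dep),
+/// and finally the account state. `OverrideState::State` erases all storage slots of the
+/// account that are not explicitly set, while `OverrideState::StateDiff` only overlays the
+/// listed slots. A minimal sketch (hypothetical `addr` / `postgres_storage` values; mirrors
+/// the unit test below):
+///
+/// ```ignore
+/// let overrides = StateOverride::new(HashMap::from([(
+///     addr,
+///     OverrideAccount { balance: Some(1.into()), ..OverrideAccount::default() },
+/// )]));
+/// let mut storage = apply_state_override(postgres_storage, &overrides);
+/// ```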
+pub(super) fn apply_state_override( + storage: S, + state_override: &StateOverride, +) -> StorageWithOverrides { + let mut storage = StorageWithOverrides::new(storage); + for (account, overrides) in state_override.iter() { + if let Some(balance) = overrides.balance { + let balance_key = storage_key_for_eth_balance(account); + storage.set_value(balance_key, u256_to_h256(balance)); + } - if let Some(nonce) = overrides.nonce { - let nonce_key = get_nonce_key(account); - let full_nonce = self.read_value(&nonce_key); - let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); - let new_full_nonce = u256_to_h256(nonces_to_full_nonce(nonce, deployment_nonce)); - self.overridden_slots.insert(nonce_key, new_full_nonce); - } + if let Some(nonce) = overrides.nonce { + let nonce_key = get_nonce_key(account); + let full_nonce = storage.read_value(&nonce_key); + let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); + let new_full_nonce = u256_to_h256(nonces_to_full_nonce(nonce, deployment_nonce)); + storage.set_value(nonce_key, new_full_nonce); + } - if let Some(code) = &overrides.code { - let code_key = get_code_key(account); - let code_hash = code.hash(); - self.overridden_slots.insert(code_key, code_hash); - let known_code_key = get_known_code_key(&code_hash); - self.overridden_slots - .insert(known_code_key, H256::from_low_u64_be(1)); - self.store_factory_dep(code_hash, code.clone().into_bytes()); - } + if let Some(code) = &overrides.code { + let code_key = get_code_key(account); + let code_hash = code.hash(); + storage.set_value(code_key, code_hash); + let known_code_key = get_known_code_key(&code_hash); + storage.set_value(known_code_key, H256::from_low_u64_be(1)); + storage.store_factory_dep(code_hash, code.clone().into_bytes()); + } - match &overrides.state { - Some(OverrideState::State(state)) => { - let account = AccountTreeId::new(*account); - self.override_account_state_diff(account, state); - self.overridden_accounts.insert(account); + match &overrides.state { + Some(OverrideState::State(state)) => { + let account = AccountTreeId::new(*account); + for (&key, &value) in state { + storage.set_value(StorageKey::new(account, key), value); } - Some(OverrideState::StateDiff(state_diff)) => { - let account = AccountTreeId::new(*account); - self.override_account_state_diff(account, state_diff); + storage.insert_erased_account(account); + } + Some(OverrideState::StateDiff(state_diff)) => { + let account = AccountTreeId::new(*account); + for (&key, &value) in state_diff { + storage.set_value(StorageKey::new(account, key), value); } - None => { /* do nothing */ } } + None => { /* do nothing */ } } } - - fn store_factory_dep(&mut self, hash: H256, code: Vec) { - self.overridden_factory_deps.insert(hash, code); - } - - fn override_account_state_diff( - &mut self, - account: AccountTreeId, - state_diff: &HashMap, - ) { - let account_slots = state_diff - .iter() - .map(|(&slot, &value)| (StorageKey::new(account, slot), value)); - self.overridden_slots.extend(account_slots); - } -} - -impl ReadStorage for StorageWithOverrides { - fn read_value(&mut self, key: &StorageKey) -> StorageValue { - if let Some(value) = self.overridden_slots.get(key) { - return *value; - } - if self.overridden_accounts.contains(key.account()) { - return H256::zero(); - } - self.storage_handle.read_value(key) - } - - fn is_write_initial(&mut self, key: &StorageKey) -> bool { - self.storage_handle.is_write_initial(key) - } - - fn load_factory_dep(&mut self, hash: H256) -> Option> { - 
self.overridden_factory_deps - .get(&hash) - .cloned() - .or_else(|| self.storage_handle.load_factory_dep(hash)) - } - - fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { - self.storage_handle.get_enumeration_index(key) - } + storage } #[cfg(test)] mod tests { + use std::collections::HashMap; + use zksync_multivm::interface::storage::InMemoryStorage; use zksync_types::{ api::state_override::{Bytecode, OverrideAccount}, @@ -184,7 +123,7 @@ mod tests { storage.set_value(retained_key, H256::repeat_byte(0xfe)); let erased_key = StorageKey::new(AccountTreeId::new(Address::repeat_byte(5)), H256::zero()); storage.set_value(erased_key, H256::repeat_byte(1)); - let mut storage = StorageWithOverrides::new(storage, &overrides); + let mut storage = apply_state_override(storage, &overrides); let balance = storage.read_value(&storage_key_for_eth_balance(&Address::repeat_byte(1))); assert_eq!(balance, H256::from_low_u64_be(1)); diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index e342f2d73de9..0aff15b973e0 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -217,6 +217,7 @@ async fn test_instantiating_vm(connection: Connection<'static, Core>, block_args SandboxExecutorOptions::mock().await, PostgresStorageCaches::new(1, 1), usize::MAX, + None, ); let fee_input = BatchFeeInput::l1_pegged(55, 555); @@ -265,6 +266,7 @@ async fn validating_transaction(set_balance: bool) { SandboxExecutorOptions::mock().await, PostgresStorageCaches::new(1, 1), usize::MAX, + None, ); let fee_input = BatchFeeInput::l1_pegged(55, 555); diff --git a/core/node/api_server/src/execution_sandbox/validate.rs b/core/node/api_server/src/execution_sandbox/validate.rs index 9a3c88f8bf0c..3d58f807a89a 100644 --- a/core/node/api_server/src/execution_sandbox/validate.rs +++ b/core/node/api_server/src/execution_sandbox/validate.rs @@ -5,16 +5,18 @@ use tracing::Instrument; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::interface::{ executor::TransactionValidator, - tracer::{ValidationError as RawValidationError, ValidationParams}, + storage::StorageWithOverrides, + tracer::{ + TimestampAsserterParams, ValidationError as RawValidationError, ValidationParams, + ValidationTraces, + }, }; use zksync_types::{ - api::state_override::StateOverride, fee_model::BatchFeeInput, l2::L2Tx, Address, - TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS, + fee_model::BatchFeeInput, l2::L2Tx, Address, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS, }; use super::{ execute::{SandboxAction, SandboxExecutor}, - storage::StorageWithOverrides, vm_metrics::{SandboxStage, EXECUTION_METRICS, SANDBOX_METRICS}, BlockArgs, VmPermit, }; @@ -39,13 +41,14 @@ impl SandboxExecutor { block_args: BlockArgs, fee_input: BatchFeeInput, whitelisted_tokens_for_aa: &[Address], - ) -> Result<(), ValidationError> { + ) -> Result { let total_latency = SANDBOX_METRICS.sandbox[&SandboxStage::ValidateInSandbox].start(); let validation_params = get_validation_params( &mut connection, &tx, self.options.eth_call.validation_computational_gas_limit(), whitelisted_tokens_for_aa, + self.timestamp_asserter_params.clone(), ) .await .context("failed getting validation params")?; @@ -57,7 +60,7 @@ impl SandboxExecutor { let SandboxAction::Execution { tx, .. 
} = action else { unreachable!(); // by construction }; - let storage = StorageWithOverrides::new(storage, &StateOverride::default()); + let storage = StorageWithOverrides::new(storage); let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Validation].start(); let validation_result = self @@ -80,6 +83,7 @@ pub(super) async fn get_validation_params( tx: &L2Tx, computational_gas_limit: u32, whitelisted_tokens_for_aa: &[Address], + timestamp_asserter_params: Option, ) -> anyhow::Result { let method_latency = EXECUTION_METRICS.get_validation_params.start(); let user_address = tx.common_data.initiator_address; @@ -126,5 +130,6 @@ pub(super) async fn get_validation_params( trusted_addresses, trusted_address_slots, computational_gas_limit, + timestamp_asserter_params, }) } diff --git a/core/node/api_server/src/execution_sandbox/vm_metrics.rs b/core/node/api_server/src/execution_sandbox/vm_metrics.rs index 613475b6ef92..282d9bdf1b77 100644 --- a/core/node/api_server/src/execution_sandbox/vm_metrics.rs +++ b/core/node/api_server/src/execution_sandbox/vm_metrics.rs @@ -7,8 +7,7 @@ use zksync_multivm::{ interface::{TransactionExecutionMetrics, VmEvent, VmExecutionResultAndLogs}, utils::StorageWritesDeduplicator, }; -use zksync_types::H256; -use zksync_utils::bytecode::bytecode_len_in_bytes; +use zksync_types::{bytecode::BytecodeHash, H256}; use crate::utils::ReportFilter; @@ -149,7 +148,11 @@ pub(super) fn collect_tx_execution_metrics( .sum(); let published_bytecode_bytes = VmEvent::extract_published_bytecodes(&result.logs.events) .iter() - .map(|bytecode_hash| bytecode_len_in_bytes(*bytecode_hash)) + .map(|&bytecode_hash| { + BytecodeHash::try_from(bytecode_hash) + .expect("published unparseable bytecode hash") + .len_in_bytes() + }) .sum(); TransactionExecutionMetrics { diff --git a/core/node/api_server/src/testonly.rs b/core/node/api_server/src/testonly.rs index 13e5ecc08ead..06b31427ed61 100644 --- a/core/node/api_server/src/testonly.rs +++ b/core/node/api_server/src/testonly.rs @@ -2,63 +2,29 @@ use std::{collections::HashMap, iter}; -use const_decoder::Decoder; use zk_evm_1_5_0::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; -use zksync_contracts::{ - eth_contract, get_loadnext_contract, load_contract, read_bytecode, - test_contracts::LoadnextContractExecutionParams, -}; +use zksync_contracts::{eth_contract, load_contract, read_bytecode}; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; -use zksync_node_fee_model::BatchFeeModelInputProvider; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; +use zksync_system_constants::{L2_BASE_TOKEN_ADDRESS, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE}; +use zksync_test_contracts::{LoadnextContractExecutionParams, TestContract}; use zksync_types::{ + address_to_u256, api::state_override::{Bytecode, OverrideAccount, OverrideState, StateOverride}, ethabi, ethabi::Token, fee::Fee, fee_model::FeeParams, get_code_key, get_known_code_key, + l1::L1Tx, l2::L2Tx, - transaction_request::{CallRequest, PaymasterParams}, + transaction_request::{CallRequest, Eip712Meta, PaymasterParams}, + u256_to_h256, utils::storage_key_for_eth_balance, AccountTreeId, Address, K256PrivateKey, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, - StorageKey, StorageLog, H256, U256, + StorageKey, StorageLog, EIP_712_TX_TYPE, H256, U256, }; -use zksync_utils::{address_to_u256, u256_to_h256}; - -pub(crate) const RAW_EVM_BYTECODE: &[u8] = &const_decoder::decode!( - Decoder::Hex, - 
b"00000000000000000000000000000000000000000000000000000000000001266080604052348015\ - 600e575f80fd5b50600436106030575f3560e01c8063816898ff146034578063fb5343f314604c57\ - 5b5f80fd5b604a60048036038101906046919060a6565b6066565b005b6052606f565b604051605d\ - 919060d9565b60405180910390f35b805f8190555050565b5f5481565b5f80fd5b5f819050919050\ - 565b6088816078565b81146091575f80fd5b50565b5f8135905060a0816081565b92915050565b5f\ - 6020828403121560b85760b76074565b5b5f60c3848285016094565b91505092915050565b60d381\ - 6078565b82525050565b5f60208201905060ea5f83018460cc565b9291505056fea2646970667358\ - 221220caca1247066da378f2ec77c310f2ae51576272367b4fa11cc4350af4e9ce4d0964736f6c63\ - 4300081a00330000000000000000000000000000000000000000000000000000" -); -pub(crate) const PROCESSED_EVM_BYTECODE: &[u8] = &const_decoder::decode!( - Decoder::Hex, - b"6080604052348015600e575f80fd5b50600436106030575f3560e01c8063816898ff146034578063\ - fb5343f314604c575b5f80fd5b604a60048036038101906046919060a6565b6066565b005b605260\ - 6f565b604051605d919060d9565b60405180910390f35b805f8190555050565b5f5481565b5f80fd\ - 5b5f819050919050565b6088816078565b81146091575f80fd5b50565b5f8135905060a081608156\ - 5b92915050565b5f6020828403121560b85760b76074565b5b5f60c3848285016094565b91505092\ - 915050565b60d3816078565b82525050565b5f60208201905060ea5f83018460cc565b9291505056\ - fea2646970667358221220caca1247066da378f2ec77c310f2ae51576272367b4fa11cc4350af4e9\ - ce4d0964736f6c634300081a0033" -); - -const EXPENSIVE_CONTRACT_PATH: &str = - "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; -const PRECOMPILES_CONTRACT_PATH: &str = - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json"; -const COUNTER_CONTRACT_PATH: &str = - "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json"; -const INFINITE_LOOP_CONTRACT_PATH: &str = - "etc/contracts-test-data/artifacts-zk/contracts/infinite/infinite.sol/InfiniteLoop.json"; + const MULTICALL3_CONTRACT_PATH: &str = "contracts/l2-contracts/zkout/Multicall3.sol/Multicall3.json"; @@ -72,11 +38,7 @@ fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { } fn default_fee() -> Fee { - let fee_input = ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - 1.0, - 1.0, - ); + let fee_input = FeeParams::sensible_v1_default().scale(1.0, 1.0); let (max_fee_per_gas, gas_per_pubdata_limit) = derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::default().into()); Fee { @@ -126,7 +88,7 @@ impl StateBuilder { self.inner.insert( Self::LOAD_TEST_ADDRESS, OverrideAccount { - code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), + code: Some(Bytecode::new(TestContract::load_test().bytecode.to_vec()).unwrap()), state: Some(OverrideState::State(state)), ..OverrideAccount::default() }, @@ -142,21 +104,21 @@ impl StateBuilder { pub fn with_expensive_contract(self) -> Self { self.with_contract( Self::EXPENSIVE_CONTRACT_ADDRESS, - read_bytecode(EXPENSIVE_CONTRACT_PATH), + TestContract::expensive().bytecode.to_vec(), ) } pub fn with_precompiles_contract(self) -> Self { self.with_contract( Self::PRECOMPILES_CONTRACT_ADDRESS, - read_bytecode(PRECOMPILES_CONTRACT_PATH), + TestContract::precompiles_test().bytecode.to_vec(), ) } pub fn with_counter_contract(self, initial_value: u64) -> Self { let mut this = self.with_contract( Self::COUNTER_CONTRACT_ADDRESS, - read_bytecode(COUNTER_CONTRACT_PATH), + TestContract::counter().bytecode.to_vec(), ); if initial_value != 0 { let state = 
HashMap::from([(H256::zero(), H256::from_low_u64_be(initial_value))]); @@ -171,7 +133,7 @@ impl StateBuilder { pub fn with_infinite_loop_contract(self) -> Self { self.with_contract( Self::INFINITE_LOOP_CONTRACT_ADDRESS, - read_bytecode(INFINITE_LOOP_CONTRACT_PATH), + TestContract::infinite_loop().bytecode.to_vec(), ) } @@ -348,6 +310,8 @@ pub(crate) trait TestAccount { fn create_counter_tx(&self, increment: U256, revert: bool) -> L2Tx; + fn create_l1_counter_tx(&self, increment: U256, revert: bool) -> L1Tx; + fn query_counter_value(&self) -> CallRequest; fn create_infinite_loop_tx(&self) -> L2Tx; @@ -395,7 +359,7 @@ impl TestAccount for K256PrivateKey { L2ChainId::default(), self, if params.deploys > 0 { - get_loadnext_contract().factory_deps + TestContract::load_test().factory_deps() } else { vec![] }, @@ -405,9 +369,8 @@ impl TestAccount for K256PrivateKey { } fn create_expensive_tx(&self, write_count: usize) -> L2Tx { - let calldata = load_contract(EXPENSIVE_CONTRACT_PATH) + let calldata = TestContract::expensive() .function("expensive") - .expect("no `expensive` function in contract") .encode_input(&[Token::Uint(write_count.into())]) .expect("failed encoding `expensive` function"); L2Tx::new_signed( @@ -425,9 +388,8 @@ impl TestAccount for K256PrivateKey { } fn create_expensive_cleanup_tx(&self) -> L2Tx { - let calldata = load_contract(EXPENSIVE_CONTRACT_PATH) + let calldata = TestContract::expensive() .function("cleanUp") - .expect("no `cleanUp` function in contract") .encode_input(&[]) .expect("failed encoding `cleanUp` input"); L2Tx::new_signed( @@ -445,9 +407,8 @@ impl TestAccount for K256PrivateKey { } fn create_code_oracle_tx(&self, bytecode_hash: H256, expected_keccak_hash: H256) -> L2Tx { - let calldata = load_contract(PRECOMPILES_CONTRACT_PATH) + let calldata = TestContract::precompiles_test() .function("callCodeOracle") - .expect("no `callCodeOracle` function") .encode_input(&[ Token::FixedBytes(bytecode_hash.0.to_vec()), Token::FixedBytes(expected_keccak_hash.0.to_vec()), @@ -468,9 +429,8 @@ impl TestAccount for K256PrivateKey { } fn create_counter_tx(&self, increment: U256, revert: bool) -> L2Tx { - let calldata = load_contract(COUNTER_CONTRACT_PATH) + let calldata = TestContract::counter() .function("incrementWithRevert") - .expect("no `incrementWithRevert` function") .encode_input(&[Token::Uint(increment), Token::Bool(revert)]) .expect("failed encoding `incrementWithRevert` input"); L2Tx::new_signed( @@ -487,10 +447,28 @@ impl TestAccount for K256PrivateKey { .unwrap() } + fn create_l1_counter_tx(&self, increment: U256, revert: bool) -> L1Tx { + let calldata = TestContract::counter() + .function("incrementWithRevert") + .encode_input(&[Token::Uint(increment), Token::Bool(revert)]) + .expect("failed encoding `incrementWithRevert` input"); + let request = CallRequest { + data: Some(calldata.into()), + from: Some(self.address()), + to: Some(StateBuilder::COUNTER_CONTRACT_ADDRESS), + transaction_type: Some(EIP_712_TX_TYPE.into()), + eip712_meta: Some(Eip712Meta { + gas_per_pubdata: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), + ..Eip712Meta::default() + }), + ..CallRequest::default() + }; + L1Tx::from_request(request, false).unwrap() + } + fn query_counter_value(&self) -> CallRequest { - let calldata = load_contract(COUNTER_CONTRACT_PATH) + let calldata = TestContract::counter() .function("get") - .expect("no `get` function") .encode_input(&[]) .expect("failed encoding `get` input"); CallRequest { @@ -502,9 +480,8 @@ impl TestAccount for K256PrivateKey { } fn 
create_infinite_loop_tx(&self) -> L2Tx { - let calldata = load_contract(INFINITE_LOOP_CONTRACT_PATH) + let calldata = TestContract::infinite_loop() .function("infiniteLoop") - .expect("no `infiniteLoop` function") .encode_input(&[]) .expect("failed encoding `infiniteLoop` input"); L2Tx::new_signed( diff --git a/core/node/api_server/src/tx_sender/master_pool_sink.rs b/core/node/api_server/src/tx_sender/master_pool_sink.rs index 736edf0b2475..06333f0c1369 100644 --- a/core/node/api_server/src/tx_sender/master_pool_sink.rs +++ b/core/node/api_server/src/tx_sender/master_pool_sink.rs @@ -2,7 +2,7 @@ use std::collections::hash_map::{Entry, HashMap}; use tokio::sync::Mutex; use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool, Core, CoreDal}; -use zksync_multivm::interface::TransactionExecutionMetrics; +use zksync_multivm::interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_types::{l2::L2Tx, Address, Nonce, H256}; @@ -31,6 +31,7 @@ impl TxSink for MasterPoolSink { &self, tx: &L2Tx, execution_metrics: TransactionExecutionMetrics, + validation_traces: ValidationTraces, ) -> Result { let address_and_nonce = (tx.initiator_account(), tx.nonce()); @@ -55,7 +56,7 @@ impl TxSink for MasterPoolSink { let result = match self.master_pool.connection_tagged("api").await { Ok(mut connection) => connection .transactions_dal() - .insert_transaction_l2(tx, execution_metrics) + .insert_transaction_l2(tx, execution_metrics, validation_traces) .await .inspect(|submission_res_handle| { APP_METRICS.processed_txs[&TxStage::Mempool(*submission_res_handle)].inc(); diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 38794fe71371..91fb84ab8f17 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -1,6 +1,6 @@ //! Helper module to submit transactions into the ZKsync Network. -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use anyhow::Context as _; use tokio::sync::RwLock; @@ -9,7 +9,10 @@ use zksync_dal::{ transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal, }; use zksync_multivm::{ - interface::{OneshotTracingParams, TransactionExecutionMetrics, VmExecutionResultAndLogs}, + interface::{ + tracer::TimestampAsserterParams as TracerTimestampAsserterParams, OneshotTracingParams, + TransactionExecutionMetrics, VmExecutionResultAndLogs, + }, utils::{derive_base_fee_and_gas_per_pubdata, get_max_batch_gas_limit}, }; use zksync_node_fee_model::{ApiFeeInputProvider, BatchFeeModelInputProvider}; @@ -21,16 +24,16 @@ use zksync_state_keeper::{ use zksync_types::{ api::state_override::StateOverride, fee_model::BatchFeeInput, - get_intrinsic_constants, + get_intrinsic_constants, h256_to_u256, l2::{error::TxCheckError::TxDuplication, L2Tx}, transaction_request::CallOverrides, utils::storage_key_for_eth_balance, + vm::FastVmMode, AccountTreeId, Address, L2ChainId, Nonce, ProtocolVersionId, Transaction, H160, H256, MAX_NEW_FACTORY_DEPS, U256, }; -use zksync_utils::h256_to_u256; use zksync_vm_executor::oneshot::{ - CallOrExecute, EstimateGas, MultiVMBaseSystemContracts, OneshotEnvParameters, + CallOrExecute, EstimateGas, MultiVmBaseSystemContracts, OneshotEnvParameters, }; pub(super) use self::{gas_estimation::BinarySearchKind, result::SubmitTxError}; @@ -89,6 +92,7 @@ pub async fn build_tx_sender( /// Oneshot executor options used by the API server sandbox. 
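/// The sandbox uses the legacy VM by default (`fast_vm_mode: FastVmMode::Old`); callers can
/// opt in to the fast VM via [`Self::set_fast_vm_mode()`]. A minimal usage sketch (argument
/// names are illustrative; the constructor signature is as in this diff):
///
/// ```ignore
/// let mut options = SandboxExecutorOptions::new(chain_id, fee_account_id, u32::MAX).await?;
/// options.set_fast_vm_mode(FastVmMode::Shadow); // tests shadow the fast VM against the old one
/// ```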
#[derive(Debug)] pub struct SandboxExecutorOptions { + pub(crate) fast_vm_mode: FastVmMode, /// Env parameters to be used when estimating gas. pub(crate) estimate_gas: OneshotEnvParameters, /// Env parameters to be used when performing `eth_call` requests. @@ -105,15 +109,16 @@ impl SandboxExecutorOptions { validation_computational_gas_limit: u32, ) -> anyhow::Result { let estimate_gas_contracts = - tokio::task::spawn_blocking(MultiVMBaseSystemContracts::load_estimate_gas_blocking) + tokio::task::spawn_blocking(MultiVmBaseSystemContracts::load_estimate_gas_blocking) .await .context("failed loading base contracts for gas estimation")?; let call_contracts = - tokio::task::spawn_blocking(MultiVMBaseSystemContracts::load_eth_call_blocking) + tokio::task::spawn_blocking(MultiVmBaseSystemContracts::load_eth_call_blocking) .await .context("failed loading base contracts for calls / tx execution")?; Ok(Self { + fast_vm_mode: FastVmMode::Old, estimate_gas: OneshotEnvParameters::new( Arc::new(estimate_gas_contracts), chain_id, @@ -129,6 +134,11 @@ impl SandboxExecutorOptions { }) } + /// Sets the fast VM mode used by this executor. + pub fn set_fast_vm_mode(&mut self, fast_vm_mode: FastVmMode) { + self.fast_vm_mode = fast_vm_mode; + } + pub(crate) async fn mock() -> Self { Self::new(L2ChainId::default(), AccountTreeId::default(), u32::MAX) .await @@ -197,6 +207,12 @@ impl TxSenderBuilder { executor_options, storage_caches, missed_storage_invocation_limit, + self.config.timestamp_asserter_params.clone().map(|params| { + TracerTimestampAsserterParams { + address: params.address, + min_time_till_end: params.min_time_till_end, + } + }), ); TxSender(Arc::new(TxSenderInner { @@ -226,6 +242,13 @@ pub struct TxSenderConfig { pub validation_computational_gas_limit: u32, pub chain_id: L2ChainId, pub whitelisted_tokens_for_aa: Vec
, + pub timestamp_asserter_params: Option, +} + +#[derive(Debug, Clone)] +pub struct TimestampAsserterParams { + pub address: Address, + pub min_time_till_end: Duration, } impl TxSenderConfig { @@ -234,6 +257,7 @@ impl TxSenderConfig { web3_json_config: &Web3JsonRpcConfig, fee_account_addr: Address, chain_id: L2ChainId, + timestamp_asserter_params: Option, ) -> Self { Self { fee_account_addr, @@ -245,6 +269,7 @@ impl TxSenderConfig { .validation_computational_gas_limit, chain_id, whitelisted_tokens_for_aa: web3_json_config.whitelisted_tokens_for_aa.clone(), + timestamp_asserter_params, } } } @@ -353,14 +378,15 @@ impl TxSender { if !execution_output.are_published_bytecodes_ok { return Err(SubmitTxError::FailedToPublishCompressedBytecodes); } - let mut stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::DbInsert); self.ensure_tx_executable(&tx.clone().into(), &execution_output.metrics, true)?; + + let validation_traces = validation_result?; let submission_res_handle = self .0 .tx_sink - .submit_tx(&tx, execution_output.metrics) + .submit_tx(&tx, execution_output.metrics, validation_traces) .await?; match submission_res_handle { diff --git a/core/node/api_server/src/tx_sender/proxy.rs b/core/node/api_server/src/tx_sender/proxy.rs index 536a9767c1f2..bba462404cf0 100644 --- a/core/node/api_server/src/tx_sender/proxy.rs +++ b/core/node/api_server/src/tx_sender/proxy.rs @@ -11,7 +11,7 @@ use zksync_dal::{ helpers::wait_for_l1_batch, transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal, DalError, }; -use zksync_multivm::interface::TransactionExecutionMetrics; +use zksync_multivm::interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_types::{api, l2::L2Tx, Address, Nonce, H256, U256}; use zksync_web3_decl::{ @@ -309,6 +309,7 @@ impl TxSink for TxProxy { &self, tx: &L2Tx, _execution_metrics: TransactionExecutionMetrics, + _validation_traces: ValidationTraces, ) -> Result { // We're running an external node: we have to proxy the transaction to the main node. 
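// (The `_validation_traces` argument exists only to satisfy the updated `TxSink` trait;
// unlike `MasterPoolSink`, the proxy does not persist validation traces locally.)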
// But before we do that, save the tx to cache in case someone will request it @@ -416,7 +417,11 @@ mod tests { let proxy = TxProxy::new(Box::new(main_node_client)); proxy - .submit_tx(&tx, TransactionExecutionMetrics::default()) + .submit_tx( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); assert!(send_tx_called.load(Ordering::Relaxed)); @@ -525,7 +530,11 @@ mod tests { let proxy = TxProxy::new(Box::new(main_node_client)); proxy - .submit_tx(&tx, TransactionExecutionMetrics::default()) + .submit_tx( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap_err(); @@ -585,7 +594,11 @@ mod tests { // Add transaction to the cache let proxy = TxProxy::new(Box::new(main_node_client)); proxy - .submit_tx(&tx, TransactionExecutionMetrics::default()) + .submit_tx( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); assert_eq!(proxy.tx_cache.get(tx.hash()).await.unwrap(), tx); @@ -662,15 +675,27 @@ mod tests { .build(); let proxy = TxProxy::new(Box::new(main_node_client)); proxy - .submit_tx(&tx, TransactionExecutionMetrics::default()) + .submit_tx( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); proxy - .submit_tx(&replacing_tx, TransactionExecutionMetrics::default()) + .submit_tx( + &replacing_tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); proxy - .submit_tx(&future_tx, TransactionExecutionMetrics::default()) + .submit_tx( + &future_tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); { diff --git a/core/node/api_server/src/tx_sender/result.rs b/core/node/api_server/src/tx_sender/result.rs index e2a51ae8e9a8..cbc55a73c7ce 100644 --- a/core/node/api_server/src/tx_sender/result.rs +++ b/core/node/api_server/src/tx_sender/result.rs @@ -67,6 +67,8 @@ pub enum SubmitTxError { /// Catch-all internal error (e.g., database error) that should not be exposed to the caller. 
#[error("internal error")] Internal(#[from] anyhow::Error), + #[error("transaction failed block.timestamp assertion")] + FailedBlockTimestampAssertion, } impl SubmitTxError { @@ -96,6 +98,7 @@ impl SubmitTxError { Self::MintedAmountOverflow => "minted-amount-overflow", Self::ProxyError(_) => "proxy-error", Self::Internal(_) => "internal", + Self::FailedBlockTimestampAssertion => "failed-block-timestamp-assertion", } } @@ -133,6 +136,9 @@ impl From for SubmitTxError { SandboxExecutionError::FailedToPayForTransaction(reason) => { Self::FailedToChargeFee(reason) } + SandboxExecutionError::FailedBlockTimestampAssertion => { + Self::FailedBlockTimestampAssertion + } } } } diff --git a/core/node/api_server/src/tx_sender/tests/call.rs b/core/node/api_server/src/tx_sender/tests/call.rs index e43f55b2b9af..08571790e8eb 100644 --- a/core/node/api_server/src/tx_sender/tests/call.rs +++ b/core/node/api_server/src/tx_sender/tests/call.rs @@ -238,7 +238,8 @@ async fn eth_call_with_load_test_transactions() { }, LoadnextContractExecutionParams { reads: 100, - writes: 100, + initial_writes: 100, + repeated_writes: 100, ..LoadnextContractExecutionParams::empty() }, ]; diff --git a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs index 4528d9cda12f..954792f915cc 100644 --- a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs @@ -7,10 +7,10 @@ use test_casing::{test_casing, Product}; use zksync_system_constants::CODE_ORACLE_ADDRESS; use zksync_types::{ api::state_override::{OverrideAccount, OverrideState}, + bytecode::BytecodeHash, web3::keccak256, K256PrivateKey, }; -use zksync_utils::bytecode::hash_bytecode; use super::*; use crate::{ @@ -74,6 +74,28 @@ async fn initial_estimate_for_load_test_transaction(tx_params: LoadnextContractE test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; } +#[tokio::test] +async fn initial_gas_estimate_for_l1_transaction() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(0).build(); + let tx = alice.create_l1_counter_tx(1.into(), false); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) + .await + .unwrap(); + estimator.adjust_transaction_fee(); + let initial_estimate = estimator.initialize().await.unwrap(); + assert!(initial_estimate.total_gas_charged.is_none()); + + let (vm_result, _) = estimator.unadjusted_step(15_000).await.unwrap(); + assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + let (vm_result, _) = estimator.unadjusted_step(1_000_000).await.unwrap(); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + #[test_casing(2, [false, true])] #[tokio::test] async fn initial_estimate_for_deep_recursion(with_reads: bool) { @@ -94,7 +116,7 @@ async fn initial_estimate_for_deep_recursion(with_reads: bool) { (75, 1.2), (100, 1.4), (125, 1.7), - (150, 2.1), + (150, 2.2), ] }; for &(recursion_depth, multiplier) in depths_and_multipliers { @@ -194,7 +216,7 @@ async fn initial_estimate_for_code_oracle_tx() { // Add another contract that is never executed, but has a large bytecode. 
let huge_contact_address = Address::repeat_byte(23); let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; - let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); + let huge_contract_bytecode_hash = BytecodeHash::for_bytecode(&huge_contract_bytecode).value(); let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); let state_override = StateBuilder::default() @@ -218,7 +240,7 @@ async fn initial_estimate_for_code_oracle_tx() { (*contract.account_id.address() == CODE_ORACLE_ADDRESS).then_some(&contract.bytecode) }) .expect("no code oracle"); - let code_oracle_bytecode_hash = hash_bytecode(code_oracle_bytecode); + let code_oracle_bytecode_hash = BytecodeHash::for_bytecode(code_oracle_bytecode).value(); let code_oracle_keccak_hash = H256(keccak256(code_oracle_bytecode)); let warm_bytecode_hashes = [ @@ -322,9 +344,10 @@ async fn insufficient_funds_error_for_transfer() { async fn test_estimating_gas( state_override: StateOverride, - tx: L2Tx, + tx: impl Into, acceptable_overestimation: u64, ) { + let tx = tx.into(); let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; let block_args = pending_block_args(&tx_sender).await; @@ -332,7 +355,7 @@ async fn test_estimating_gas( let fee_scale_factor = 1.0; let fee = tx_sender .get_txs_fee_in_wei( - tx.clone().into(), + tx.clone(), block_args.clone(), fee_scale_factor, acceptable_overestimation, @@ -350,7 +373,7 @@ async fn test_estimating_gas( let fee = tx_sender .get_txs_fee_in_wei( - tx.into(), + tx, block_args, fee_scale_factor, acceptable_overestimation, @@ -383,6 +406,15 @@ async fn estimating_gas_for_transfer(acceptable_overestimation: u64) { test_estimating_gas(state_override, tx, acceptable_overestimation).await; } +#[tokio::test] +async fn estimating_gas_for_l1_transaction() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(0).build(); + let tx = alice.create_l1_counter_tx(1.into(), false); + + test_estimating_gas(state_override, tx, 0).await; +} + #[test_casing(10, Product((LOAD_TEST_CASES, [0, 100])))] #[tokio::test] async fn estimating_gas_for_load_test_tx( @@ -412,7 +444,7 @@ async fn estimating_gas_for_code_oracle_tx() { // Add another contract that is never executed, but has a large bytecode. let huge_contact_address = Address::repeat_byte(23); let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; - let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); + let huge_contract_bytecode_hash = BytecodeHash::for_bytecode(&huge_contract_bytecode).value(); let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); let state_override = StateBuilder::default() diff --git a/core/node/api_server/src/tx_sender/tests/mod.rs b/core/node/api_server/src/tx_sender/tests/mod.rs index cacd616202d2..014bc5636c2d 100644 --- a/core/node/api_server/src/tx_sender/tests/mod.rs +++ b/core/node/api_server/src/tx_sender/tests/mod.rs @@ -1,9 +1,9 @@ //! Tests for the transaction sender. 
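//! Covers shared fixtures for the submodules (`call`, `gas_estimation`, `send_tx`), including
//! the `LOAD_TEST_CASES` matrix and a `create_real_tx_sender` helper that shadows the fast VM.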
use test_casing::TestCases; -use zksync_contracts::test_contracts::LoadnextContractExecutionParams; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot}; +use zksync_test_contracts::LoadnextContractExecutionParams; use zksync_types::{get_nonce_key, L1BatchNumber, L2BlockNumber, StorageLog}; use zksync_vm_executor::oneshot::MockOneshotExecutor; @@ -18,7 +18,8 @@ const LOAD_TEST_CASES: TestCases = test_casing: LoadnextContractExecutionParams::default(), // No storage modification LoadnextContractExecutionParams { - writes: 0, + initial_writes: 0, + repeated_writes: 0, events: 0, ..LoadnextContractExecutionParams::default() }, @@ -145,16 +146,17 @@ async fn create_real_tx_sender(pool: ConnectionPool) -> TxSender { drop(storage); let genesis_config = genesis_params.config(); - let executor_options = SandboxExecutorOptions::new( + let mut executor_options = SandboxExecutorOptions::new( genesis_config.l2_chain_id, AccountTreeId::new(genesis_config.fee_account), u32::MAX, ) .await .unwrap(); + executor_options.set_fast_vm_mode(FastVmMode::Shadow); let pg_caches = PostgresStorageCaches::new(1, 1); - let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX); + let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX, None); create_test_tx_sender(pool, genesis_params.config().l2_chain_id, tx_executor) .await .0 diff --git a/core/node/api_server/src/tx_sender/tests/send_tx.rs b/core/node/api_server/src/tx_sender/tests/send_tx.rs index fdd63254cf07..c861f04a832e 100644 --- a/core/node/api_server/src/tx_sender/tests/send_tx.rs +++ b/core/node/api_server/src/tx_sender/tests/send_tx.rs @@ -1,8 +1,11 @@ //! Tests for sending raw transactions. +use std::ops::Range; + use assert_matches::assert_matches; +use chrono::NaiveDateTime; use test_casing::test_casing; -use zksync_multivm::interface::ExecutionResult; +use zksync_multivm::interface::{tracer::ValidationTraces, ExecutionResult}; use zksync_node_fee_model::MockBatchFeeParamsProvider; use zksync_node_test_utils::create_l2_transaction; use zksync_types::K256PrivateKey; @@ -54,6 +57,16 @@ async fn submitting_tx_requires_one_connection() { .await .unwrap() .expect("transaction is not persisted"); + + let storage_tx = storage + .transactions_dal() + .get_storage_tx_by_hash(tx_hash) + .await + .unwrap() + .expect("transaction is not persisted"); + // verify that no validation traces have been persisted + assert!(storage_tx.timestamp_asserter_range_start.is_none()); + assert!(storage_tx.timestamp_asserter_range_end.is_none()); } #[tokio::test] @@ -298,3 +311,88 @@ async fn sending_transaction_out_of_gas() { let (_, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); assert_matches!(vm_result.result, ExecutionResult::Revert { ..
}); } + +async fn submit_tx_with_validation_traces(actual_range: Range, expected_range: Range) { + // This test verifies that when a transaction produces ValidationTraces, + // range_start and range_end get persisted in the database + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let l2_chain_id = L2ChainId::default(); + let fee_input = MockBatchFeeParamsProvider::default() + .get_batch_fee_input_scaled(1.0, 1.0) + .await + .unwrap(); + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); + let tx = create_l2_transaction(base_fee, gas_per_pubdata); + let tx_hash = tx.hash(); + + // Manually set sufficient balance for the tx initiator. + StateBuilder::default() + .with_balance(tx.initiator_account(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let mut tx_executor = MockOneshotExecutor::default(); + tx_executor.set_tx_responses(move |received_tx, _| { + assert_eq!(received_tx.hash(), tx_hash); + ExecutionResult::Success { output: vec![] } + }); + tx_executor.set_tx_validation_traces_responses(move |tx, _| { + assert_eq!(tx.hash(), tx_hash); + ValidationTraces { + timestamp_asserter_range: Some(actual_range.clone()), + } + }); + + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let block_args = pending_block_args(&tx_sender).await; + + let submission_result = tx_sender.submit_tx(tx, block_args).await.unwrap(); + assert_matches!(submission_result.0, L2TxSubmissionResult::Added); + + let mut storage = pool.connection().await.unwrap(); + let storage_tx = storage + .transactions_dal() + .get_storage_tx_by_hash(tx_hash) + .await + .unwrap() + .expect("transaction is not persisted"); + assert_eq!( + expected_range.start, + storage_tx + .timestamp_asserter_range_start + .unwrap() + .and_utc() + .timestamp() + ); + assert_eq!( + expected_range.end, + storage_tx + .timestamp_asserter_range_end + .unwrap() + .and_utc() + .timestamp() + ); +} + +#[tokio::test] +async fn submitting_tx_with_validation_traces() { + // This test verifies that when a transaction produces ValidationTraces, + // range_start and range_end get persisted in the database + submit_tx_with_validation_traces(10..20, 10..20).await; +} + +#[tokio::test] +async fn submitting_tx_with_validation_traces_resulting_into_overflow() { + // This test verifies that the timestamp in ValidationTraces is capped at + // the maximum value supported by the NaiveDateTime type + submit_tx_with_validation_traces(10..u64::MAX, 10..NaiveDateTime::MAX.and_utc().timestamp()) + .await; +} diff --git a/core/node/api_server/src/tx_sender/tx_sink.rs b/core/node/api_server/src/tx_sender/tx_sink.rs index 3d764816fe0d..1a6a7a733ccd 100644 --- a/core/node/api_server/src/tx_sender/tx_sink.rs +++ b/core/node/api_server/src/tx_sender/tx_sink.rs @@ -1,5 +1,5 @@ use zksync_dal::{transactions_dal::L2TxSubmissionResult, Connection, Core}; -use zksync_multivm::interface::TransactionExecutionMetrics; +use zksync_multivm::interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use zksync_types::{ api::{Transaction, TransactionDetails, TransactionId}, l2::L2Tx, @@ -28,6 +28,7 @@ pub trait TxSink: std::fmt::Debug + Send + Sync + 'static { &self, tx: &L2Tx, execution_metrics: TransactionExecutionMetrics, + 
validation_traces: ValidationTraces, ) -> Result; /// Attempts to look up the pending nonce for the account in the sink-specific storage. diff --git a/core/node/api_server/src/utils.rs b/core/node/api_server/src/utils.rs index c7a1134682bf..6769e773dc77 100644 --- a/core/node/api_server/src/utils.rs +++ b/core/node/api_server/src/utils.rs @@ -6,33 +6,9 @@ use std::{ time::{Duration, Instant}, }; -use anyhow::Context; use zksync_dal::{Connection, Core, DalError}; -use zksync_multivm::circuit_sequencer_api_latest::boojum::ethereum_types::U256; use zksync_web3_decl::error::Web3Error; -pub(crate) fn prepare_evm_bytecode(raw: &[u8]) -> anyhow::Result> { - // EVM bytecodes are prefixed with a big-endian `U256` bytecode length. - let bytecode_len_bytes = raw.get(..32).context("length < 32")?; - let bytecode_len = U256::from_big_endian(bytecode_len_bytes); - let bytecode_len: usize = bytecode_len - .try_into() - .map_err(|_| anyhow::anyhow!("length ({bytecode_len}) overflow"))?; - let bytecode = raw.get(32..(32 + bytecode_len)).with_context(|| { - format!( - "prefixed length ({bytecode_len}) exceeds real length ({})", - raw.len() - 32 - ) - })?; - // Since slicing above succeeded, this one is safe. - let padding = &raw[(32 + bytecode_len)..]; - anyhow::ensure!( - padding.iter().all(|&b| b == 0), - "bytecode padding contains non-zero bytes" - ); - Ok(bytecode.to_vec()) -} - /// Opens a readonly transaction over the specified connection. pub(crate) async fn open_readonly_transaction<'r>( conn: &'r mut Connection<'_, Core>, @@ -90,15 +66,3 @@ macro_rules! report_filter { ReportFilter::new($interval, &LAST_TIMESTAMP) }}; } - -#[cfg(test)] -mod tests { - use super::*; - use crate::testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}; - - #[test] - fn preparing_evm_bytecode() { - let prepared = prepare_evm_bytecode(RAW_EVM_BYTECODE).unwrap(); - assert_eq!(prepared, PROCESSED_EVM_BYTECODE); - } -} diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs index cc2209a35d39..342756013752 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs @@ -268,4 +268,8 @@ impl EthNamespaceServer for EthNamespace { .await .map_err(|err| self.current_method().map_err(err)) } + + async fn max_priority_fee_per_gas(&self) -> RpcResult { + Ok(self.max_priority_fee_per_gas_impl()) + } } diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs index 31c8f15bb1ea..21f3f5ae49e1 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs @@ -58,6 +58,10 @@ impl ZksNamespaceServer for ZksNamespace { Ok(self.get_bridge_contracts_impl().await) } + async fn get_timestamp_asserter(&self) -> RpcResult> { + Ok(self.get_timestamp_asserter_impl()) + } + async fn l1_chain_id(&self) -> RpcResult { Ok(self.l1_chain_id_impl()) } diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index 5206cd3bc2bb..2765de2c2892 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -6,13 +6,14 @@ use zksync_types::{ state_override::StateOverride, BlockId, BlockNumber, FeeHistory, GetLogsFilter, Transaction, TransactionId, TransactionReceipt, TransactionVariant, }, + 
bytecode::{trim_padded_evm_bytecode, BytecodeMarker}, l2::{L2Tx, TransactionType}, transaction_request::CallRequest, + u256_to_h256, utils::decompose_full_nonce, web3::{self, Bytes, SyncInfo, SyncState}, AccountTreeId, L2BlockNumber, StorageKey, H256, L2_BASE_TOKEN_ADDRESS, U256, }; -use zksync_utils::{bytecode::BytecodeMarker, u256_to_h256}; use zksync_web3_decl::{ error::Web3Error, types::{Address, Block, Filter, FilterChanges, Log, U64}, @@ -21,7 +22,7 @@ use zksync_web3_decl::{ use crate::{ execution_sandbox::BlockArgs, tx_sender::BinarySearchKind, - utils::{open_readonly_transaction, prepare_evm_bytecode}, + utils::open_readonly_transaction, web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, state::RpcState, TypedFilter}, }; @@ -403,12 +404,14 @@ impl EthNamespace { // Check if the bytecode is an EVM bytecode, and if so, pre-process it correspondingly. let marker = BytecodeMarker::new(contract_code.bytecode_hash); let prepared_bytecode = if marker == Some(BytecodeMarker::Evm) { - prepare_evm_bytecode(&contract_code.bytecode).with_context(|| { - format!( - "malformed EVM bytecode at address {address:?}, hash = {:?}", - contract_code.bytecode_hash - ) - })? + trim_padded_evm_bytecode(&contract_code.bytecode) + .with_context(|| { + format!( + "malformed EVM bytecode at address {address:?}, hash = {:?}", + contract_code.bytecode_hash + ) + })? + .to_vec() } else { contract_code.bytecode }; @@ -863,6 +866,11 @@ impl EthNamespace { } }) } + + pub fn max_priority_fee_per_gas_impl(&self) -> U256 { + // ZKsync does not require priority fee. + 0u64.into() + } } // Bogus methods. diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs index bcfd7daf3461..05c90f0b0140 100644 --- a/core/node/api_server/src/web3/namespaces/zks.rs +++ b/core/node/api_server/src/web3/namespaces/zks.rs @@ -7,12 +7,14 @@ use zksync_mini_merkle_tree::MiniMerkleTree; use zksync_multivm::interface::VmExecutionResultAndLogs; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ + address_to_h256, api::{ state_override::StateOverride, BlockDetails, BridgeAddresses, GetLogsFilter, L1BatchDetails, L2ToL1LogProof, Proof, ProtocolVersion, StorageProof, TransactionDetails, }, fee::Fee, fee_model::{FeeParams, PubdataIndependentBatchFeeModelInput}, + h256_to_u256, l1::L1Tx, l2::L2Tx, l2_to_l1_log::{l2_to_l1_logs_tree_size, L2ToL1Log}, @@ -23,7 +25,6 @@ use zksync_types::{ AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, Transaction, L1_MESSENGER_ADDRESS, L2_BASE_TOKEN_ADDRESS, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, U64, }; -use zksync_utils::{address_to_h256, h256_to_u256}; use zksync_web3_decl::{ error::Web3Error, types::{Address, Token, H256}, @@ -151,6 +152,10 @@ impl ZksNamespace { self.state.bridge_addresses_handle.read().await } + pub fn get_timestamp_asserter_impl(&self) -> Option
{ + self.state.api_config.timestamp_asserter_address + } + pub fn l1_chain_id_impl(&self) -> U64 { U64::from(*self.state.api_config.l1_chain_id) } diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs index 18c206eaf584..d43771811ee0 100644 --- a/core/node/api_server/src/web3/state.rs +++ b/core/node/api_server/src/web3/state.rs @@ -115,6 +115,7 @@ pub struct InternalApiConfig { pub filters_disabled: bool, pub dummy_verifier: bool, pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, + pub timestamp_asserter_address: Option
, } impl InternalApiConfig { @@ -146,6 +147,7 @@ impl InternalApiConfig { .l1_weth_bridge_proxy_addr .unwrap_or_default(), ), + l2_legacy_shared_bridge: contracts_config.l2_legacy_shared_bridge_addr, }, bridgehub_proxy_addr: contracts_config .ecosystem_contracts @@ -167,6 +169,7 @@ impl InternalApiConfig { filters_disabled: web3_config.filters_disabled, dummy_verifier: genesis_config.dummy_verifier, l1_batch_commit_data_generator_mode: genesis_config.l1_batch_commit_data_generator_mode, + timestamp_asserter_address: contracts_config.l2_timestamp_asserter_addr, } } } diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs index 2d642b9a04b8..540ea085711b 100644 --- a/core/node/api_server/src/web3/testonly.rs +++ b/core/node/api_server/src/web3/testonly.rs @@ -34,6 +34,7 @@ pub(crate) async fn create_test_tx_sender( &web3_config, wallets.state_keeper.unwrap().fee_account.address(), l2_chain_id, + None, ); let storage_caches = PostgresStorageCaches::new(1, 1); diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index c83279709a30..feac8eb8d17f 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -19,10 +19,9 @@ use zksync_config::{ use zksync_contracts::BaseSystemContracts; use zksync_dal::{transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, CoreDal}; use zksync_multivm::interface::{ - TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent, - VmExecutionMetrics, + tracer::ValidationTraces, TransactionExecutionMetrics, TransactionExecutionResult, + TxExecutionStatus, VmEvent, VmExecutionMetrics, }; -use zksync_node_fee_model::BatchFeeModelInputProvider; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{ create_l1_batch, create_l1_batch_metadata, create_l2_block, create_l2_transaction, @@ -34,6 +33,10 @@ use zksync_system_constants::{ use zksync_types::{ api, block::{pack_block_info, L2BlockHasher, L2BlockHeader}, + bytecode::{ + testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}, + BytecodeHash, + }, fee_model::{BatchFeeInput, FeeParams}, get_nonce_key, l2::L2Tx, @@ -41,14 +44,11 @@ use zksync_types::{ system_contracts::get_system_smart_contracts, tokens::{TokenInfo, TokenMetadata}, tx::IncludedTxLocation, + u256_to_h256, utils::{storage_key_for_eth_balance, storage_key_for_standard_token_balance}, AccountTreeId, Address, L1BatchNumber, Nonce, ProtocolVersionId, StorageKey, StorageLog, H256, U256, U64, }; -use zksync_utils::{ - bytecode::{hash_bytecode, hash_evm_bytecode}, - u256_to_h256, -}; use zksync_vm_executor::oneshot::MockOneshotExecutor; use zksync_web3_decl::{ client::{Client, DynClient, L2}, @@ -65,11 +65,7 @@ use zksync_web3_decl::{ }; use super::*; -use crate::{ - testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}, - tx_sender::SandboxExecutorOptions, - web3::testonly::TestServerBuilder, -}; +use crate::{tx_sender::SandboxExecutorOptions, web3::testonly::TestServerBuilder}; mod debug; mod filters; @@ -365,7 +361,11 @@ async fn store_custom_l2_block( let l2_tx = result.transaction.clone().try_into().unwrap(); let tx_submission_result = storage .transactions_dal() - .insert_transaction_l2(&l2_tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &l2_tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); assert_matches!(tx_submission_result, 
L2TxSubmissionResult::Added); @@ -476,11 +476,7 @@ async fn store_events( } fn scaled_sensible_fee_input(scale: f64) -> BatchFeeInput { - ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - scale, - scale, - ) + FeeParams::sensible_v1_default().scale(scale, scale) } #[derive(Debug)] @@ -680,7 +676,7 @@ impl HttpTest for StorageAccessWithSnapshotRecovery { fn storage_initialization(&self) -> StorageInitialization { let address = Address::repeat_byte(1); let code_key = get_code_key(&address); - let code_hash = hash_bytecode(&[0; 32]); + let code_hash = BytecodeHash::for_bytecode(&[0; 32]).value(); let balance_key = storage_key_for_eth_balance(&address); let logs = vec![ StorageLog::new_write_log(code_key, code_hash), @@ -776,7 +772,11 @@ impl HttpTest for TransactionCountTest { pending_tx.common_data.nonce = Nonce(2); storage .transactions_dal() - .insert_transaction_l2(&pending_tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &pending_tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); @@ -856,7 +856,11 @@ impl HttpTest for TransactionCountAfterSnapshotRecoveryTest { let mut storage = pool.connection().await?; storage .transactions_dal() - .insert_transaction_l2(&pending_tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &pending_tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); @@ -1167,7 +1171,7 @@ impl GetBytecodeTest { at_block: L2BlockNumber, address: Address, ) -> anyhow::Result<()> { - let evm_bytecode_hash = hash_evm_bytecode(RAW_EVM_BYTECODE); + let evm_bytecode_hash = BytecodeHash::for_evm_bytecode(RAW_EVM_BYTECODE).value(); let code_log = StorageLog::new_write_log(get_code_key(&address), evm_bytecode_hash); connection .storage_logs_dal() diff --git a/core/node/api_server/src/web3/tests/unstable.rs b/core/node/api_server/src/web3/tests/unstable.rs index 1d425f8b9515..e814081afa02 100644 --- a/core/node/api_server/src/web3/tests/unstable.rs +++ b/core/node/api_server/src/web3/tests/unstable.rs @@ -27,14 +27,9 @@ impl HttpTest for GetTeeProofsTest { assert!(proof.is_empty()); - let mut storage = pool.connection().await.unwrap(); - storage - .tee_verifier_input_producer_dal() - .create_tee_verifier_input_producer_job(batch_no) - .await?; - let pubkey = vec![0xDE, 0xAD, 0xBE, 0xEF]; let attestation = vec![0xC0, 0xFF, 0xEE]; + let mut storage = pool.connection().await.unwrap(); let mut tee_proof_generation_dal = storage.tee_proof_generation_dal(); tee_proof_generation_dal .save_attestation(&pubkey, &attestation) diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index 45128f579cda..a82ca3b9e347 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -16,10 +16,9 @@ use zksync_multivm::interface::{ }; use zksync_types::{ api::ApiStorageLog, fee_model::BatchFeeInput, get_intrinsic_constants, - transaction_request::CallRequest, K256PrivateKey, L2ChainId, PackedEthSignature, - StorageLogKind, StorageLogWithPreviousValue, Transaction, U256, + transaction_request::CallRequest, u256_to_h256, vm::FastVmMode, K256PrivateKey, L2ChainId, + PackedEthSignature, StorageLogKind, StorageLogWithPreviousValue, Transaction, U256, }; -use zksync_utils::u256_to_h256; use zksync_vm_executor::oneshot::{ BaseSystemContractsProvider, ContractsKind, MockOneshotExecutor, OneshotEnvParameters, ResolvedBlockInfo, @@ -92,6 +91,7 @@ impl 
BaseSystemContractsProvider for BaseContractsWithMockE fn executor_options_with_evm_emulator() -> SandboxExecutorOptions { let base_contracts = Arc::::default(); SandboxExecutorOptions { + fast_vm_mode: FastVmMode::Old, estimate_gas: OneshotEnvParameters::new( base_contracts.clone(), L2ChainId::default(), @@ -637,11 +637,8 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { assert_eq!(env.l1_batch.first_l2_block.number, 1); VmExecutionResultAndLogs { - result: ExecutionResult::Success { output: vec![] }, logs: vm_execution_logs.clone(), - statistics: Default::default(), - refunds: Default::default(), - new_known_factory_deps: None, + ..VmExecutionResultAndLogs::mock_success() } }); tx_executor diff --git a/core/node/base_token_adjuster/Cargo.toml b/core/node/base_token_adjuster/Cargo.toml index 9dcf5d796530..b326e7a6b42d 100644 --- a/core/node/base_token_adjuster/Cargo.toml +++ b/core/node/base_token_adjuster/Cargo.toml @@ -19,7 +19,6 @@ zksync_external_price_api.workspace = true zksync_contracts.workspace = true zksync_eth_client.workspace = true zksync_node_fee_model.workspace = true -zksync_utils.workspace = true vise.workspace = true bigdecimal.workspace = true diff --git a/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs b/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs index 0199b06ebd69..599aba36f3e9 100644 --- a/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs +++ b/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs @@ -6,7 +6,7 @@ use std::{ }; use anyhow::Context; -use bigdecimal::{num_bigint::ToBigInt, BigDecimal, Zero}; +use bigdecimal::{BigDecimal, Zero}; use zksync_config::BaseTokenAdjusterConfig; use zksync_eth_client::{BoundEthInterface, CallFunctionArgs, Options}; use zksync_node_fee_model::l1_gas_price::TxParamsProvider; @@ -57,7 +57,7 @@ impl BaseTokenL1Behaviour { self.update_last_persisted_l1_ratio(prev_ratio.clone()); tracing::info!( "Fetched current base token ratio from the L1: {}", - prev_ratio.to_bigint().unwrap() + prev_ratio ); prev_ratio }; @@ -71,7 +71,7 @@ impl BaseTokenL1Behaviour { "Skipping L1 update. current_ratio {}, previous_ratio {}, deviation {}", current_ratio, prev_ratio, - deviation.to_bigint().unwrap() + deviation ); return Ok(()); } @@ -98,7 +98,7 @@ impl BaseTokenL1Behaviour { new_ratio.denominator.get(), base_fee_per_gas, priority_fee_per_gas, - deviation.to_bigint().unwrap() + deviation ); METRICS .l1_gas_used @@ -220,10 +220,16 @@ impl BaseTokenL1Behaviour { if receipt.status == Some(1.into()) { return Ok(receipt.gas_used); } + let reason = (*l1_params.eth_client) + .as_ref() + .failure_reason(hash) + .await + .context("failed getting failure reason of `setTokenMultiplier` transaction")?; return Err(anyhow::Error::msg(format!( - "`setTokenMultiplier` transaction {:?} failed with status {:?}", + "`setTokenMultiplier` transaction {:?} failed with status {:?}, reason: {:?}", hex::encode(hash), - receipt.status + receipt.status, + reason ))); } else { tokio::time::sleep(sleep_duration).await; diff --git a/core/node/base_token_adjuster/src/base_token_ratio_provider.rs b/core/node/base_token_adjuster/src/base_token_ratio_provider.rs index e16ea16ff0f5..b613e5219dd2 100644 --- a/core/node/base_token_adjuster/src/base_token_ratio_provider.rs +++ b/core/node/base_token_adjuster/src/base_token_ratio_provider.rs @@ -81,7 +81,7 @@ impl DBBaseTokenRatioProvider { // Though the DB should be populated very soon after the server starts, it is possible // to have no ratios in the DB right after genesis. 
Having initial ratios in the DB // from the genesis stage will eliminate this possibility. - tracing::error!("No latest price found in the database. Using default ratio."); + tracing::warn!("No latest price found in the database. Using default ratio."); BaseTokenConversionRatio::default() } Err(err) => anyhow::bail!("Failed to get latest base token ratio: {:?}", err), diff --git a/core/node/block_reverter/src/tests.rs b/core/node/block_reverter/src/tests.rs index 85d894b7fd57..b2c4ee6465f6 100644 --- a/core/node/block_reverter/src/tests.rs +++ b/core/node/block_reverter/src/tests.rs @@ -67,6 +67,7 @@ async fn setup_storage(storage: &mut Connection<'_, Core>, storage_logs: &[Stora virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; storage .blocks_dal() diff --git a/core/node/commitment_generator/Cargo.toml b/core/node/commitment_generator/Cargo.toml index 5ec8410124fc..f0b4046bab42 100644 --- a/core/node/commitment_generator/Cargo.toml +++ b/core/node/commitment_generator/Cargo.toml @@ -16,10 +16,10 @@ zksync_types.workspace = true zksync_dal.workspace = true zksync_health_check.workspace = true zksync_l1_contract_interface.workspace = true -zksync_utils.workspace = true zksync_eth_client.workspace = true zksync_contracts.workspace = true zksync_multivm.workspace = true +zksync_system_constants.workspace = true circuit_sequencer_api_1_4_0.workspace = true circuit_sequencer_api_1_4_1.workspace = true circuit_sequencer_api_1_5_0.workspace = true diff --git a/core/node/commitment_generator/src/lib.rs b/core/node/commitment_generator/src/lib.rs index cf6971b041c6..2ce0152abab6 100644 --- a/core/node/commitment_generator/src/lib.rs +++ b/core/node/commitment_generator/src/lib.rs @@ -9,17 +9,20 @@ use zksync_l1_contract_interface::i_executor::commit::kzg::pubdata_to_blob_commi use zksync_types::{ blob::num_blobs_required, commitment::{ - AuxCommitments, CommitmentCommonInput, CommitmentInput, L1BatchAuxiliaryOutput, + AuxCommitments, BlobHash, CommitmentCommonInput, CommitmentInput, L1BatchAuxiliaryOutput, L1BatchCommitment, L1BatchCommitmentArtifacts, L1BatchCommitmentMode, }, + h256_to_u256, writes::{InitialStorageWrite, RepeatedStorageWrite, StateDiffRecord}, L1BatchNumber, ProtocolVersionId, StorageKey, H256, U256, }; -use zksync_utils::h256_to_u256; use crate::{ metrics::{CommitmentStage, METRICS}, - utils::{convert_vm_events_to_log_queries, CommitmentComputer, RealCommitmentComputer}, + utils::{ + convert_vm_events_to_log_queries, pubdata_to_blob_linear_hashes, read_aggregation_root, + CommitmentComputer, RealCommitmentComputer, + }, }; mod metrics; @@ -263,14 +266,40 @@ impl CommitmentGenerator { } state_diffs.sort_unstable_by_key(|rec| (rec.address, rec.key)); - let blob_commitments = if protocol_version.is_post_1_4_2() { + let blob_hashes = if protocol_version.is_post_1_4_2() { let pubdata_input = header.pubdata_input.with_context(|| { format!("`pubdata_input` is missing for L1 batch #{l1_batch_number}") })?; - pubdata_to_blob_commitments(num_blobs_required(&protocol_version), &pubdata_input) + let commitments = pubdata_to_blob_commitments( + num_blobs_required(&protocol_version), + &pubdata_input, + ); + let linear_hashes = pubdata_to_blob_linear_hashes( + num_blobs_required(&protocol_version), + pubdata_input, + ); + + commitments + .into_iter() + .zip(linear_hashes) + .map(|(commitment, linear_hash)| BlobHash { + commitment, + linear_hash, + }) + .collect::<Vec<_>>() } else { -
vec![Default::default(); num_blobs_required(&protocol_version)] + }; + + let aggregation_root = if protocol_version.is_pre_gateway() { + let mut connection = self + .connection_pool + .connection_tagged("commitment_generator") + .await?; + read_aggregation_root(&mut connection, l1_batch_number).await? + } else { + H256::zero() }; CommitmentInput::PostBoojum { @@ -278,7 +307,8 @@ impl CommitmentGenerator { system_logs: header.system_logs, state_diffs, aux_commitments, - blob_commitments, + blob_hashes, + aggregation_root, } }; @@ -357,14 +387,10 @@ impl CommitmentGenerator { (L1BatchCommitmentMode::Rollup, _) => { // Do nothing } - - ( - L1BatchCommitmentMode::Validium, - CommitmentInput::PostBoojum { - blob_commitments, .. - }, - ) => { - blob_commitments.fill(H256::zero()); + (L1BatchCommitmentMode::Validium, CommitmentInput::PostBoojum { blob_hashes, .. }) => { + for hashes in blob_hashes { + hashes.commitment = H256::zero(); + } } (L1BatchCommitmentMode::Validium, _) => { /* Do nothing */ } } @@ -374,14 +400,9 @@ impl CommitmentGenerator { match (self.commitment_mode, &mut commitment.auxiliary_output) { ( L1BatchCommitmentMode::Validium, - L1BatchAuxiliaryOutput::PostBoojum { - blob_linear_hashes, - blob_commitments, - .. - }, + L1BatchAuxiliaryOutput::PostBoojum { blob_hashes, .. }, ) => { - blob_linear_hashes.fill(H256::zero()); - blob_commitments.fill(H256::zero()); + blob_hashes.fill(Default::default()); } _ => { /* Do nothing */ } } diff --git a/core/node/commitment_generator/src/utils.rs b/core/node/commitment_generator/src/utils.rs index 86643b6b581b..cc44d7a03c71 100644 --- a/core/node/commitment_generator/src/utils.rs +++ b/core/node/commitment_generator/src/utils.rs @@ -2,6 +2,7 @@ use std::fmt; +use anyhow::Context; use itertools::Itertools; use zk_evm_1_3_3::{ aux_structures::Timestamp as Timestamp_1_3_3, @@ -15,13 +16,18 @@ use zk_evm_1_5_0::{ aux_structures::Timestamp as Timestamp_1_5_0, zk_evm_abstractions::queries::LogQuery as LogQuery_1_5_0, }; +use zksync_dal::{Connection, Core, CoreDal}; +use zksync_l1_contract_interface::i_executor::commit::kzg::ZK_SYNC_BYTES_PER_BLOB; use zksync_multivm::{interface::VmEvent, utils::get_used_bootloader_memory_bytes}; +use zksync_system_constants::message_root::{AGG_TREE_HEIGHT_KEY, AGG_TREE_NODES_KEY}; use zksync_types::{ + address_to_u256, h256_to_u256, u256_to_h256, vm::VmVersion, + web3::keccak256, zk_evm_types::{LogQuery, Timestamp}, - ProtocolVersionId, EVENT_WRITER_ADDRESS, H256, U256, + AccountTreeId, L1BatchNumber, ProtocolVersionId, StorageKey, EVENT_WRITER_ADDRESS, H256, + L2_MESSAGE_ROOT_ADDRESS, U256, }; -use zksync_utils::{address_to_u256, expand_memory_contents, h256_to_u256}; /// Encapsulates computations of commitment components. 
/// @@ -68,7 +74,8 @@ impl CommitmentComputer for RealCommitmentComputer { ), )), VmVersion::Vm1_5_0SmallBootloaderMemory - | VmVersion::Vm1_5_0IncreasedBootloaderMemory => Ok(H256( + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => Ok(H256( circuit_sequencer_api_1_5_0::commitments::events_queue_commitment_fixed( &events_queue .iter() @@ -106,7 +113,8 @@ impl CommitmentComputer for RealCommitmentComputer { ), )), VmVersion::Vm1_5_0SmallBootloaderMemory - | VmVersion::Vm1_5_0IncreasedBootloaderMemory => Ok(H256( + | VmVersion::Vm1_5_0IncreasedBootloaderMemory + | VmVersion::VmGateway => Ok(H256( circuit_sequencer_api_1_5_0::commitments::initial_heap_content_commitment_fixed( &full_bootloader_memory, ), @@ -116,6 +124,15 @@ impl CommitmentComputer for RealCommitmentComputer { } } +fn expand_memory_contents(packed: &[(usize, U256)], memory_size_bytes: usize) -> Vec { + let mut result: Vec = vec![0; memory_size_bytes]; + + for (offset, value) in packed { + value.to_big_endian(&mut result[(offset * 32)..(offset + 1) * 32]); + } + + result +} fn to_log_query_1_3_3(log_query: LogQuery) -> LogQuery_1_3_3 { LogQuery_1_3_3 { timestamp: Timestamp_1_3_3(log_query.timestamp.0), @@ -234,3 +251,75 @@ pub(crate) fn convert_vm_events_to_log_queries(events: &[VmEvent]) -> Vec, +) -> Vec { + // Now, we need to calculate the linear hashes of the blobs. + // Firstly, let's pad the pubdata to the size of the blob. + if pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB != 0 { + pubdata_input.resize( + pubdata_input.len() + + (ZK_SYNC_BYTES_PER_BLOB - pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB), + 0, + ); + } + + let mut result = vec![H256::zero(); blobs_required]; + + pubdata_input + .chunks(ZK_SYNC_BYTES_PER_BLOB) + .enumerate() + .for_each(|(i, chunk)| { + result[i] = H256(keccak256(chunk)); + }); + + result +} + +pub(crate) async fn read_aggregation_root( + connection: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, +) -> anyhow::Result { + let (_, last_l2_block) = connection + .blocks_dal() + .get_l2_block_range_of_l1_batch(l1_batch_number) + .await? + .context("No range for batch")?; + + let agg_tree_height_slot = StorageKey::new( + AccountTreeId::new(L2_MESSAGE_ROOT_ADDRESS), + H256::from_low_u64_be(AGG_TREE_HEIGHT_KEY as u64), + ); + + let agg_tree_height = connection + .storage_web3_dal() + .get_historical_value_unchecked(agg_tree_height_slot.hashed_key(), last_l2_block) + .await?; + let agg_tree_height = h256_to_u256(agg_tree_height); + + // `nodes[height][0]` + let agg_tree_root_hash_key = + n_dim_array_key_in_layout(AGG_TREE_NODES_KEY, &[agg_tree_height, U256::zero()]); + let agg_tree_root_hash_slot = StorageKey::new( + AccountTreeId::new(L2_MESSAGE_ROOT_ADDRESS), + agg_tree_root_hash_key, + ); + + Ok(connection + .storage_web3_dal() + .get_historical_value_unchecked(agg_tree_root_hash_slot.hashed_key(), last_l2_block) + .await?) 
+} + +fn n_dim_array_key_in_layout(array_key: usize, indices: &[U256]) -> H256 { + let mut key: H256 = u256_to_h256(array_key.into()); + + for index in indices { + key = H256(keccak256(key.as_bytes())); + key = u256_to_h256(h256_to_u256(key).overflowing_add(*index).0); + } + + key +} diff --git a/core/node/consensus/Cargo.toml b/core/node/consensus/Cargo.toml index fdcc9089e339..427454221c84 100644 --- a/core/node/consensus/Cargo.toml +++ b/core/node/consensus/Cargo.toml @@ -30,7 +30,6 @@ zksync_state_keeper.workspace = true zksync_node_sync.workspace = true zksync_system_constants.workspace = true zksync_types.workspace = true -zksync_utils.workspace = true zksync_web3_decl.workspace = true zksync_state.workspace = true zksync_vm_executor.workspace = true @@ -43,12 +42,13 @@ thiserror.workspace = true tracing.workspace = true tokio.workspace = true semver.workspace = true +vise.workspace = true [dev-dependencies] zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true zksync_node_api_server.workspace = true -zksync_test_account.workspace = true +zksync_test_contracts.workspace = true test-casing.workspace = true rand.workspace = true diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index 518a7ebb29aa..e417b68cf2cb 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -17,13 +17,14 @@ use zksync_web3_decl::{ use super::{config, storage::Store, ConsensusConfig, ConsensusSecrets}; use crate::{ + metrics::METRICS, registry, storage::{self, ConnectionPool}, }; -/// If less than TEMPORARY_FETCHER_THRESHOLD certificates are missing, -/// the temporary fetcher will stop fetching blocks. -pub(crate) const TEMPORARY_FETCHER_THRESHOLD: u64 = 10; +/// Whenever more than FALLBACK_FETCHER_THRESHOLD certificates are missing, +/// the fallback fetcher is active. +pub(crate) const FALLBACK_FETCHER_THRESHOLD: u64 = 10; /// External node. pub(super) struct EN { @@ -35,14 +36,6 @@ pub(super) struct EN { impl EN { /// Task running a consensus node for the external node. /// It may be a validator, but it cannot be a leader (cannot propose blocks). - /// - /// If `enable_pregenesis` is false, - /// before starting the consensus node it fetches all the blocks - /// older than consensus genesis from the main node using json RPC. - /// NOTE: currently `enable_pregenesis` is hardcoded to `false` in `era.rs`. - /// True is used only in tests. Once the `block_metadata` RPC is enabled everywhere - /// this flag should be removed and fetching pregenesis blocks will always be done - /// over the gossip network. pub async fn run( self, ctx: &ctx::Ctx, @@ -50,7 +43,6 @@ impl EN { cfg: ConsensusConfig, secrets: ConsensusSecrets, build_version: Option, - enable_pregenesis: bool, ) -> anyhow::Result<()> { let attester = config::attester_key(&secrets).context("attester_key")?; @@ -74,24 +66,13 @@ impl EN { .await .wrap("try_update_global_config()")?; - let mut payload_queue = conn + let payload_queue = conn .new_payload_queue(ctx, actions, self.sync_state.clone()) .await .wrap("new_payload_queue()")?; drop(conn); - // Fetch blocks before the genesis. - if !enable_pregenesis { - self.fetch_blocks( - ctx, - &mut payload_queue, - Some(global_config.genesis.first_block), - ) - .await - .wrap("fetch_blocks()")?; - } - // Monitor the genesis of the main node. // If it changes, it means that a hard fork occurred and we need to reset the consensus state. 
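Note: the new `n_dim_array_key_in_layout` helper above reimplements Solidity's storage layout rule for nested arrays/mappings, which is how `read_aggregation_root` locates `nodes[agg_tree_height][0]` inside the `L2_MESSAGE_ROOT_ADDRESS` account: at each dimension the parent slot is hashed and the index is added. A minimal standalone sketch of that rule, assuming the `sha3` crate and an illustrative base slot (neither is part of this PR):

use sha3::{Digest, Keccak256};

/// Adds a `u64` index to a 32-byte big-endian integer in place (with carry),
/// approximating the `overflowing_add` on `U256` used by the helper.
fn add_index(key: &mut [u8; 32], index: u64) {
    let mut carry = index as u128;
    for byte in key.iter_mut().rev() {
        if carry == 0 {
            break;
        }
        let sum = *byte as u128 + (carry & 0xff);
        *byte = sum as u8;
        carry = (carry >> 8) + (sum >> 8);
    }
}

/// Solidity layout rule: `slot(arr[i]) = keccak256(slot(arr)) + i`,
/// applied once per dimension for nested arrays.
fn nested_slot(base_slot: [u8; 32], indices: &[u64]) -> [u8; 32] {
    let mut key = base_slot;
    for &index in indices {
        let digest = Keccak256::digest(key);
        key.copy_from_slice(&digest);
        add_index(&mut key, index);
    }
    key
}

fn main() {
    // Hypothetical stand-in for a base slot of 4 and `nodes[7][0]`.
    let mut base = [0u8; 32];
    base[31] = 4;
    let slot = nested_slot(base, &[7, 0]);
    println!("0x{}", slot.iter().map(|b| format!("{b:02x}")).collect::<String>());
}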
s.spawn_bg::<()>({ @@ -100,7 +81,12 @@ let old = old; loop { if let Ok(new) = self.fetch_global_config(ctx).await { - if new != old { + // We verify the transition here to work around the situation + // where the `consensus_global_config()` RPC fails randomly and the fallback + // to the `consensus_genesis()` RPC activates. + if new != old + && consensus_dal::verify_config_transition(&old, &new).is_ok() + { return Err(anyhow::format_err!( "global config changed: old {old:?}, new {new:?}" ) @@ -122,7 +108,7 @@ ) .await .wrap("Store::new()")?; - s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("Store::runner()")?) }); // Run the temporary fetcher until the certificates are backfilled. // Temporary fetcher should be removed once json RPC syncing is fully deprecated. @@ -130,25 +116,34 @@ let store = store.clone(); async { let store = store; - self.temporary_block_fetcher(ctx, &store).await?; - tracing::info!( - "temporary block fetcher finished, switching to p2p fetching only" - ); - Ok(()) + self.fallback_block_fetcher(ctx, &store) + .await + .wrap("fallback_block_fetcher()") } }); let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) .await .wrap("BlockStore::new()")?; - s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("BlockStore::run()")?) }); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(self.run_attestation_controller( - ctx, - global_config.clone(), - attestation.clone(), - )); + s.spawn_bg({ + let global_config = global_config.clone(); + let attestation = attestation.clone(); + async { + let res = self + .run_attestation_controller(ctx, global_config, attestation) + .await + .wrap("run_attestation_controller()"); + // Attestation currently is not critical for the node to function. + // If it fails, we just log the error and continue. + if let Err(err) = res { + tracing::error!("attestation controller failed: {err:#}"); + } + Ok(()) + } + }); let executor = executor::Executor { config: config::executor(&cfg, &secrets, &global_config, build_version)?, @@ -183,7 +178,7 @@ tracing::warn!("\ WARNING: this node is using ZKsync API synchronization, which will be deprecated soon. \ Please follow this instruction to switch to p2p synchronization: \ - https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/09_decentralization.md"); + https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/10_decentralization.md"); let res: ctx::Result<()> = scope::run!(ctx, |ctx, s| async { // Update sync state in the background. s.spawn_bg(self.fetch_state_loop(ctx)); @@ -195,7 +190,7 @@ .new_payload_queue(ctx, actions, self.sync_state.clone()) .await .wrap("new_fetcher_cursor()")?; - self.fetch_blocks(ctx, &mut payload_queue, None).await + self.fetch_blocks(ctx, &mut payload_queue).await }) .await; match res { @@ -217,7 +212,11 @@ let mut next = attester::BatchNumber(0); loop { let status = loop { - match self.fetch_attestation_status(ctx).await { + match self + .fetch_attestation_status(ctx) + .await + .wrap("fetch_attestation_status()") + { Err(err) => tracing::warn!("{err:#}"), Ok(status) => { if status.genesis != cfg.genesis.hash() { @@ -362,9 +361,14 @@ } /// Fetches (with retries) the given block from the main node.
- async fn fetch_block(&self, ctx: &ctx::Ctx, n: L2BlockNumber) -> ctx::Result { + async fn fetch_block( + &self, + ctx: &ctx::Ctx, + n: validator::BlockNumber, + ) -> ctx::Result { const RETRY_INTERVAL: time::Duration = time::Duration::seconds(5); - + let n = L2BlockNumber(n.0.try_into().context("overflow")?); + METRICS.fetch_block.inc(); loop { match ctx.wait(self.client.sync_l2_block(n, true)).await? { Ok(Some(block)) => return Ok(block.try_into()?), @@ -376,9 +380,8 @@ impl EN { } } - /// Fetches blocks from the main node directly, until the certificates - /// are backfilled. This allows for smooth transition from json RPC to p2p block syncing. - pub(crate) async fn temporary_block_fetcher( + /// Fetches blocks from the main node directly whenever the EN is lagging behind too much. + pub(crate) async fn fallback_block_fetcher( &self, ctx: &ctx::Ctx, store: &Store, @@ -386,65 +389,63 @@ impl EN { const MAX_CONCURRENT_REQUESTS: usize = 30; scope::run!(ctx, |ctx, s| async { let (send, mut recv) = ctx::channel::bounded(MAX_CONCURRENT_REQUESTS); - s.spawn(async { - let Some(mut next) = store.next_block(ctx).await? else { - return Ok(()); - }; - while store.persisted().borrow().next().0 + TEMPORARY_FETCHER_THRESHOLD < next.0 { - let n = L2BlockNumber(next.0.try_into().context("overflow")?); - self.sync_state.wait_for_main_node_block(ctx, n).await?; - send.send(ctx, s.spawn(self.fetch_block(ctx, n))).await?; + // TODO: metrics. + s.spawn::<()>(async { + let send = send; + let is_lagging = + |main| main >= store.persisted().borrow().next() + FALLBACK_FETCHER_THRESHOLD; + let mut next = store.next_block(ctx).await.wrap("next_block()")?; + loop { + // Wait until p2p syncing is lagging. + self.sync_state + .wait_for_main_node_block(ctx, is_lagging) + .await?; + // Determine the next block to fetch and wait for it to be available. + next = next.max(store.next_block(ctx).await.wrap("next_block()")?); + self.sync_state + .wait_for_main_node_block(ctx, |main| main >= next) + .await?; + // Fetch the block asynchronously. + send.send(ctx, s.spawn(self.fetch_block(ctx, next))).await?; next = next.next(); } - drop(send); - Ok(()) }); - while let Ok(block) = recv.recv_or_disconnected(ctx).await? { + loop { + let block = recv.recv(ctx).await?; store .queue_next_fetched_block(ctx, block.join(ctx).await?) .await .wrap("queue_next_fetched_block()")?; } - Ok(()) }) .await } - /// Fetches blocks from the main node in range `[cursor.next()..end)`. + /// Fetches blocks starting with `queue.next()`. 
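Note on the refactor above: `fallback_block_fetcher` and `fetch_blocks` now share one pipelining shape. A producer task spawns fetches and pushes their join handles into a bounded channel, and the consumer joins the handles in send order, so blocks are handed over strictly sequentially even with many fetches in flight, while the channel bound backpressures the producer once the consumer falls behind. A tokio-only sketch of the pattern, with `fetch` as a placeholder for the retried `sync_l2_block` call:

use tokio::{sync::mpsc, task::JoinHandle};

async fn fetch(n: u64) -> String {
    // Placeholder for fetching block `n` from the main node with retries.
    format!("block {n}")
}

#[tokio::main]
async fn main() {
    const MAX_CONCURRENT_REQUESTS: usize = 30;
    let (send, mut recv) = mpsc::channel::<JoinHandle<String>>(MAX_CONCURRENT_REQUESTS);

    // Producer: schedules fetches; `send` blocks once the pipeline is full.
    tokio::spawn(async move {
        for n in 0..100 {
            send.send(tokio::spawn(fetch(n))).await.unwrap();
        }
    });

    // Consumer: joins handles in send order, so results arrive in block order
    // regardless of how the concurrent fetches interleave.
    while let Some(handle) = recv.recv().await {
        let block = handle.await.unwrap();
        println!("{block}"); // `queue.send(block)` in the real code.
    }
}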
async fn fetch_blocks( &self, ctx: &ctx::Ctx, queue: &mut storage::PayloadQueue, - end: Option<validator::BlockNumber>, ) -> ctx::Result<()> { const MAX_CONCURRENT_REQUESTS: usize = 30; - let first = queue.next(); - let mut next = first; + let mut next = queue.next(); scope::run!(ctx, |ctx, s| async { let (send, mut recv) = ctx::channel::bounded(MAX_CONCURRENT_REQUESTS); - s.spawn(async { + s.spawn::<()>(async { let send = send; - while end.map_or(true, |end| next < end) { - let n = L2BlockNumber(next.0.try_into().context("overflow")?); - self.sync_state.wait_for_main_node_block(ctx, n).await?; - send.send(ctx, s.spawn(self.fetch_block(ctx, n))).await?; + loop { + self.sync_state + .wait_for_main_node_block(ctx, |main| main >= next) + .await?; + send.send(ctx, s.spawn(self.fetch_block(ctx, next))).await?; next = next.next(); } - Ok(()) }); - while end.map_or(true, |end| queue.next() < end) { + loop { let block = recv.recv(ctx).await?.join(ctx).await?; - queue.send(block).await?; + queue.send(block).await.context("queue.send()")?; } - Ok(()) }) - .await?; - // If fetched anything, wait for the last block to be stored persistently. - if first < queue.next() { - self.pool - .wait_for_payload(ctx, queue.next().prev().unwrap()) - .await?; - } - Ok(()) + .await } } diff --git a/core/node/consensus/src/era.rs b/core/node/consensus/src/era.rs index 916b7cdd89a5..3150f839680e 100644 --- a/core/node/consensus/src/era.rs +++ b/core/node/consensus/src/era.rs @@ -59,18 +59,8 @@ pub async fn run_external_node( is_validator = secrets.validator_key.is_some(), "running external node" ); - // We will enable it once the main node on all envs supports - // `block_metadata()` JSON RPC method. - let enable_pregenesis = false; - en.run( - ctx, - actions, - cfg, - secrets, - Some(build_version), - enable_pregenesis, - ) - .await + en.run(ctx, actions, cfg, secrets, Some(build_version)) + .await } None => { tracing::info!("running fetcher"); diff --git a/core/node/consensus/src/lib.rs b/core/node/consensus/src/lib.rs index 8bf078120aa9..d89aa5f5e829 100644 --- a/core/node/consensus/src/lib.rs +++ b/core/node/consensus/src/lib.rs @@ -9,6 +9,7 @@ mod abi; mod config; mod en; pub mod era; +mod metrics; mod mn; mod registry; mod storage; diff --git a/core/node/consensus/src/metrics.rs b/core/node/consensus/src/metrics.rs new file mode 100644 index 000000000000..f53bb9320917 --- /dev/null +++ b/core/node/consensus/src/metrics.rs @@ -0,0 +1,13 @@ +//! Consensus-related metrics. + +#[derive(Debug, vise::Metrics)] +#[metrics(prefix = "zksync_node_consensus")] +pub(crate) struct Metrics { + /// Number of blocks that have been fetched via JSON-RPC. + /// It is used only as a fallback when p2p syncing is disabled or falling behind, + /// so it shouldn't be increasing under normal circumstances if p2p syncing is enabled. + pub fetch_block: vise::Counter, +} + +#[vise::register] +pub(super) static METRICS: vise::Global<Metrics> = vise::Global::new(); diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index 5abbdc3503b3..a392acfbe5f0 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -10,7 +10,7 @@ use zksync_dal::consensus_dal; use crate::{ config, registry, - storage::{ConnectionPool, InsertCertificateError, Store}, + storage::{ConnectionPool, Store}, }; /// Task running a consensus validator for the main node.
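Note: `metrics.rs` above introduces the crate's first vise metric; the increment happens in `fetch_block` (`METRICS.fetch_block.inc()`). Condensed, the definition, registration, and use fit together as follows (same vise API the diff itself uses; names shortened):

use vise::{Counter, Global, Metrics};

#[derive(Debug, Metrics)]
#[metrics(prefix = "zksync_node_consensus")]
struct ConsensusMetrics {
    /// Bumped once per block fetched over JSON-RPC.
    fetch_block: Counter,
}

#[vise::register]
static METRICS: Global<ConsensusMetrics> = Global::new();

fn on_block_fetched() {
    // With p2p syncing healthy this stays flat; growth signals RPC fallback.
    METRICS.fetch_block.inc();
}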
@@ -30,7 +30,7 @@ pub async fn run_main_node( tracing::debug!(is_attester = attester.is_some(), "main node attester mode"); - scope::run!(&ctx, |ctx, s| async { + let res: ctx::Result<()> = scope::run!(&ctx, |ctx, s| async { if let Some(spec) = &cfg.genesis_spec { let spec = config::GenesisSpec::parse(spec).context("GenesisSpec::parse()")?; @@ -46,7 +46,7 @@ pub async fn run_main_node( let (store, runner) = Store::new(ctx, pool.clone(), None, None) .await .wrap("Store::new()")?; - s.spawn_bg(runner.run(ctx)); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("Store::runner()")?) }); let global_config = pool .connection(ctx) @@ -56,25 +56,36 @@ pub async fn run_main_node( .await .wrap("global_config()")? .context("global_config() disappeared")?; - anyhow::ensure!( - global_config.genesis.leader_selection - == validator::LeaderSelectionMode::Sticky(validator_key.public()), - "unsupported leader selection mode - main node has to be the leader" - ); + if global_config.genesis.leader_selection + != validator::LeaderSelectionMode::Sticky(validator_key.public()) + { + return Err(anyhow::format_err!( + "unsupported leader selection mode - main node has to be the leader" + ) + .into()); + } let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) .await .wrap("BlockStore::new()")?; - s.spawn_bg(runner.run(ctx)); + s.spawn_bg(async { Ok(runner.run(ctx).await.context("BlockStore::run()")?) }); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(run_attestation_controller( - ctx, - &pool, - global_config.clone(), - attestation.clone(), - )); - + s.spawn_bg({ + let global_config = global_config.clone(); + let attestation = attestation.clone(); + async { + let res = run_attestation_controller(ctx, &pool, global_config, attestation) + .await + .wrap("run_attestation_controller()"); + // Attestation currently is not critical for the node to function. + // If it fails, we just log the error and continue. + if let Err(err) = res { + tracing::error!("attestation controller failed: {err:#}"); + } + Ok(()) + } + }); let executor = executor::Executor { config: config::executor(&cfg, &secrets, &global_config, None)?, block_store, @@ -87,9 +98,14 @@ pub async fn run_main_node( }; tracing::info!("running the main node executor"); - executor.run(ctx).await + executor.run(ctx).await.context("executor")?; + Ok(()) }) - .await + .await; + match res { + Ok(()) | Err(ctx::Error::Canceled(_)) => Ok(()), + Err(ctx::Error::Internal(err)) => Err(err), + } } /// Manages attestation state by configuring the @@ -100,94 +116,84 @@ async fn run_attestation_controller( pool: &ConnectionPool, cfg: consensus_dal::GlobalConfig, attestation: Arc, -) -> anyhow::Result<()> { +) -> ctx::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); let registry = registry::Registry::new(cfg.genesis, pool.clone()).await; let registry_addr = cfg.registry_address.map(registry::Address::new); let mut next = attester::BatchNumber(0); - let res = async { - loop { - // After regenesis it might happen that the batch number for the first block - // is not immediately known (the first block was not produced yet), - // therefore we need to wait for it. - let status = loop { - match pool - .connection(ctx) - .await - .wrap("connection()")? - .attestation_status(ctx) - .await - .wrap("attestation_status()")? 
- { - Some(status) if status.next_batch_to_attest >= next => break status, - _ => {} - } - ctx.sleep(POLL_INTERVAL).await?; - }; - next = status.next_batch_to_attest.next(); - tracing::info!( - "waiting for hash of batch {:?}", - status.next_batch_to_attest - ); - let info = pool - .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) - .await?; - let hash = consensus_dal::batch_hash(&info); - let Some(committee) = registry - .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) - .await - .wrap("attester_committee_for()")? - else { - tracing::info!("attestation not required"); - continue; - }; - let committee = Arc::new(committee); - // Persist the derived committee. - pool.connection(ctx) - .await - .wrap("connection")? - .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) - .await - .wrap("upsert_attester_committee()")?; - tracing::info!( - "attesting batch {:?} with hash {hash:?}", - status.next_batch_to_attest - ); - attestation - .start_attestation(Arc::new(attestation::Info { - batch_to_attest: attester::Batch { - hash, - number: status.next_batch_to_attest, - genesis: status.genesis, - }, - committee, - })) - .await - .context("start_attestation()")?; - // Main node is the only node which can update the global AttestationStatus, - // therefore we can synchronously wait for the certificate. - let qc = attestation - .wait_for_cert(ctx, status.next_batch_to_attest) - .await? - .context("attestation config has changed unexpectedly")?; - tracing::info!( - "collected certificate for batch {:?}", - status.next_batch_to_attest - ); - pool.connection(ctx) + loop { + // After regenesis it might happen that the batch number for the first block + // is not immediately known (the first block was not produced yet), + // therefore we need to wait for it. + let status = loop { + match pool + .connection(ctx) .await .wrap("connection()")? - .insert_batch_certificate(ctx, &qc) + .attestation_status(ctx) .await - .map_err(|err| match err { - InsertCertificateError::Canceled(err) => ctx::Error::Canceled(err), - InsertCertificateError::Inner(err) => ctx::Error::Internal(err.into()), - })?; - } - } - .await; - match res { - Ok(()) | Err(ctx::Error::Canceled(_)) => Ok(()), - Err(ctx::Error::Internal(err)) => Err(err), + .wrap("attestation_status()")? + { + Some(status) if status.next_batch_to_attest >= next => break status, + _ => {} + } + ctx.sleep(POLL_INTERVAL).await?; + }; + next = status.next_batch_to_attest.next(); + tracing::info!( + "waiting for hash of batch {:?}", + status.next_batch_to_attest + ); + let info = pool + .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) + .await?; + let hash = consensus_dal::batch_hash(&info); + let Some(committee) = registry + .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) + .await + .wrap("attester_committee_for()")? + else { + tracing::info!("attestation not required"); + continue; + }; + let committee = Arc::new(committee); + // Persist the derived committee. + pool.connection(ctx) + .await + .wrap("connection")? 
+ .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) + .await + .wrap("upsert_attester_committee()")?; + tracing::info!( + "attesting batch {:?} with hash {hash:?}", + status.next_batch_to_attest + ); + attestation + .start_attestation(Arc::new(attestation::Info { + batch_to_attest: attester::Batch { + hash, + number: status.next_batch_to_attest, + genesis: status.genesis, + }, + committee, + })) + .await + .context("start_attestation()")?; + // Main node is the only node which can update the global AttestationStatus, + // therefore we can synchronously wait for the certificate. + let qc = attestation + .wait_for_cert(ctx, status.next_batch_to_attest) + .await? + .context("attestation config has changed unexpectedly")?; + tracing::info!( + "collected certificate for batch {:?}", + status.next_batch_to_attest + ); + pool.connection(ctx) + .await + .wrap("connection()")? + .insert_batch_certificate(ctx, &qc) + .await + .wrap("insert_batch_certificate()")?; } } diff --git a/core/node/consensus/src/registry/testonly.rs b/core/node/consensus/src/registry/testonly.rs index 07a87e3b676e..8742d9e52c63 100644 --- a/core/node/consensus/src/registry/testonly.rs +++ b/core/node/consensus/src/registry/testonly.rs @@ -1,7 +1,7 @@ use rand::Rng; use zksync_consensus_crypto::ByteFmt; use zksync_consensus_roles::{attester, validator}; -use zksync_test_account::Account; +use zksync_test_contracts::Account; use zksync_types::{ethabi, Execute, Transaction, U256}; use super::*; @@ -74,7 +74,7 @@ impl Registry { let tx = account.get_deploy_tx( &abi::ConsensusRegistry::bytecode(), None, - zksync_test_account::TxType::L2, + zksync_test_contracts::TxType::L2, ); (Address::new(tx.address), tx.tx) } diff --git a/core/node/consensus/src/registry/tests.rs b/core/node/consensus/src/registry/tests.rs index 89afc20e1d57..15329077a651 100644 --- a/core/node/consensus/src/registry/tests.rs +++ b/core/node/consensus/src/registry/tests.rs @@ -1,7 +1,7 @@ use rand::Rng as _; use zksync_concurrency::{ctx, scope, time}; use zksync_consensus_roles::{attester, validator::testonly::Setup}; -use zksync_test_account::Account; +use zksync_test_contracts::Account; use zksync_types::ProtocolVersionId; use super::*; diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index c30398498a94..6ec5794e968d 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -158,7 +158,7 @@ impl<'a> Connection<'a> { &mut self, ctx: &ctx::Ctx, cert: &attester::BatchQC, - ) -> Result<(), super::InsertCertificateError> { + ) -> ctx::Result<()> { Ok(ctx .wait(self.0.consensus_dal().insert_batch_certificate(cert)) .await??) diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index 7267d7e1c822..c42e78658dc2 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -38,6 +38,7 @@ fn to_fetched_block( l1_gas_price: payload.l1_gas_price, l2_fair_gas_price: payload.l2_fair_gas_price, fair_pubdata_price: payload.fair_pubdata_price, + pubdata_params: payload.pubdata_params, virtual_blocks: payload.virtual_blocks, operator_address: payload.operator_address, transactions: payload @@ -113,14 +114,12 @@ impl Store { } /// Number of the next block to queue. 
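Note on the `run_main_node` / attestation-controller changes above: tasks now uniformly return `ctx::Result<()>` and attach context via `wrap()`, and only the outermost caller downgrades `ctx::Error::Canceled` to a clean `Ok(())` while surfacing `Internal` errors. A simplified, self-contained sketch of that convention (`CtxError` is a stand-in for `zksync_concurrency`'s error type, not the real API):

#[derive(Debug)]
enum CtxError {
    Canceled,
    Internal(anyhow::Error),
}

fn run_node() -> Result<(), CtxError> {
    // ... spawn sub-tasks; each failure is wrapped with context on the way up ...
    Ok(())
}

fn main() -> anyhow::Result<()> {
    match run_node() {
        // Cancellation is an orderly shutdown, not a failure.
        Ok(()) | Err(CtxError::Canceled) => Ok(()),
        Err(CtxError::Internal(err)) => Err(err),
    }
}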
- pub(crate) async fn next_block( - &self, - ctx: &ctx::Ctx, - ) -> ctx::OrCanceled> { + pub(crate) async fn next_block(&self, ctx: &ctx::Ctx) -> ctx::Result { Ok(sync::lock(ctx, &self.block_payloads) .await? .as_ref() - .map(|p| p.next())) + .context("payload_queue not set")? + .next()) } /// Queues the next block. @@ -254,9 +253,7 @@ impl StoreRunner { Err(InsertCertificateError::Canceled(err)) => { return Err(ctx::Error::Canceled(err)) } - Err(InsertCertificateError::Inner(err)) => { - return Err(ctx::Error::Internal(anyhow::Error::from(err))) - } + Err(err) => Err(err).context("insert_block_certificate()")?, } } diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs index 2aed011d23cf..0f29e2468267 100644 --- a/core/node/consensus/src/storage/testonly.rs +++ b/core/node/consensus/src/storage/testonly.rs @@ -73,7 +73,8 @@ impl ConnectionPool { L1BatchNumber(23), L2BlockNumber(87), vec![], - mock_genesis_params(protocol_version), + &BaseSystemContracts::load_from_disk(), + protocol_version, )) .await } diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 98c0d6b08131..225a38aee760 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -30,25 +30,20 @@ use zksync_state_keeper::{ executor::MainBatchExecutorFactory, io::{IoCursor, L1BatchParams, L2BlockParams}, seal_criteria::NoopSealer, - testonly::{ - fund, l1_transaction, l2_transaction, test_batch_executor::MockReadStorageFactory, - MockBatchExecutor, - }, + testonly::{fee, fund, test_batch_executor::MockReadStorageFactory, MockBatchExecutor}, AsyncRocksdbCache, OutputHandler, StateKeeperPersistence, TreeWritesPersistence, ZkSyncStateKeeper, }; -use zksync_test_account::Account; +use zksync_test_contracts::Account; use zksync_types::{ ethabi, fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput}, - L1BatchNumber, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, Transaction, + Address, Execute, L1BatchNumber, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, + Transaction, }; use zksync_web3_decl::client::{Client, DynClient, L2}; -use crate::{ - en, - storage::{ConnectionPool, Store}, -}; +use crate::{en, storage::ConnectionPool}; /// Fake StateKeeper for tests. #[derive(Debug)] @@ -73,7 +68,6 @@ pub(super) struct ConfigSet { net: network::Config, pub(super) config: config::ConsensusConfig, pub(super) secrets: config::ConsensusSecrets, - pub(super) enable_pregenesis: bool, } impl ConfigSet { @@ -83,17 +77,11 @@ impl ConfigSet { config: make_config(&net, None), secrets: make_secrets(&net, None), net, - enable_pregenesis: self.enable_pregenesis, } } } -pub(super) fn new_configs( - rng: &mut impl Rng, - setup: &Setup, - seed_peers: usize, - pregenesis: bool, -) -> Vec { +pub(super) fn new_configs(rng: &mut impl Rng, setup: &Setup, seed_peers: usize) -> Vec { let net_cfgs = network::testonly::new_configs(rng, setup, 0); let genesis_spec = config::GenesisSpec { chain_id: setup.genesis.chain_id.0.try_into().unwrap(), @@ -133,7 +121,6 @@ pub(super) fn new_configs( config: make_config(&net, Some(genesis_spec.clone())), secrets: make_secrets(&net, setup.attester_keys.get(i).cloned()), net, - enable_pregenesis: pregenesis, }) .collect() } @@ -219,11 +206,10 @@ impl StateKeeper { .wait(IoCursor::for_fetcher(&mut conn.0)) .await? 
.context("IoCursor::new()")?; - let batch_sealed = ctx - .wait(conn.0.blocks_dal().get_unsealed_l1_batch()) + let pending_batch = ctx + .wait(conn.0.blocks_dal().pending_batch_exists()) .await? - .context("get_unsealed_l1_batch()")? - .is_none(); + .context("pending_batch_exists()")?; let (actions_sender, actions_queue) = ActionQueue::new(); let addr = sync::watch::channel(None).0; let sync_state = SyncState::default(); @@ -259,7 +245,7 @@ impl StateKeeper { last_batch: cursor.l1_batch, last_block: cursor.next_l2_block - 1, last_timestamp: cursor.prev_l2_block_timestamp, - batch_sealed, + batch_sealed: !pending_batch, next_priority_op: PriorityOpId(1), actions_sender, sync_state: sync_state.clone(), @@ -296,6 +282,7 @@ impl StateKeeper { timestamp: self.last_timestamp, virtual_blocks: 1, }, + pubdata_params: Default::default(), }, number: self.last_batch, first_l2_block_number: self.last_block, @@ -326,12 +313,15 @@ impl StateKeeper { /// Pushes a new L2 block with `transactions` transactions to the `StateKeeper`. pub async fn push_random_block(&mut self, rng: &mut impl Rng, account: &mut Account) { let txs: Vec<_> = (0..rng.gen_range(3..8)) - .map(|_| match rng.gen() { - true => l2_transaction(account, 1_000_000), - false => { - let tx = l1_transaction(account, self.next_priority_op); - self.next_priority_op += 1; - tx + .map(|_| { + let execute = Execute::transfer(Address::random(), 0.into()); + match rng.gen() { + true => account.get_l2_tx_for_execute(execute, Some(fee(1_000_000))), + false => { + let tx = account.get_l1_tx(execute, self.next_priority_op.0); + self.next_priority_op += 1; + tx + } } }) .collect(); @@ -421,40 +411,6 @@ impl StateKeeper { .await } - pub async fn run_temporary_fetcher( - self, - ctx: &ctx::Ctx, - client: Box>, - ) -> ctx::Result<()> { - scope::run!(ctx, |ctx, s| async { - let payload_queue = self - .pool - .connection(ctx) - .await - .wrap("connection()")? - .new_payload_queue(ctx, self.actions_sender, self.sync_state.clone()) - .await - .wrap("new_payload_queue()")?; - let (store, runner) = Store::new( - ctx, - self.pool.clone(), - Some(payload_queue), - Some(client.clone()), - ) - .await - .wrap("Store::new()")?; - s.spawn_bg(async { Ok(runner.run(ctx).await?) }); - en::EN { - pool: self.pool.clone(), - client, - sync_state: self.sync_state.clone(), - } - .temporary_block_fetcher(ctx, &store) - .await - }) - .await - } - /// Runs consensus node for the external node. 
pub async fn run_consensus( self, @@ -473,7 +429,6 @@ impl StateKeeper { cfgs.config, cfgs.secrets, cfgs.net.build_version, - cfgs.enable_pregenesis, ) .await } @@ -569,9 +524,11 @@ impl StateKeeperRunner { let (stop_send, stop_recv) = sync::watch::channel(false); let (persistence, l2_block_sealer) = StateKeeperPersistence::new( self.pool.0.clone(), - ethabi::Address::repeat_byte(11), + Some(ethabi::Address::repeat_byte(11)), 5, - ); + ) + .await + .unwrap(); let io = ExternalIO::new( self.pool.0.clone(), @@ -676,9 +633,11 @@ impl StateKeeperRunner { let (stop_send, stop_recv) = sync::watch::channel(false); let (persistence, l2_block_sealer) = StateKeeperPersistence::new( self.pool.0.clone(), - ethabi::Address::repeat_byte(11), + Some(ethabi::Address::repeat_byte(11)), 5, - ); + ) + .await + .unwrap(); let tree_writes_persistence = TreeWritesPersistence::new(self.pool.0.clone()); let io = ExternalIO::new( diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index 2701a986e9e9..6f24fbe65b4c 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -1,6 +1,6 @@ use anyhow::Context as _; use rand::Rng as _; -use test_casing::{test_casing, Product}; +use test_casing::test_casing; use tracing::Instrument as _; use zksync_concurrency::{ctx, error::Wrap, scope}; use zksync_consensus_roles::{ @@ -8,11 +8,11 @@ use zksync_consensus_roles::{ validator::testonly::{Setup, SetupSpec}, }; use zksync_dal::consensus_dal; -use zksync_test_account::Account; +use zksync_test_contracts::Account; use zksync_types::ProtocolVersionId; use zksync_web3_decl::namespaces::EnNamespaceClient as _; -use super::{POLL_INTERVAL, PREGENESIS, VERSIONS}; +use super::{POLL_INTERVAL, VERSIONS}; use crate::{ mn::run_main_node, registry::{testonly, Registry}, @@ -126,9 +126,9 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { // Test running a couple of attesters (which are also validators). // Main node is expected to collect all certificates. // External nodes are expected to just vote for the batch. 
-#[test_casing(4, Product((VERSIONS,PREGENESIS)))] +#[test_casing(2, VERSIONS)] #[tokio::test] -async fn test_multiple_attesters(version: ProtocolVersionId, pregenesis: bool) { +async fn test_multiple_attesters(version: ProtocolVersionId) { const NODES: usize = 4; zksync_concurrency::testonly::abort_on_panic(); @@ -137,7 +137,7 @@ async fn test_multiple_attesters(version: ProtocolVersionId, pregenesis: bool) { let account = &mut Account::random(); let to_fund = &[account.address]; let setup = Setup::new(rng, 4); - let mut cfgs = new_configs(rng, &setup, NODES, pregenesis); + let mut cfgs = new_configs(rng, &setup, NODES); scope::run!(ctx, |ctx, s| async { let validator_pool = ConnectionPool::test(false, version).await; let (mut validator, runner) = StateKeeper::new(ctx, validator_pool.clone()).await?; diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 8da17cfba8ac..c7697ba8480e 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -11,12 +11,12 @@ use zksync_consensus_roles::{ }; use zksync_consensus_storage::{BlockStore, PersistentBlockStore}; use zksync_dal::consensus_dal; -use zksync_test_account::Account; +use zksync_test_contracts::Account; use zksync_types::ProtocolVersionId; use zksync_web3_decl::namespaces::EnNamespaceClient as _; use crate::{ - en::TEMPORARY_FETCHER_THRESHOLD, + en::FALLBACK_FETCHER_THRESHOLD, mn::run_main_node, storage::{ConnectionPool, Store}, testonly, @@ -26,7 +26,6 @@ mod attestation; const VERSIONS: [ProtocolVersionId; 2] = [ProtocolVersionId::latest(), ProtocolVersionId::next()]; const FROM_SNAPSHOT: [bool; 2] = [true, false]; -const PREGENESIS: [bool; 2] = [true, false]; const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500); #[test_casing(2, VERSIONS)] @@ -190,14 +189,14 @@ async fn test_validator_block_store(version: ProtocolVersionId) { // In the current implementation, consensus certificates are created asynchronously // for the L2 blocks constructed by the StateKeeper. This means that consensus actor // is effectively just back filling the consensus certificates for the L2 blocks in storage. -#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_validator(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { +async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { @@ -254,14 +253,14 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId, pregene } // Test running a validator node and 2 full nodes recovered from different snapshots. 
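Note: the `#[test_casing(...)]` churn in these test files is purely about case counts: `Product` expands to the cartesian product of the listed constant arrays, so dropping the 2-element `PREGENESIS` axis halves every affected test matrix (8 cases to 4, 4 to 2). A toy illustration of the mechanism (constants here are made up):

use test_casing::{test_casing, Product};

const FLAGS: [bool; 2] = [true, false];
const SIZES: [usize; 2] = [1, 2];

// Expands into 4 test cases: (true, 1), (true, 2), (false, 1), (false, 2).
#[test_casing(4, Product((FLAGS, SIZES)))]
#[test]
fn toy_test(flag: bool, size: usize) {
    assert!(size >= 1, "flag = {flag}");
}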
-#[test_casing(4, Product((VERSIONS,PREGENESIS)))] +#[test_casing(2, VERSIONS)] #[tokio::test] -async fn test_nodes_from_various_snapshots(version: ProtocolVersionId, pregenesis: bool) { +async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { @@ -335,14 +334,14 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId, pregenesi .unwrap(); } -#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { +async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let mut validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let mut validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -412,16 +411,16 @@ async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId, pre // Test running a validator node and a couple of full nodes. // Validator is producing signed blocks and fetchers are expected to fetch // them directly or indirectly. -#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { +async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { const NODES: usize = 2; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let account = &mut Account::random(); // topology: @@ -500,16 +499,16 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId, pregen } // Test running external node (non-leader) validators. -#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { +async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { const NODES: usize = 3; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, NODES); - let cfgs = testonly::new_configs(rng, &setup, 1, pregenesis); + let cfgs = testonly::new_configs(rng, &setup, 1); let account = &mut Account::random(); // Run all nodes in parallel. @@ -583,18 +582,14 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId, pre } // Test fetcher back filling missing certs. 
-#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_p2p_fetcher_backfill_certs( - from_snapshot: bool, - version: ProtocolVersionId, - pregenesis: bool, -) { +async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -668,16 +663,16 @@ async fn test_p2p_fetcher_backfill_certs( } // Test temporary fetcher fetching blocks if a lot of certs are missing. -#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_temporary_fetcher(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { +async fn test_fallback_fetcher(from_snapshot: bool, version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); // We force certs to be missing on EN by having 1 of the validators permanently offline. // This way no blocks will be finalized at all, so no one will have certs. let setup = Setup::new(rng, 2); - let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -710,7 +705,7 @@ async fn test_temporary_fetcher(from_snapshot: bool, version: ProtocolVersionId, s.spawn_bg(runner.run(ctx)); s.spawn_bg(node.run_fetcher(ctx, client.clone())); validator - .push_random_blocks(rng, account, TEMPORARY_FETCHER_THRESHOLD as usize + 1) + .push_random_blocks(rng, account, FALLBACK_FETCHER_THRESHOLD as usize + 1) .await; node_pool .wait_for_payload(ctx, validator.last_block()) @@ -720,9 +715,7 @@ async fn test_temporary_fetcher(from_snapshot: bool, version: ProtocolVersionId, .await .unwrap(); - tracing::info!( - "Run p2p fetcher. Blocks should be fetched by the temporary fetcher anyway." - ); + tracing::info!("Run p2p fetcher. Blocks should be fetched by the fallback fetcher anyway."); scope::run!(ctx, |ctx, s| async { let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); @@ -741,70 +734,14 @@ async fn test_temporary_fetcher(from_snapshot: bool, version: ProtocolVersionId, .unwrap(); } -// Test that temporary fetcher terminates once enough blocks have certs. 
-#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] -#[tokio::test] -async fn test_temporary_fetcher_termination(from_snapshot: bool, version: ProtocolVersionId) { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); - let rng = &mut ctx.rng(); - let setup = Setup::new(rng, 1); - let pregenesis = true; - let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); - let node_cfg = validator_cfg.new_fullnode(rng); - let account = &mut Account::random(); - - scope::run!(ctx, |ctx, s| async { - tracing::info!("Spawn validator."); - let validator_pool = ConnectionPool::test(from_snapshot, version).await; - let (mut validator, runner) = - testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; - s.spawn_bg(runner.run(ctx)); - s.spawn_bg(run_main_node( - ctx, - validator_cfg.config.clone(), - validator_cfg.secrets.clone(), - validator_pool.clone(), - )); - // API server needs at least 1 L1 batch to start. - validator.seal_batch().await; - let client = validator.connect(ctx).await?; - - let node_pool = ConnectionPool::test(from_snapshot, version).await; - - // Run the EN so the consensus is initialized on EN and wait for it to sync. - scope::run!(ctx, |ctx, s| async { - let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; - s.spawn_bg(runner.run(ctx)); - s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); - validator.push_random_blocks(rng, account, 5).await; - node_pool - .wait_for_payload(ctx, validator.last_block()) - .await?; - Ok(()) - }) - .await - .unwrap(); - - // Run the temporary fetcher. It should terminate immediately, since EN is synced. - let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; - s.spawn_bg(runner.run(ctx)); - node.run_temporary_fetcher(ctx, client).await?; - - Ok(()) - }) - .await - .unwrap(); -} - -#[test_casing(4, Product((VERSIONS,PREGENESIS)))] +#[test_casing(2, VERSIONS)] #[tokio::test] -async fn test_with_pruning(version: ProtocolVersionId, pregenesis: bool) { +async fn test_with_pruning(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs index 46b84c34061d..81d26ebc3758 100644 --- a/core/node/consensus/src/vm.rs +++ b/core/node/consensus/src/vm.rs @@ -8,10 +8,11 @@ use zksync_state::PostgresStorage; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ethabi, fee::Fee, l2::L2Tx, AccountTreeId, L2ChainId, Nonce, U256}; use zksync_vm_executor::oneshot::{ - CallOrExecute, MainOneshotExecutor, MultiVMBaseSystemContracts, OneshotEnvParameters, + CallOrExecute, MainOneshotExecutor, MultiVmBaseSystemContracts, OneshotEnvParameters, }; use zksync_vm_interface::{ - executor::OneshotExecutor, ExecutionResult, OneshotTracingParams, TxExecutionArgs, + executor::OneshotExecutor, storage::StorageWithOverrides, ExecutionResult, + OneshotTracingParams, TxExecutionArgs, }; use crate::{abi, storage::ConnectionPool}; @@ -28,7 +29,7 @@ impl VM { /// Constructs a new `VM` instance. 
pub async fn new(pool: ConnectionPool) -> Self { let base_system_contracts = - scope::wait_blocking(MultiVMBaseSystemContracts::load_eth_call_blocking).await; + scope::wait_blocking(MultiVmBaseSystemContracts::load_eth_call_blocking).await; Self { pool, // L2 chain ID and fee account don't seem to matter for calls, hence the use of default values. @@ -89,7 +90,7 @@ impl VM { let output = ctx .wait(self.executor.inspect_transaction_with_bytecode_compression( - storage, + StorageWithOverrides::new(storage), env, TxExecutionArgs::for_eth_call(tx), OneshotTracingParams::default(), diff --git a/core/node/consistency_checker/src/lib.rs b/core/node/consistency_checker/src/lib.rs index 20ba43a4166e..e13e479117cc 100644 --- a/core/node/consistency_checker/src/lib.rs +++ b/core/node/consistency_checker/src/lib.rs @@ -19,7 +19,7 @@ use zksync_types::{ commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, ethabi, ethabi::Token, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, Address, L1BatchNumber, ProtocolVersionId, H256, U256, }; @@ -224,7 +224,7 @@ impl LocalL1BatchCommitData { .context("cannot detect DA source from reference commitment token")?; // For `PubdataDA::Calldata`, it's required that the pubdata fits into a single blob. - if matches!(da, PubdataDA::Calldata) { + if matches!(da, PubdataSendingMode::Calldata) { let pubdata_len = self .l1_batch .header @@ -258,7 +258,7 @@ impl LocalL1BatchCommitData { pub fn detect_da( protocol_version: ProtocolVersionId, reference: &Token, -) -> Result { +) -> Result { /// These are used by the L1 Contracts to indicate what DA layer is used for pubdata const PUBDATA_SOURCE_CALLDATA: u8 = 0; const PUBDATA_SOURCE_BLOBS: u8 = 1; @@ -269,7 +269,7 @@ pub fn detect_da( } if protocol_version.is_pre_1_4_2() { - return Ok(PubdataDA::Calldata); + return Ok(PubdataSendingMode::Calldata); } let reference = match reference { @@ -291,9 +291,9 @@ pub fn detect_da( ))), }; match last_reference_token.first() { - Some(&byte) if byte == PUBDATA_SOURCE_CALLDATA => Ok(PubdataDA::Calldata), - Some(&byte) if byte == PUBDATA_SOURCE_BLOBS => Ok(PubdataDA::Blobs), - Some(&byte) if byte == PUBDATA_SOURCE_CUSTOM => Ok(PubdataDA::Custom), + Some(&byte) if byte == PUBDATA_SOURCE_CALLDATA => Ok(PubdataSendingMode::Calldata), + Some(&byte) if byte == PUBDATA_SOURCE_BLOBS => Ok(PubdataSendingMode::Blobs), + Some(&byte) if byte == PUBDATA_SOURCE_CUSTOM => Ok(PubdataSendingMode::Custom), Some(&byte) => Err(parse_error(format!( "unexpected first byte of the last reference token; expected one of [{PUBDATA_SOURCE_CALLDATA}, {PUBDATA_SOURCE_BLOBS}], \ got {byte}" diff --git a/core/node/consistency_checker/src/tests/mod.rs b/core/node/consistency_checker/src/tests/mod.rs index 40c447071cf4..b09ef2b2272c 100644 --- a/core/node/consistency_checker/src/tests/mod.rs +++ b/core/node/consistency_checker/src/tests/mod.rs @@ -64,7 +64,7 @@ pub(crate) fn build_commit_tx_input_data( let tokens = CommitBatches { last_committed_l1_batch: &batches[0], l1_batches: batches, - pubdata_da: PubdataDA::Calldata, + pubdata_da: PubdataSendingMode::Calldata, mode, } .into_tokens(); @@ -167,7 +167,7 @@ fn build_commit_tx_input_data_is_correct(commitment_mode: L1BatchCommitmentMode) .unwrap(); assert_eq!( commit_data, - CommitBatchInfo::new(commitment_mode, batch, PubdataDA::Calldata).into_token(), + CommitBatchInfo::new(commitment_mode, batch, PubdataSendingMode::Calldata).into_token(), ); } } diff --git a/core/node/contract_verification_server/Cargo.toml 
b/core/node/contract_verification_server/Cargo.toml index eeb2c7828467..e6a81fe6026a 100644 --- a/core/node/contract_verification_server/Cargo.toml +++ b/core/node/contract_verification_server/Cargo.toml @@ -11,7 +11,6 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_config.workspace = true zksync_dal.workspace = true zksync_types.workspace = true vise.workspace = true @@ -21,5 +20,11 @@ axum.workspace = true tokio = { workspace = true, features = ["time"] } tower-http = { workspace = true, features = ["cors"] } tracing.workspace = true -serde.workspace = true + +[dev-dependencies] +zksync_node_test_utils.workspace = true + +http-body-util.workspace = true serde_json.workspace = true +test-casing.workspace = true +tower.workspace = true diff --git a/core/node/contract_verification_server/src/api_decl.rs b/core/node/contract_verification_server/src/api_decl.rs index 256062936d32..d451cd79add9 100644 --- a/core/node/contract_verification_server/src/api_decl.rs +++ b/core/node/contract_verification_server/src/api_decl.rs @@ -3,10 +3,13 @@ use std::sync::Arc; use tower_http::cors::CorsLayer; use zksync_dal::{ConnectionPool, Core}; +use crate::cache::SupportedCompilersCache; + #[derive(Debug, Clone)] -pub struct RestApi { - pub(super) master_connection_pool: ConnectionPool<Core>, - pub(super) replica_connection_pool: ConnectionPool<Core>, +pub(crate) struct RestApi { + pub(crate) master_connection_pool: ConnectionPool<Core>, + pub(crate) replica_connection_pool: ConnectionPool<Core>, + pub(crate) supported_compilers: Arc<SupportedCompilersCache>, } impl RestApi { @@ -14,7 +17,9 @@ impl RestApi { master_connection_pool: ConnectionPool<Core>, replica_connection_pool: ConnectionPool<Core>, ) -> Self { + let supported_compilers = SupportedCompilersCache::new(replica_connection_pool.clone()); Self { + supported_compilers: Arc::new(supported_compilers), master_connection_pool, replica_connection_pool, } diff --git a/core/node/contract_verification_server/src/api_impl.rs b/core/node/contract_verification_server/src/api_impl.rs index b8111e98a1cc..b0336fd284b6 100644 --- a/core/node/contract_verification_server/src/api_impl.rs +++ b/core/node/contract_verification_server/src/api_impl.rs @@ -1,195 +1,234 @@ -use std::sync::Arc; +use std::{collections::HashSet, iter, sync::Arc}; +use anyhow::Context as _; use axum::{ extract::{Path, State}, - response::Response, + http::StatusCode, + response::{IntoResponse, Response}, Json, }; -use serde::Serialize; -use zksync_dal::CoreDal; -use zksync_types::{contract_verification_api::VerificationIncomingRequest, Address}; +use zksync_dal::{CoreDal, DalError}; +use zksync_types::{ + bytecode::BytecodeMarker, + contract_verification_api::{ + CompilerVersions, VerificationIncomingRequest, VerificationInfo, VerificationRequestStatus, + }, + Address, +}; use super::{api_decl::RestApi, metrics::METRICS}; -fn ok_json(data: impl Serialize) -> Response { - Response::builder() - .status(axum::http::StatusCode::OK) - .body(serde_json::to_string(&data).expect("Failed to serialize")) - .unwrap() +#[derive(Debug)] +pub(crate) enum ApiError { + IncorrectCompilerVersions, + UnsupportedCompilerVersions, + MissingZkCompilerVersion, + BogusZkCompilerVersion, + NoDeployedContract, + RequestNotFound, + VerificationInfoNotFound, + Internal(anyhow::Error), +} + +impl From<anyhow::Error> for ApiError { + fn from(err: anyhow::Error) -> Self { + Self::Internal(err) + } +} + +impl From<DalError> for ApiError { + fn from(err: DalError) -> Self { + Self::Internal(err.generalize()) + } } -fn bad_request(message: &str) -> Response { -
Response::builder() - .status(axum::http::StatusCode::BAD_REQUEST) - .body(message.to_string()) - .unwrap() +impl ApiError { + pub fn message(&self) -> &'static str { + match self { + Self::IncorrectCompilerVersions => "incorrect compiler versions", + Self::UnsupportedCompilerVersions => "unsupported compiler versions", + Self::MissingZkCompilerVersion => "missing zk compiler version for EraVM bytecode", + Self::BogusZkCompilerVersion => "zk compiler version specified for EVM bytecode", + Self::NoDeployedContract => "There is no deployed contract on this address", + Self::RequestNotFound => "request not found", + Self::VerificationInfoNotFound => "verification info not found for address", + Self::Internal(_) => "internal server error", + } + } } -fn not_found() -> Response { - Response::builder() - .status(axum::http::StatusCode::NOT_FOUND) - .body(String::new()) - .unwrap() +impl IntoResponse for ApiError { + fn into_response(self) -> Response { + let status_code = match &self { + Self::IncorrectCompilerVersions + | Self::UnsupportedCompilerVersions + | Self::MissingZkCompilerVersion + | Self::BogusZkCompilerVersion + | Self::NoDeployedContract => StatusCode::BAD_REQUEST, + + Self::RequestNotFound | Self::VerificationInfoNotFound => StatusCode::NOT_FOUND, + + Self::Internal(err) => { + // Do not expose the error details to the client, but log it. + tracing::warn!("Internal error: {err:#}"); + StatusCode::INTERNAL_SERVER_ERROR + } + }; + (status_code, self.message()).into_response() + } } +type ApiResult = Result, ApiError>; + impl RestApi { #[tracing::instrument(skip(query))] fn validate_contract_verification_query( query: &VerificationIncomingRequest, - ) -> Result<(), Response> { + ) -> Result<(), ApiError> { if query.source_code_data.compiler_type() != query.compiler_versions.compiler_type() { - return Err(bad_request("incorrect compiler versions")); + return Err(ApiError::IncorrectCompilerVersions); } - Ok(()) } + fn validate_compilers( + versions: &CompilerVersions, + bytecode_kind: BytecodeMarker, + ) -> Result<(), ApiError> { + match bytecode_kind { + BytecodeMarker::EraVm if versions.zk_compiler_version().is_none() => { + Err(ApiError::MissingZkCompilerVersion) + } + BytecodeMarker::Evm if versions.zk_compiler_version().is_some() => { + Err(ApiError::BogusZkCompilerVersion) + } + _ => Ok(()), + } + } + /// Add a contract verification job to the queue if the requested contract wasn't previously verified. + // FIXME: this doesn't seem to check that the contract isn't verified; should it? 
#[tracing::instrument(skip(self_, request))] pub async fn verification( State(self_): State>, Json(request): Json, - ) -> Response { + ) -> ApiResult { let method_latency = METRICS.call[&"contract_verification"].start(); - if let Err(res) = Self::validate_contract_verification_query(&request) { - return res; + Self::validate_contract_verification_query(&request)?; + + let is_compilation_supported = self_ + .supported_compilers + .get(|supported| supported.contain(&request.compiler_versions)) + .await?; + if !is_compilation_supported { + return Err(ApiError::UnsupportedCompilerVersions); } + let mut storage = self_ .master_connection_pool .connection_tagged("api") - .await - .unwrap(); - - if !storage + .await?; + let deployment_info = storage .storage_logs_dal() - .is_contract_deployed_at_address(request.contract_address) - .await - { - return bad_request("There is no deployed contract on this address"); - } + .filter_deployed_contracts(iter::once(request.contract_address), None) + .await?; + let &(_, bytecode_hash) = deployment_info + .get(&request.contract_address) + .ok_or(ApiError::NoDeployedContract)?; + let bytecode_marker = BytecodeMarker::new(bytecode_hash).with_context(|| { + format!( + "unknown bytecode marker for bytecode hash {bytecode_hash:?} at address {:?}", + request.contract_address + ) + })?; + Self::validate_compilers(&request.compiler_versions, bytecode_marker)?; let request_id = storage .contract_verification_dal() - .add_contract_verification_request(request) - .await - .unwrap(); - + .add_contract_verification_request(&request) + .await?; method_latency.observe(); - ok_json(request_id) + Ok(Json(request_id)) } #[tracing::instrument(skip(self_))] pub async fn verification_request_status( State(self_): State>, id: Path, - ) -> Response { + ) -> ApiResult { let method_latency = METRICS.call[&"contract_verification_request_status"].start(); let status = self_ .replica_connection_pool .connection_tagged("api") - .await - .unwrap() + .await? .contract_verification_dal() .get_verification_request_status(*id) - .await - .unwrap(); + .await? 
+ .ok_or(ApiError::RequestNotFound)?; method_latency.observe(); - match status { - Some(status) => ok_json(status), - None => not_found(), - } + Ok(Json(status)) } #[tracing::instrument(skip(self_))] - pub async fn zksolc_versions(State(self_): State>) -> Response { + pub async fn zksolc_versions(State(self_): State>) -> ApiResult> { let method_latency = METRICS.call[&"contract_verification_zksolc_versions"].start(); let versions = self_ - .replica_connection_pool - .connection_tagged("api") - .await - .unwrap() - .contract_verification_dal() - .get_zksolc_versions() - .await - .unwrap(); - + .supported_compilers + .get(|supported| supported.zksolc.clone()) + .await?; method_latency.observe(); - ok_json(versions) + Ok(Json(versions)) } #[tracing::instrument(skip(self_))] - pub async fn solc_versions(State(self_): State>) -> Response { + pub async fn solc_versions(State(self_): State>) -> ApiResult> { let method_latency = METRICS.call[&"contract_verification_solc_versions"].start(); let versions = self_ - .replica_connection_pool - .connection_tagged("api") - .await - .unwrap() - .contract_verification_dal() - .get_solc_versions() - .await - .unwrap(); - + .supported_compilers + .get(|supported| supported.solc.clone()) + .await?; method_latency.observe(); - ok_json(versions) + Ok(Json(versions)) } #[tracing::instrument(skip(self_))] - pub async fn zkvyper_versions(State(self_): State>) -> Response { + pub async fn zkvyper_versions(State(self_): State>) -> ApiResult> { let method_latency = METRICS.call[&"contract_verification_zkvyper_versions"].start(); let versions = self_ - .replica_connection_pool - .connection_tagged("api") - .await - .unwrap() - .contract_verification_dal() - .get_zkvyper_versions() - .await - .unwrap(); - + .supported_compilers + .get(|supported| supported.zkvyper.clone()) + .await?; method_latency.observe(); - ok_json(versions) + Ok(Json(versions)) } #[tracing::instrument(skip(self_))] - pub async fn vyper_versions(State(self_): State>) -> Response { + pub async fn vyper_versions(State(self_): State>) -> ApiResult> { let method_latency = METRICS.call[&"contract_verification_vyper_versions"].start(); let versions = self_ - .replica_connection_pool - .connection_tagged("api") - .await - .unwrap() - .contract_verification_dal() - .get_vyper_versions() - .await - .unwrap(); - + .supported_compilers + .get(|supported| supported.vyper.clone()) + .await?; method_latency.observe(); - ok_json(versions) + Ok(Json(versions)) } #[tracing::instrument(skip(self_))] pub async fn verification_info( State(self_): State>, address: Path
, - ) -> Response { + ) -> ApiResult { let method_latency = METRICS.call[&"contract_verification_info"].start(); - let info = self_ .replica_connection_pool .connection_tagged("api") - .await - .unwrap() + .await? .contract_verification_dal() .get_contract_verification_info(*address) - .await - .unwrap(); - + .await? + .ok_or(ApiError::VerificationInfoNotFound)?; method_latency.observe(); - match info { - Some(info) => ok_json(info), - None => not_found(), - } + Ok(Json(info)) } } diff --git a/core/node/contract_verification_server/src/cache.rs b/core/node/contract_verification_server/src/cache.rs new file mode 100644 index 000000000000..c8e367515287 --- /dev/null +++ b/core/node/contract_verification_server/src/cache.rs @@ -0,0 +1,122 @@ +use std::{ + collections::HashSet, + time::{Duration, Instant}, +}; + +use tokio::sync::RwLock; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; +use zksync_types::contract_verification_api::CompilerVersions; + +/// Compiler versions supported by the contract verifier. +#[derive(Debug, Clone)] +pub(crate) struct SupportedCompilerVersions { + pub solc: HashSet, + pub zksolc: HashSet, + pub vyper: HashSet, + pub zkvyper: HashSet, +} + +impl SupportedCompilerVersions { + /// Checks whether the supported compilers include ones specified in a request. + pub fn contain(&self, versions: &CompilerVersions) -> bool { + match versions { + CompilerVersions::Solc { + compiler_solc_version, + compiler_zksolc_version, + } => { + self.solc.contains(compiler_solc_version) + && compiler_zksolc_version + .as_ref() + .map_or(true, |ver| self.zksolc.contains(ver)) + } + CompilerVersions::Vyper { + compiler_vyper_version, + compiler_zkvyper_version, + } => { + self.vyper.contains(compiler_vyper_version) + && compiler_zkvyper_version + .as_ref() + .map_or(true, |ver| self.zkvyper.contains(ver)) + } + } + } +} + +impl SupportedCompilerVersions { + async fn new(connection: &mut Connection<'_, Core>) -> Result { + let solc = connection + .contract_verification_dal() + .get_solc_versions() + .await?; + let zksolc = connection + .contract_verification_dal() + .get_zksolc_versions() + .await?; + let vyper = connection + .contract_verification_dal() + .get_vyper_versions() + .await?; + let zkvyper = connection + .contract_verification_dal() + .get_zkvyper_versions() + .await?; + Ok(Self { + solc: solc.into_iter().collect(), + zksolc: zksolc.into_iter().collect(), + vyper: vyper.into_iter().collect(), + zkvyper: zkvyper.into_iter().collect(), + }) + } +} + +/// Cache for compiler versions supported by the contract verifier. +#[derive(Debug)] +pub(crate) struct SupportedCompilersCache { + connection_pool: ConnectionPool, + inner: RwLock>, +} + +impl SupportedCompilersCache { + const CACHE_UPDATE_INTERVAL: Duration = Duration::from_secs(10); + + pub fn new(connection_pool: ConnectionPool) -> Self { + Self { + connection_pool, + inner: RwLock::new(None), + } + } + + fn get_cached( + cache: Option<&(SupportedCompilerVersions, Instant)>, + action: impl FnOnce(&SupportedCompilerVersions) -> R, + ) -> Option { + cache.and_then(|(versions, updated_at)| { + (updated_at.elapsed() <= Self::CACHE_UPDATE_INTERVAL).then(|| action(versions)) + }) + } + + pub async fn get( + &self, + action: impl Fn(&SupportedCompilerVersions) -> R, + ) -> Result { + let output = Self::get_cached(self.inner.read().await.as_ref(), &action); + if let Some(output) = output { + return Ok(output); + } + + // We don't want to hold an exclusive lock while querying Postgres. 
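+ // Instead, we serve from the read lock while the cached entry is fresh, run
+ // the query in a read-only transaction with no lock held, and take the write
+ // lock only for the final assignment.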
+ let supported = { + let mut connection = self.connection_pool.connection_tagged("api").await?; + let mut db_transaction = connection + .transaction_builder()? + .set_readonly() + .build() + .await?; + SupportedCompilerVersions::new(&mut db_transaction).await? + }; + let output = action(&supported); + // Another task may have written to the cache already, but we should be fine with updating it again. + *self.inner.write().await = Some((supported, Instant::now())); + Ok(output) + } +} diff --git a/core/node/contract_verification_server/src/lib.rs b/core/node/contract_verification_server/src/lib.rs index eea45f8564bf..912cec55f0b8 100644 --- a/core/node/contract_verification_server/src/lib.rs +++ b/core/node/contract_verification_server/src/lib.rs @@ -1,21 +1,24 @@ +use std::net::SocketAddr; + use anyhow::Context as _; use tokio::sync::watch; -use zksync_config::ContractVerifierConfig; use zksync_dal::ConnectionPool; use self::api_decl::RestApi; mod api_decl; mod api_impl; +mod cache; mod metrics; +#[cfg(test)] +mod tests; pub async fn start_server( master_connection_pool: ConnectionPool, replica_connection_pool: ConnectionPool, - config: ContractVerifierConfig, + bind_address: SocketAddr, mut stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { - let bind_address = config.bind_addr(); let api = RestApi::new(master_connection_pool, replica_connection_pool).into_router(); let listener = tokio::net::TcpListener::bind(bind_address) diff --git a/core/node/contract_verification_server/src/tests.rs b/core/node/contract_verification_server/src/tests.rs new file mode 100644 index 000000000000..c5c1d88b3d0c --- /dev/null +++ b/core/node/contract_verification_server/src/tests.rs @@ -0,0 +1,356 @@ +//! Tests for contract verification API server. + +use std::{str, time::Duration}; + +use axum::{ + body::Body, + http::{header, Method, Request, Response, StatusCode}, +}; +use http_body_util::BodyExt as _; +use test_casing::test_casing; +use tower::ServiceExt; +use zksync_dal::{Connection, Core, CoreDal}; +use zksync_node_test_utils::create_l2_block; +use zksync_types::{ + bytecode::{BytecodeHash, BytecodeMarker}, + contract_verification_api::CompilerVersions, + get_code_key, Address, L2BlockNumber, ProtocolVersion, StorageLog, +}; + +use super::*; +use crate::api_impl::ApiError; + +const SOLC_VERSION: &str = "0.8.27"; +const ZKSOLC_VERSION: &str = "1.5.6"; + +async fn prepare_storage(storage: &mut Connection<'_, Core>) { + storage + .protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + storage + .blocks_dal() + .insert_l2_block(&create_l2_block(0)) + .await + .unwrap(); + + storage + .contract_verification_dal() + .set_solc_versions(&[SOLC_VERSION.to_owned()]) + .await + .unwrap(); + storage + .contract_verification_dal() + .set_zksolc_versions(&[ZKSOLC_VERSION.to_owned()]) + .await + .unwrap(); +} + +async fn mock_deploy_contract( + storage: &mut Connection<'_, Core>, + address: Address, + kind: BytecodeMarker, +) { + let bytecode_hash = match kind { + BytecodeMarker::EraVm => BytecodeHash::for_bytecode(&[0; 32]).value(), + BytecodeMarker::Evm => BytecodeHash::for_evm_bytecode(&[0; 96]).value(), + }; + let deploy_log = StorageLog::new_write_log(get_code_key(&address), bytecode_hash); + storage + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &[deploy_log]) + .await + .unwrap() +} + +fn post_request(body: &serde_json::Value) -> Request { + Request::builder() + .method(Method::POST) + .uri("/contract_verification") + 
.header(header::CONTENT_TYPE, "application/json") + .body(Body::from(serde_json::to_vec(body).unwrap())) + .unwrap() +} + +async fn json_response(response: Response) -> serde_json::Value { + assert_eq!(response.status(), StatusCode::OK); + assert_eq!( + response.headers().get(header::CONTENT_TYPE).unwrap(), + "application/json" + ); + let response = response.into_body(); + let response = response.collect().await.unwrap().to_bytes(); + serde_json::from_slice(&response).unwrap() +} + +#[tokio::test] +async fn getting_compiler_versions() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + prepare_storage(&mut storage).await; + + let router = RestApi::new(pool.clone(), pool).into_router(); + let req = Request::builder() + .method(Method::GET) + .uri("/contract_verification/zksolc_versions") + .body(Body::empty()) + .unwrap(); + let response = router.clone().oneshot(req).await.unwrap(); + let versions = json_response(response).await; + assert_eq!(versions, serde_json::json!([ZKSOLC_VERSION])); + + let req = Request::builder() + .method(Method::GET) + .uri("/contract_verification/solc_versions") + .body(Body::empty()) + .unwrap(); + let response = router.oneshot(req).await.unwrap(); + let versions = json_response(response).await; + assert_eq!(versions, serde_json::json!([SOLC_VERSION])); +} + +#[test_casing(2, [BytecodeMarker::EraVm, BytecodeMarker::Evm])] +#[tokio::test] +async fn submitting_request(bytecode_kind: BytecodeMarker) { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + prepare_storage(&mut storage).await; + + let address = Address::repeat_byte(0x23); + let verification_request = serde_json::json!({ + "contractAddress": address, + "sourceCode": "contract Test {}", + "contractName": "Test", + "compilerZksolcVersion": match bytecode_kind { + BytecodeMarker::EraVm => Some(ZKSOLC_VERSION), + BytecodeMarker::Evm => None, + }, + "compilerSolcVersion": SOLC_VERSION, + "optimizationUsed": true, + }); + + let router = RestApi::new(pool.clone(), pool).into_router(); + let response = router + .clone() + .oneshot(post_request(&verification_request)) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::BAD_REQUEST); // the address is not deployed to + let error_message = response.collect().await.unwrap().to_bytes(); + let error_message = str::from_utf8(&error_message).unwrap(); + assert_eq!(error_message, ApiError::NoDeployedContract.message()); + + mock_deploy_contract(&mut storage, address, bytecode_kind).await; + + let response = router + .clone() + .oneshot(post_request(&verification_request)) + .await + .unwrap(); + let id = json_response(response).await; + assert_eq!(id, serde_json::json!(1)); + + let request = storage + .contract_verification_dal() + .get_next_queued_verification_request(Duration::from_secs(600)) + .await + .unwrap() + .expect("request not persisted"); + assert_eq!(request.id, 1); + assert_eq!(request.req.contract_address, address); + assert_eq!( + request.req.compiler_versions, + CompilerVersions::Solc { + compiler_zksolc_version: match bytecode_kind { + BytecodeMarker::EraVm => Some(ZKSOLC_VERSION.to_owned()), + BytecodeMarker::Evm => None, + }, + compiler_solc_version: SOLC_VERSION.to_owned(), + } + ); + assert_eq!(request.req.contract_name, "Test"); + assert!(request.req.optimization_used); + + let req = Request::builder() + .method(Method::GET) + .uri("/contract_verification/1") + .body(Body::empty()) + .unwrap(); + let response = 
router.oneshot(req).await.unwrap(); + let request_status = json_response(response).await; + assert_eq!(request_status["status"], "in_progress"); +} + +#[test_casing(2, [BytecodeMarker::EraVm, BytecodeMarker::Evm])] +#[tokio::test] +async fn submitting_request_with_invalid_compiler_type(bytecode_kind: BytecodeMarker) { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + prepare_storage(&mut storage).await; + + let address = Address::repeat_byte(0x23); + mock_deploy_contract(&mut storage, address, bytecode_kind).await; + + let verification_request = serde_json::json!({ + "contractAddress": address, + "sourceCode": "contract Test {}", + "contractName": "Test", + // Intentionally incorrect versions "shape" + "compilerZksolcVersion": match bytecode_kind { + BytecodeMarker::Evm => Some(ZKSOLC_VERSION), + BytecodeMarker::EraVm => None, + }, + "compilerSolcVersion": SOLC_VERSION, + "optimizationUsed": true, + }); + let router = RestApi::new(pool.clone(), pool).into_router(); + let response = router + .oneshot(post_request(&verification_request)) + .await + .unwrap(); + + assert_eq!(response.status(), StatusCode::BAD_REQUEST); + let error_message = response.collect().await.unwrap().to_bytes(); + let error_message = str::from_utf8(&error_message).unwrap(); + let expected_message = match bytecode_kind { + BytecodeMarker::Evm => ApiError::BogusZkCompilerVersion.message(), + BytecodeMarker::EraVm => ApiError::MissingZkCompilerVersion.message(), + }; + assert_eq!(error_message, expected_message); +} + +#[test_casing(2, [BytecodeMarker::EraVm, BytecodeMarker::Evm])] +#[tokio::test] +async fn submitting_request_with_unsupported_solc(bytecode_kind: BytecodeMarker) { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + prepare_storage(&mut storage).await; + + let address = Address::repeat_byte(0x23); + mock_deploy_contract(&mut storage, address, bytecode_kind).await; + + let verification_request = serde_json::json!({ + "contractAddress": address, + "sourceCode": "contract Test {}", + "contractName": "Test", + "compilerZksolcVersion": match bytecode_kind { + BytecodeMarker::Evm => None, + BytecodeMarker::EraVm => Some(ZKSOLC_VERSION), + }, + "compilerSolcVersion": "1.0.0", + "optimizationUsed": true, + }); + let router = RestApi::new(pool.clone(), pool).into_router(); + let response = router + .oneshot(post_request(&verification_request)) + .await + .unwrap(); + + assert_eq!(response.status(), StatusCode::BAD_REQUEST); + let error_message = response.collect().await.unwrap().to_bytes(); + let error_message = str::from_utf8(&error_message).unwrap(); + assert_eq!( + error_message, + ApiError::UnsupportedCompilerVersions.message() + ); +} + +#[tokio::test] +async fn submitting_request_with_unsupported_zksolc() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + prepare_storage(&mut storage).await; + + let address = Address::repeat_byte(0x23); + mock_deploy_contract(&mut storage, address, BytecodeMarker::EraVm).await; + + let verification_request = serde_json::json!({ + "contractAddress": address, + "sourceCode": "contract Test {}", + "contractName": "Test", + "compilerZksolcVersion": "1000.0.0", + "compilerSolcVersion": SOLC_VERSION, + "optimizationUsed": true, + }); + let router = RestApi::new(pool.clone(), pool).into_router(); + let response = router + .oneshot(post_request(&verification_request)) + .await + .unwrap(); + + assert_eq!(response.status(), 
StatusCode::BAD_REQUEST); + let error_message = response.collect().await.unwrap().to_bytes(); + let error_message = str::from_utf8(&error_message).unwrap(); + assert_eq!( + error_message, + ApiError::UnsupportedCompilerVersions.message() + ); +} + +#[tokio::test] +async fn querying_missing_request() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + prepare_storage(&mut storage).await; + let router = RestApi::new(pool.clone(), pool).into_router(); + + let req = Request::builder() + .method(Method::GET) + .uri("/contract_verification/1") + .body(Body::empty()) + .unwrap(); + let response = router.oneshot(req).await.unwrap(); + + assert_eq!(response.status(), StatusCode::NOT_FOUND); + let error_message = response.collect().await.unwrap().to_bytes(); + let error_message = str::from_utf8(&error_message).unwrap(); + assert_eq!(error_message, ApiError::RequestNotFound.message()); +} + +#[tokio::test] +async fn querying_missing_verification_info() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + prepare_storage(&mut storage).await; + let router = RestApi::new(pool.clone(), pool).into_router(); + + let req = Request::builder() + .method(Method::GET) + .uri("/contract_verification/info/0x2323232323232323232323232323232323232323") + .body(Body::empty()) + .unwrap(); + let response = router.oneshot(req).await.unwrap(); + + assert_eq!(response.status(), StatusCode::NOT_FOUND); + let error_message = response.collect().await.unwrap().to_bytes(); + let error_message = str::from_utf8(&error_message).unwrap(); + assert_eq!(error_message, ApiError::VerificationInfoNotFound.message()); +} + +#[tokio::test] +async fn mismatched_compiler_type() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + prepare_storage(&mut storage).await; + let address = Address::repeat_byte(0x23); + mock_deploy_contract(&mut storage, address, BytecodeMarker::EraVm).await; + + let verification_request = serde_json::json!({ + "contractAddress": address, + "sourceCode": "contract Test {}", + "contractName": "Test", + "compilerVyperVersion": "1.0.1", + "optimizationUsed": true, + }); + + let router = RestApi::new(pool.clone(), pool).into_router(); + let response = router + .oneshot(post_request(&verification_request)) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::BAD_REQUEST); + let error_message = response.collect().await.unwrap().to_bytes(); + let error_message = str::from_utf8(&error_message).unwrap(); + assert_eq!(error_message, ApiError::IncorrectCompilerVersions.message()); +} diff --git a/core/node/da_clients/Cargo.toml b/core/node/da_clients/Cargo.toml index 60b65067f48d..e0c85b3030ab 100644 --- a/core/node/da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -23,6 +23,7 @@ zksync_types.workspace = true zksync_object_store.workspace = true zksync_da_client.workspace = true zksync_env_config.workspace = true +zksync_basic_types.workspace = true futures.workspace = true # Avail dependencies @@ -33,7 +34,24 @@ base58.workspace = true serde_json.workspace = true hex.workspace = true blake2b_simd.workspace = true - -jsonrpsee = { workspace = true, features = ["ws-client"] } parity-scale-codec = { workspace = true, features = ["derive"] } subxt-signer = { workspace = true, features = ["sr25519", "native"] } +jsonrpsee = { workspace = true, features = ["ws-client"] } +reqwest = { workspace = true } +bytes = { workspace = true } 
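The `backon` dependency added just below backs the new Avail gas-relay client later in this diff, which polls the relay's status endpoint with constant-backoff retries. A minimal sketch of the same pattern, with a hypothetical `fetch_status` standing in for the real HTTP call:

```rust
use std::time::Duration;

use backon::{ConstantBuilder, Retryable};

// Hypothetical stand-in for the gas relay status poll.
async fn fetch_status() -> anyhow::Result<String> {
    Err(anyhow::anyhow!("submission not yet included"))
}

async fn poll_with_retries(max_retries: usize) -> anyhow::Result<String> {
    // Retry the closure with a constant delay, mirroring the client code below.
    (|| async { fetch_status().await })
        .retry(
            &ConstantBuilder::default()
                .with_delay(Duration::from_secs(5))
                .with_max_times(max_retries),
        )
        .await
}
```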
+backon.workspace = true + +# Celestia dependencies +http.workspace = true +bincode.workspace = true +celestia-types.workspace = true +secp256k1.workspace = true +sha2.workspace = true +prost.workspace = true +bech32.workspace = true +ripemd.workspace = true +tonic = { workspace = true, features = ["tls-roots", "prost", "codegen"] } +pbjson-types.workspace = true + +# Eigen dependencies +tokio-stream.workspace = true diff --git a/core/node/da_clients/README.md b/core/node/da_clients/README.md index df06cef24197..1b22e5198a68 100644 --- a/core/node/da_clients/README.md +++ b/core/node/da_clients/README.md @@ -8,3 +8,5 @@ Currently, the following DataAvailability clients are implemented: utilizing the DA framework. - `Object Store client` that stores the pubdata in the Object Store(GCS). - `Avail` that sends the pubdata to the Avail DA layer. +- `Celestia` that sends the pubdata to the Celestia DA layer. +- `Eigen` that sends the pubdata to the Eigen DA layer. diff --git a/core/node/da_clients/src/avail/client.rs b/core/node/da_clients/src/avail/client.rs index 7718691bf185..c0ead429d91a 100644 --- a/core/node/da_clients/src/avail/client.rs +++ b/core/node/da_clients/src/avail/client.rs @@ -1,34 +1,136 @@ -use std::{fmt::Debug, sync::Arc}; +use std::{fmt::Debug, sync::Arc, time::Duration}; +use anyhow::anyhow; use async_trait::async_trait; use jsonrpsee::ws_client::WsClientBuilder; +use serde::{Deserialize, Serialize}; use subxt_signer::ExposeSecret; -use zksync_config::configs::da_client::avail::{AvailConfig, AvailSecrets}; +use zksync_config::configs::da_client::avail::{AvailClientConfig, AvailConfig, AvailSecrets}; use zksync_da_client::{ types::{DAError, DispatchResponse, InclusionData}, DataAvailabilityClient, }; +use zksync_types::{ + ethabi::{self, Token}, + web3::contract::Tokenize, + H256, U256, +}; -use crate::avail::sdk::RawAvailClient; +use crate::{ + avail::sdk::{GasRelayClient, RawAvailClient}, + utils::{to_non_retriable_da_error, to_retriable_da_error}, +}; + +#[derive(Debug, Clone)] +enum AvailClientMode { + Default(Box), + GasRelay(GasRelayClient), +} /// An implementation of the `DataAvailabilityClient` trait that interacts with the Avail network. 
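The `to_non_retriable_da_error`/`to_retriable_da_error` imports point to a shared `crate::utils` module. Judging by the definition removed from this file further down, a plausible shape for that module is:

```rust
use zksync_da_client::types::DAError;

pub(crate) fn to_non_retriable_da_error(error: impl Into<anyhow::Error>) -> DAError {
    DAError {
        error: error.into(),
        is_retriable: false,
    }
}

// Assumed to mirror the helper above; only the non-retriable variant is shown
// verbatim in this diff.
pub(crate) fn to_retriable_da_error(error: impl Into<anyhow::Error>) -> DAError {
    DAError {
        error: error.into(),
        is_retriable: true,
    }
}
```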
#[derive(Debug, Clone)] pub struct AvailClient { config: AvailConfig, - sdk_client: Arc, + sdk_client: Arc, + api_client: Arc, // bridge API reqwest client +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct BridgeAPIResponse { + blob_root: Option, + bridge_root: Option, + data_root_index: Option, + data_root_proof: Option>, + leaf: Option, + leaf_index: Option, + leaf_proof: Option>, + range_hash: Option, + error: Option, +} + +#[derive(Deserialize, Serialize, Debug)] +#[serde(rename_all = "camelCase")] +struct MerkleProofInput { + // proof of inclusion for the data root + data_root_proof: Vec, + // proof of inclusion of leaf within blob/bridge root + leaf_proof: Vec, + // abi.encodePacked(startBlock, endBlock) of header range commitment on vectorx + range_hash: H256, + // index of the data root in the commitment tree + data_root_index: U256, + // blob root to check proof against, or reconstruct the data root + blob_root: H256, + // bridge root to check proof against, or reconstruct the data root + bridge_root: H256, + // leaf being proven + leaf: H256, + // index of the leaf in the blob/bridge root tree + leaf_index: U256, +} + +impl Tokenize for MerkleProofInput { + fn into_tokens(self) -> Vec { + vec![Token::Tuple(vec![ + Token::Array( + self.data_root_proof + .iter() + .map(|x| Token::FixedBytes(x.as_bytes().to_vec())) + .collect(), + ), + Token::Array( + self.leaf_proof + .iter() + .map(|x| Token::FixedBytes(x.as_bytes().to_vec())) + .collect(), + ), + Token::FixedBytes(self.range_hash.as_bytes().to_vec()), + Token::Uint(self.data_root_index), + Token::FixedBytes(self.blob_root.as_bytes().to_vec()), + Token::FixedBytes(self.bridge_root.as_bytes().to_vec()), + Token::FixedBytes(self.leaf.as_bytes().to_vec()), + Token::Uint(self.leaf_index), + ])] + } } impl AvailClient { pub async fn new(config: AvailConfig, secrets: AvailSecrets) -> anyhow::Result { - let seed_phrase = secrets - .seed_phrase - .ok_or_else(|| anyhow::anyhow!("seed phrase"))?; - let sdk_client = RawAvailClient::new(config.app_id, seed_phrase.0.expose_secret()).await?; - - Ok(Self { - config, - sdk_client: Arc::new(sdk_client), - }) + let api_client = Arc::new(reqwest::Client::new()); + match config.config.clone() { + AvailClientConfig::GasRelay(conf) => { + let gas_relay_api_key = secrets + .gas_relay_api_key + .ok_or_else(|| anyhow::anyhow!("Gas relay API key is missing"))?; + let gas_relay_client = GasRelayClient::new( + &conf.gas_relay_api_url, + gas_relay_api_key.0.expose_secret(), + conf.max_retries, + Arc::clone(&api_client), + ) + .await?; + Ok(Self { + config, + sdk_client: Arc::new(AvailClientMode::GasRelay(gas_relay_client)), + api_client, + }) + } + AvailClientConfig::FullClient(conf) => { + let seed_phrase = secrets + .seed_phrase + .ok_or_else(|| anyhow::anyhow!("Seed phrase is missing"))?; + // these unwraps are safe because we validate in protobuf config + let sdk_client = + RawAvailClient::new(conf.app_id, seed_phrase.0.expose_secret()).await?; + + Ok(Self { + config, + sdk_client: Arc::new(AvailClientMode::Default(Box::new(sdk_client))), + api_client, + }) + } + } } } @@ -39,37 +141,83 @@ impl DataAvailabilityClient for AvailClient { _: u32, // batch_number data: Vec, ) -> anyhow::Result { - let client = WsClientBuilder::default() - .build(self.config.api_node_url.as_str()) - .await - .map_err(to_non_retriable_da_error)?; + match self.sdk_client.as_ref() { + AvailClientMode::Default(client) => { + let default_config = match &self.config.config { + 
AvailClientConfig::FullClient(conf) => conf, + _ => unreachable!(), // validated in protobuf config + }; + let ws_client = WsClientBuilder::default() + .build(default_config.api_node_url.clone().as_str()) + .await + .map_err(to_non_retriable_da_error)?; - let extrinsic = self - .sdk_client - .build_extrinsic(&client, data) - .await - .map_err(to_non_retriable_da_error)?; + let extrinsic = client + .build_extrinsic(&ws_client, data) + .await + .map_err(to_non_retriable_da_error)?; - let block_hash = self - .sdk_client - .submit_extrinsic(&client, extrinsic.as_str()) - .await - .map_err(to_non_retriable_da_error)?; - let tx_id = self - .sdk_client - .get_tx_id(&client, block_hash.as_str(), extrinsic.as_str()) - .await - .map_err(to_non_retriable_da_error)?; - - Ok(DispatchResponse::from(format!("{}:{}", block_hash, tx_id))) + let block_hash = client + .submit_extrinsic(&ws_client, extrinsic.as_str()) + .await + .map_err(to_non_retriable_da_error)?; + let tx_id = client + .get_tx_id(&ws_client, block_hash.as_str(), extrinsic.as_str()) + .await + .map_err(to_non_retriable_da_error)?; + Ok(DispatchResponse::from(format!("{}:{}", block_hash, tx_id))) + } + AvailClientMode::GasRelay(client) => { + let (block_hash, extrinsic_index) = client + .post_data(data) + .await + .map_err(to_retriable_da_error)?; + Ok(DispatchResponse { + blob_id: format!("{:x}:{}", block_hash, extrinsic_index), + }) + } + } } async fn get_inclusion_data( &self, - _blob_id: &str, + blob_id: &str, ) -> anyhow::Result, DAError> { - // TODO: implement inclusion data retrieval - Ok(Some(InclusionData { data: vec![] })) + let (block_hash, tx_idx) = blob_id.split_once(':').ok_or_else(|| DAError { + error: anyhow!("Invalid blob ID format"), + is_retriable: false, + })?; + let url = format!( + "{}/eth/proof/{}?index={}", + self.config.bridge_api_url, block_hash, tx_idx + ); + + let response = self + .api_client + .get(&url) + .timeout(Duration::from_millis(self.config.timeout_ms as u64)) + .send() + .await + .map_err(to_retriable_da_error)?; + + let bridge_api_data = response + .json::() + .await + .map_err(to_retriable_da_error)?; + + let attestation_data: MerkleProofInput = MerkleProofInput { + data_root_proof: bridge_api_data.data_root_proof.unwrap(), + leaf_proof: bridge_api_data.leaf_proof.unwrap(), + range_hash: bridge_api_data.range_hash.unwrap(), + data_root_index: bridge_api_data.data_root_index.unwrap(), + blob_root: bridge_api_data.blob_root.unwrap(), + bridge_root: bridge_api_data.bridge_root.unwrap(), + leaf: bridge_api_data.leaf.unwrap(), + leaf_index: bridge_api_data.leaf_index.unwrap(), + }; + Ok(Some(InclusionData { + data: ethabi::encode(&attestation_data.into_tokens()), + })) } fn clone_boxed(&self) -> Box { @@ -80,10 +228,3 @@ impl DataAvailabilityClient for AvailClient { Some(RawAvailClient::MAX_BLOB_SIZE) } } - -pub fn to_non_retriable_da_error(error: impl Into) -> DAError { - DAError { - error: error.into(), - is_retriable: false, - } -} diff --git a/core/node/da_clients/src/avail/sdk.rs b/core/node/da_clients/src/avail/sdk.rs index 002422109d05..19309dc3cbf3 100644 --- a/core/node/da_clients/src/avail/sdk.rs +++ b/core/node/da_clients/src/avail/sdk.rs @@ -1,20 +1,24 @@ //! Minimal reimplementation of the Avail SDK client required for the DA client implementation. //! 
This is considered to be a temporary solution until a mature SDK is available on crates.io -use std::fmt::Debug; +use std::{fmt::Debug, sync::Arc, time}; +use backon::{ConstantBuilder, Retryable}; +use bytes::Bytes; use jsonrpsee::{ core::client::{Client, ClientT, Subscription, SubscriptionClientT}, rpc_params, }; use parity_scale_codec::{Compact, Decode, Encode}; use scale_encode::EncodeAsFields; +use serde::{Deserialize, Serialize}; use subxt_signer::{ bip39::Mnemonic, sr25519::{Keypair, Signature}, }; +use zksync_types::H256; -use crate::avail::client::to_non_retriable_da_error; +use crate::utils::to_non_retriable_da_error; const PROTOCOL_VERSION: u8 = 4; @@ -287,7 +291,7 @@ impl RawAvailClient { let status = sub.next().await.transpose()?; if status.is_some() && status.as_ref().unwrap().is_object() { - if let Some(block_hash) = status.unwrap().get("inBlock") { + if let Some(block_hash) = status.unwrap().get("finalized") { break block_hash .as_str() .ok_or_else(|| anyhow::anyhow!("Invalid block hash"))? @@ -369,3 +373,95 @@ fn ss58hash(data: &[u8]) -> Vec { ctx.update(data); ctx.finalize().to_vec() } + +/// An implementation of the `DataAvailabilityClient` trait that interacts with the Avail network. +#[derive(Debug, Clone)] +pub(crate) struct GasRelayClient { + api_url: String, + api_key: String, + max_retries: usize, + api_client: Arc, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct GasRelayAPISubmissionResponse { + submission_id: String, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct GasRelayAPIStatusResponse { + submission: GasRelayAPISubmission, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct GasRelayAPISubmission { + block_hash: Option, + extrinsic_index: Option, +} + +impl GasRelayClient { + const DEFAULT_INCLUSION_DELAY: time::Duration = time::Duration::from_secs(60); + const RETRY_DELAY: time::Duration = time::Duration::from_secs(5); + pub(crate) async fn new( + api_url: &str, + api_key: &str, + max_retries: usize, + api_client: Arc, + ) -> anyhow::Result { + Ok(Self { + api_url: api_url.to_owned(), + api_key: api_key.to_owned(), + max_retries, + api_client, + }) + } + + pub(crate) async fn post_data(&self, data: Vec) -> anyhow::Result<(H256, u64)> { + let submit_url = format!("{}/user/submit_raw_data?token=ethereum", &self.api_url); + // send the data to the gas relay + let submit_response = self + .api_client + .post(&submit_url) + .body(Bytes::from(data)) + .header("Content-Type", "text/plain") + .header("Authorization", &self.api_key) + .send() + .await?; + + let submit_response = submit_response + .json::() + .await?; + + let status_url = format!( + "{}/user/get_submission_info?submission_id={}", + self.api_url, submit_response.submission_id + ); + + tokio::time::sleep(Self::DEFAULT_INCLUSION_DELAY).await; + let status_response = (|| async { + self.api_client + .get(&status_url) + .header("Authorization", &self.api_key) + .send() + .await + }) + .retry( + &ConstantBuilder::default() + .with_delay(Self::RETRY_DELAY) + .with_max_times(self.max_retries), + ) + .await?; + + let status_response = status_response.json::().await?; + let (block_hash, extrinsic_index) = ( + status_response.submission.block_hash.ok_or_else(|| { + anyhow::anyhow!("Block hash not found in the response from the gas relay") + })?, + status_response.submission.extrinsic_index.ok_or_else(|| { + anyhow::anyhow!("Extrinsic index not found in the response from the gas relay") + })?, + ); + + Ok((block_hash, extrinsic_index)) + } +} diff --git 
a/core/node/da_clients/src/celestia/README.md b/core/node/da_clients/src/celestia/README.md new file mode 100644 index 000000000000..a3142a7d7615 --- /dev/null +++ b/core/node/da_clients/src/celestia/README.md @@ -0,0 +1,19 @@ +# Celestia client + +--- + +This is an implementation of the Celestia client capable of sending the blobs to DA layer. Normally, the light client is +required to send the blobs to Celestia, but this implementation is capable of sending the blobs to DA layer directly. + +This is a simplified and adapted version of astria's code, look +[here](https://github.com/astriaorg/astria/tree/main/crates/astria-sequencer-relayer) for original implementation. + +The generated files are copied from +[here](https://github.com/astriaorg/astria/tree/main/crates/astria-core/src/generated), which is not perfect, but allows +us to use them without adding the proto files and the infrastructure to generate the `.rs`. + +While moving the files, the `#[cfg(feature = "client")]` annotations were removed for simplicity, so client code is +available by default. + +If there is a need to generate the files from the proto files, the `tools/protobuf-compiler` from astria's repo can be +used. diff --git a/core/node/da_clients/src/celestia/client.rs b/core/node/da_clients/src/celestia/client.rs new file mode 100644 index 000000000000..df0735d4e1e4 --- /dev/null +++ b/core/node/da_clients/src/celestia/client.rs @@ -0,0 +1,109 @@ +use std::{ + fmt::{Debug, Formatter}, + str::FromStr, + sync::Arc, + time, +}; + +use async_trait::async_trait; +use celestia_types::{blob::Commitment, nmt::Namespace, Blob}; +use serde::{Deserialize, Serialize}; +use subxt_signer::ExposeSecret; +use tonic::transport::Endpoint; +use zksync_config::configs::da_client::celestia::{CelestiaConfig, CelestiaSecrets}; +use zksync_da_client::{ + types::{DAError, DispatchResponse, InclusionData}, + DataAvailabilityClient, +}; + +use crate::{ + celestia::sdk::{BlobTxHash, RawCelestiaClient}, + utils::to_non_retriable_da_error, +}; + +/// An implementation of the `DataAvailabilityClient` trait that interacts with the Avail network. +#[derive(Clone)] +pub struct CelestiaClient { + config: CelestiaConfig, + client: Arc, +} + +impl CelestiaClient { + pub async fn new(config: CelestiaConfig, secrets: CelestiaSecrets) -> anyhow::Result { + let grpc_channel = Endpoint::from_str(config.api_node_url.clone().as_str())? 
+ .timeout(time::Duration::from_millis(config.timeout_ms)) + .connect() + .await?; + + let private_key = secrets.private_key.0.expose_secret().to_string(); + let client = RawCelestiaClient::new(grpc_channel, private_key, config.chain_id.clone()) + .expect("could not create Celestia client"); + + Ok(Self { + config, + client: Arc::new(client), + }) + } +} +#[derive(Serialize, Deserialize)] +pub struct BlobId { + pub commitment: Commitment, + pub height: u64, +} + +#[async_trait] +impl DataAvailabilityClient for CelestiaClient { + async fn dispatch_blob( + &self, + _: u32, // batch number + data: Vec, + ) -> Result { + let namespace_bytes = + hex::decode(&self.config.namespace).map_err(to_non_retriable_da_error)?; + let namespace = + Namespace::new_v0(namespace_bytes.as_slice()).map_err(to_non_retriable_da_error)?; + let blob = Blob::new(namespace, data).map_err(to_non_retriable_da_error)?; + + let commitment = blob.commitment; + let blob_tx = self + .client + .prepare(vec![blob]) + .await + .map_err(to_non_retriable_da_error)?; + + let blob_tx_hash = BlobTxHash::compute(&blob_tx); + let height = self + .client + .submit(blob_tx_hash, blob_tx) + .await + .map_err(to_non_retriable_da_error)?; + + let blob_id = BlobId { commitment, height }; + let blob_bytes = bincode::serialize(&blob_id).map_err(to_non_retriable_da_error)?; + + Ok(DispatchResponse { + blob_id: hex::encode(&blob_bytes), + }) + } + + async fn get_inclusion_data(&self, _: &str) -> Result, DAError> { + Ok(Some(InclusionData { data: vec![] })) + } + + fn clone_boxed(&self) -> Box { + Box::new(self.clone()) + } + + fn blob_size_limit(&self) -> Option { + Some(1973786) // almost 2MB + } +} + +impl Debug for CelestiaClient { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("CelestiaClient") + .field("config.api_node_url", &self.config.api_node_url) + .field("config.namespace", &self.config.namespace) + .finish() + } +} diff --git a/core/node/da_clients/src/celestia/generated/celestia.blob.v1.rs b/core/node/da_clients/src/celestia/generated/celestia.blob.v1.rs new file mode 100644 index 000000000000..ee6ed85655e2 --- /dev/null +++ b/core/node/da_clients/src/celestia/generated/celestia.blob.v1.rs @@ -0,0 +1,200 @@ +// This file is @generated by prost-build. +/// Params defines the parameters for the module. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct Params { + #[prost(uint32, tag = "1")] + pub gas_per_blob_byte: u32, + #[prost(uint64, tag = "2")] + pub gov_max_square_size: u64, +} +impl ::prost::Name for Params { + const NAME: &'static str = "Params"; + const PACKAGE: &'static str = "celestia.blob.v1"; + fn full_name() -> ::prost::alloc::string::String { + "celestia.blob.v1.Params".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/celestia.blob.v1.Params".into() + } +} +/// QueryParamsRequest is the request type for the Query/Params RPC method. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryParamsRequest {} +impl ::prost::Name for QueryParamsRequest { + const NAME: &'static str = "QueryParamsRequest"; + const PACKAGE: &'static str = "celestia.blob.v1"; + fn full_name() -> ::prost::alloc::string::String { + "celestia.blob.v1.QueryParamsRequest".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/celestia.blob.v1.QueryParamsRequest".into() + } +} +/// QueryParamsResponse is the response type for the Query/Params RPC method. 
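A blob ID returned by `dispatch_blob` above is simply `hex(bincode(BlobId))`. A round-trip sketch, with a fixed-size byte array standing in for `celestia_types::blob::Commitment`:

```rust
use serde::{Deserialize, Serialize};

// Fixed-size stand-in for `celestia_types::blob::Commitment`.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct BlobId {
    commitment: [u8; 32],
    height: u64,
}

fn encode_blob_id(id: &BlobId) -> anyhow::Result<String> {
    Ok(hex::encode(bincode::serialize(id)?))
}

fn decode_blob_id(encoded: &str) -> anyhow::Result<BlobId> {
    Ok(bincode::deserialize(&hex::decode(encoded)?)?)
}
```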
+#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryParamsResponse { + #[prost(message, optional, tag = "1")] + pub params: ::core::option::Option, +} +impl ::prost::Name for QueryParamsResponse { + const NAME: &'static str = "QueryParamsResponse"; + const PACKAGE: &'static str = "celestia.blob.v1"; + fn full_name() -> ::prost::alloc::string::String { + "celestia.blob.v1.QueryParamsResponse".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/celestia.blob.v1.QueryParamsResponse".into() + } +} +/// Generated client implementations. +pub mod query_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Query defines the gRPC query service. + #[derive(Debug, Clone)] + pub struct QueryClient { + inner: tonic::client::Grpc, + } + impl QueryClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl QueryClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> QueryClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + QueryClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Params queries the parameters of the module. 
+ pub async fn params( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/celestia.blob.v1.Query/Params", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("celestia.blob.v1.Query", "Params")); + self.inner.unary(req, path, codec).await + } + } +} + +/// MsgPayForBlobs pays for the inclusion of a blob in the block. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgPayForBlobs { + #[prost(string, tag = "1")] + pub signer: ::prost::alloc::string::String, + /// namespaces is a list of namespaces that the blobs are associated with. A + /// namespace is a byte slice of length 29 where the first byte is the + /// namespaceVersion and the subsequent 28 bytes are the namespaceId. + #[prost(bytes = "bytes", repeated, tag = "2")] + pub namespaces: ::prost::alloc::vec::Vec<::prost::bytes::Bytes>, + #[prost(uint32, repeated, tag = "3")] + pub blob_sizes: ::prost::alloc::vec::Vec, + /// share_commitments is a list of share commitments (one per blob). + #[prost(bytes = "bytes", repeated, tag = "4")] + pub share_commitments: ::prost::alloc::vec::Vec<::prost::bytes::Bytes>, + /// share_versions are the versions of the share format that the blobs + /// associated with this message should use when included in a block. The + /// share_versions specified must match the share_versions used to generate the + /// share_commitment in this message. + #[prost(uint32, repeated, tag = "8")] + pub share_versions: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for MsgPayForBlobs { + const NAME: &'static str = "MsgPayForBlobs"; + const PACKAGE: &'static str = "celestia.blob.v1"; + fn full_name() -> ::prost::alloc::string::String { + "celestia.blob.v1.MsgPayForBlobs".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/celestia.blob.v1.MsgPayForBlobs".into() + } +} diff --git a/core/node/da_clients/src/celestia/generated/cosmos.auth.v1beta1.rs b/core/node/da_clients/src/celestia/generated/cosmos.auth.v1beta1.rs new file mode 100644 index 000000000000..98314985a8e6 --- /dev/null +++ b/core/node/da_clients/src/celestia/generated/cosmos.auth.v1beta1.rs @@ -0,0 +1,257 @@ +// This file is @generated by prost-build. +/// BaseAccount defines a base account type. It contains all the necessary fields +/// for basic account functionality. Any custom account type should extend this +/// type for additional functionality (e.g. vesting). +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BaseAccount { + #[prost(string, tag = "1")] + pub address: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub pub_key: ::core::option::Option<::pbjson_types::Any>, + #[prost(uint64, tag = "3")] + pub account_number: u64, + #[prost(uint64, tag = "4")] + pub sequence: u64, +} +impl ::prost::Name for BaseAccount { + const NAME: &'static str = "BaseAccount"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.auth.v1beta1.BaseAccount".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.auth.v1beta1.BaseAccount".into() + } +} +/// Params defines the parameters for the auth module. 
+#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct Params { + #[prost(uint64, tag = "1")] + pub max_memo_characters: u64, + #[prost(uint64, tag = "2")] + pub tx_sig_limit: u64, + #[prost(uint64, tag = "3")] + pub tx_size_cost_per_byte: u64, + #[prost(uint64, tag = "4")] + pub sig_verify_cost_ed25519: u64, + #[prost(uint64, tag = "5")] + pub sig_verify_cost_secp256k1: u64, +} +impl ::prost::Name for Params { + const NAME: &'static str = "Params"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.auth.v1beta1.Params".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.auth.v1beta1.Params".into() + } +} +/// QueryAccountRequest is the request type for the Query/Account RPC method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryAccountRequest { + /// address defines the address to query for. + #[prost(string, tag = "1")] + pub address: ::prost::alloc::string::String, +} +impl ::prost::Name for QueryAccountRequest { + const NAME: &'static str = "QueryAccountRequest"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.auth.v1beta1.QueryAccountRequest".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.auth.v1beta1.QueryAccountRequest".into() + } +} +/// QueryAccountResponse is the response type for the Query/Account RPC method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryAccountResponse { + /// account defines the account of the corresponding address. + #[prost(message, optional, tag = "1")] + pub account: ::core::option::Option<::pbjson_types::Any>, +} +impl ::prost::Name for QueryAccountResponse { + const NAME: &'static str = "QueryAccountResponse"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.auth.v1beta1.QueryAccountResponse".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.auth.v1beta1.QueryAccountResponse".into() + } +} +/// QueryParamsRequest is the request type for the Query/Params RPC method. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryParamsRequest {} +impl ::prost::Name for QueryParamsRequest { + const NAME: &'static str = "QueryParamsRequest"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.auth.v1beta1.QueryParamsRequest".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.auth.v1beta1.QueryParamsRequest".into() + } +} +/// QueryParamsResponse is the response type for the Query/Params RPC method. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryParamsResponse { + /// params defines the parameters of the module. + #[prost(message, optional, tag = "1")] + pub params: ::core::option::Option, +} +impl ::prost::Name for QueryParamsResponse { + const NAME: &'static str = "QueryParamsResponse"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.auth.v1beta1.QueryParamsResponse".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.auth.v1beta1.QueryParamsResponse".into() + } +} +/// Generated client implementations. 
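`Query/Account` responses wrap the account in a `pbjson_types::Any`; the generated `prost::Name` impls supply the `type_url` needed to unpack it. A sketch, assuming the generated `BaseAccount` is in scope:

```rust
use prost::{Message, Name};

// Unpack the `Any`-wrapped account returned by `Query/Account` into the
// generated `BaseAccount` type from this module.
fn unpack_base_account(any: &pbjson_types::Any) -> anyhow::Result<BaseAccount> {
    anyhow::ensure!(
        any.type_url == BaseAccount::type_url(),
        "unexpected type_url: {}",
        any.type_url
    );
    Ok(BaseAccount::decode(any.value.as_ref())?)
}
```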
+pub mod query_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Query defines the gRPC querier service. + #[derive(Debug, Clone)] + pub struct QueryClient { + inner: tonic::client::Grpc, + } + impl QueryClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl QueryClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> QueryClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + QueryClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Account returns account details based on address. + pub async fn account( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.auth.v1beta1.Query/Account", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.auth.v1beta1.Query", "Account")); + self.inner.unary(req, path, codec).await + } + /// Params queries all parameters. 
+ pub async fn params( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.auth.v1beta1.Query/Params", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.auth.v1beta1.Query", "Params")); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/core/node/da_clients/src/celestia/generated/cosmos.base.abci.v1beta1.rs b/core/node/da_clients/src/celestia/generated/cosmos.base.abci.v1beta1.rs new file mode 100644 index 000000000000..6b0f9fc1956d --- /dev/null +++ b/core/node/da_clients/src/celestia/generated/cosmos.base.abci.v1beta1.rs @@ -0,0 +1,125 @@ +// This file is @generated by prost-build. +/// TxResponse defines a structure containing relevant tx data and metadata. The +/// tags are stringified and the log is JSON decoded. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxResponse { + /// The block height + #[prost(int64, tag = "1")] + pub height: i64, + /// The transaction hash. + #[prost(string, tag = "2")] + pub txhash: ::prost::alloc::string::String, + /// Namespace for the Code + #[prost(string, tag = "3")] + pub codespace: ::prost::alloc::string::String, + /// Response code. + #[prost(uint32, tag = "4")] + pub code: u32, + /// Result bytes, if any. + #[prost(string, tag = "5")] + pub data: ::prost::alloc::string::String, + /// The output of the application's logger (raw string). May be + /// non-deterministic. + #[prost(string, tag = "6")] + pub raw_log: ::prost::alloc::string::String, + /// The output of the application's logger (typed). May be non-deterministic. + #[prost(message, repeated, tag = "7")] + pub logs: ::prost::alloc::vec::Vec, + /// Additional information. May be non-deterministic. + #[prost(string, tag = "8")] + pub info: ::prost::alloc::string::String, + /// Amount of gas requested for transaction. + #[prost(int64, tag = "9")] + pub gas_wanted: i64, + /// Amount of gas consumed by transaction. + #[prost(int64, tag = "10")] + pub gas_used: i64, + /// The request transaction bytes. + #[prost(message, optional, tag = "11")] + pub tx: ::core::option::Option<::pbjson_types::Any>, + /// Time of the previous block. For heights > 1, it's the weighted median of + /// the timestamps of the valid votes in the block.LastCommit. For height == 1, + /// it's genesis time. + #[prost(string, tag = "12")] + pub timestamp: ::prost::alloc::string::String, + /// Events defines all the events emitted by processing a transaction. Note, + /// these events include those emitted by processing all the messages and those + /// emitted from the ante. Whereas Logs contains the events, with + /// additional metadata, emitted only by processing the messages. 
+    ///
+    /// Since: cosmos-sdk 0.42.11, 0.44.5, 0.45
+    #[prost(message, repeated, tag = "13")]
+    pub events: ::prost::alloc::vec::Vec<
+        super::super::super::tendermint::abci::Event,
+    >,
+}
+impl ::prost::Name for TxResponse {
+    const NAME: &'static str = "TxResponse";
+    const PACKAGE: &'static str = "cosmos.base.abci.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.base.abci.v1beta1.TxResponse".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.base.abci.v1beta1.TxResponse".into()
+    }
+}
+/// ABCIMessageLog defines a structure containing an indexed tx ABCI message log.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AbciMessageLog {
+    #[prost(uint32, tag = "1")]
+    pub msg_index: u32,
+    #[prost(string, tag = "2")]
+    pub log: ::prost::alloc::string::String,
+    /// Events contains a slice of Event objects that were emitted during some
+    /// execution.
+    #[prost(message, repeated, tag = "3")]
+    pub events: ::prost::alloc::vec::Vec<StringEvent>,
+}
+impl ::prost::Name for AbciMessageLog {
+    const NAME: &'static str = "ABCIMessageLog";
+    const PACKAGE: &'static str = "cosmos.base.abci.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.base.abci.v1beta1.ABCIMessageLog".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.base.abci.v1beta1.ABCIMessageLog".into()
+    }
+}
+/// StringEvent defines an Event object wrapper where all the attributes
+/// contain key/value pairs that are strings instead of raw bytes.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct StringEvent {
+    #[prost(string, tag = "1")]
+    pub r#type: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "2")]
+    pub attributes: ::prost::alloc::vec::Vec<Attribute>,
+}
+impl ::prost::Name for StringEvent {
+    const NAME: &'static str = "StringEvent";
+    const PACKAGE: &'static str = "cosmos.base.abci.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.base.abci.v1beta1.StringEvent".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.base.abci.v1beta1.StringEvent".into()
+    }
+}
+/// Attribute defines an attribute wrapper where the key and value are
+/// strings instead of raw bytes.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Attribute {
+    #[prost(string, tag = "1")]
+    pub key: ::prost::alloc::string::String,
+    #[prost(string, tag = "2")]
+    pub value: ::prost::alloc::string::String,
+}
+impl ::prost::Name for Attribute {
+    const NAME: &'static str = "Attribute";
+    const PACKAGE: &'static str = "cosmos.base.abci.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.base.abci.v1beta1.Attribute".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.base.abci.v1beta1.Attribute".into()
+    }
+}
diff --git a/core/node/da_clients/src/celestia/generated/cosmos.base.node.v1beta1.rs b/core/node/da_clients/src/celestia/generated/cosmos.base.node.v1beta1.rs
new file mode 100644
index 000000000000..89bb519bd810
--- /dev/null
+++ b/core/node/da_clients/src/celestia/generated/cosmos.base.node.v1beta1.rs
@@ -0,0 +1,146 @@
+// This file is @generated by prost-build.
+/// ConfigRequest defines the request structure for the Config gRPC query.
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct ConfigRequest {}
+impl ::prost::Name for ConfigRequest {
+    const NAME: &'static str = "ConfigRequest";
+    const PACKAGE: &'static str = "cosmos.base.node.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.base.node.v1beta1.ConfigRequest".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.base.node.v1beta1.ConfigRequest".into()
+    }
+}
+/// ConfigResponse defines the response structure for the Config gRPC query.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ConfigResponse {
+    #[prost(string, tag = "1")]
+    pub minimum_gas_price: ::prost::alloc::string::String,
+}
+impl ::prost::Name for ConfigResponse {
+    const NAME: &'static str = "ConfigResponse";
+    const PACKAGE: &'static str = "cosmos.base.node.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.base.node.v1beta1.ConfigResponse".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.base.node.v1beta1.ConfigResponse".into()
+    }
+}
+/// Generated client implementations.
+pub mod service_client {
+    #![allow(
+        unused_variables,
+        dead_code,
+        missing_docs,
+        clippy::wildcard_imports,
+        clippy::let_unit_value,
+    )]
+    use tonic::codegen::*;
+    use tonic::codegen::http::Uri;
+    /// Service defines the gRPC querier service for node related queries.
+    #[derive(Debug, Clone)]
+    pub struct ServiceClient<T> {
+        inner: tonic::client::Grpc<T>,
+    }
+    impl ServiceClient<tonic::transport::Channel> {
+        /// Attempt to create a new client by connecting to a given endpoint.
+        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
+        where
+            D: TryInto<tonic::transport::Endpoint>,
+            D::Error: Into<StdError>,
+        {
+            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
+            Ok(Self::new(conn))
+        }
+    }
+    impl<T> ServiceClient<T>
+    where
+        T: tonic::client::GrpcService<tonic::body::BoxBody>,
+        T::Error: Into<StdError>,
+        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
+        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
+    {
+        pub fn new(inner: T) -> Self {
+            let inner = tonic::client::Grpc::new(inner);
+            Self { inner }
+        }
+        pub fn with_origin(inner: T, origin: Uri) -> Self {
+            let inner = tonic::client::Grpc::with_origin(inner, origin);
+            Self { inner }
+        }
+        pub fn with_interceptor<F>(
+            inner: T,
+            interceptor: F,
+        ) -> ServiceClient<InterceptedService<T, F>>
+        where
+            F: tonic::service::Interceptor,
+            T::ResponseBody: Default,
+            T: tonic::codegen::Service<
+                http::Request<tonic::body::BoxBody>,
+                Response = http::Response<
+                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
+                >,
+            >,
+            <T as tonic::codegen::Service<
+                http::Request<tonic::body::BoxBody>,
+            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
+        {
+            ServiceClient::new(InterceptedService::new(inner, interceptor))
+        }
+        /// Compress requests with the given encoding.
+        ///
+        /// This requires the server to support it otherwise it might respond with an
+        /// error.
+        #[must_use]
+        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.send_compressed(encoding);
+            self
+        }
+        /// Enable decompressing responses.
+        #[must_use]
+        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.accept_compressed(encoding);
+            self
+        }
+        /// Limits the maximum size of a decoded message.
+        ///
+        /// Default: `4MB`
+        #[must_use]
+        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_decoding_message_size(limit);
+            self
+        }
+        /// Limits the maximum size of an encoded message.
+        ///
+        /// Default: `usize::MAX`
+        #[must_use]
+        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_encoding_message_size(limit);
+            self
+        }
+        /// Config queries for the operator configuration.
+        pub async fn config(
+            &mut self,
+            request: impl tonic::IntoRequest<super::ConfigRequest>,
+        ) -> std::result::Result<tonic::Response<super::ConfigResponse>, tonic::Status> {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::unknown(
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/cosmos.base.node.v1beta1.Service/Config",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("cosmos.base.node.v1beta1.Service", "Config"));
+            self.inner.unary(req, path, codec).await
+        }
+    }
+}
diff --git a/core/node/da_clients/src/celestia/generated/cosmos.base.v1beta1.rs b/core/node/da_clients/src/celestia/generated/cosmos.base.v1beta1.rs
new file mode 100644
index 000000000000..d13fb784d97a
--- /dev/null
+++ b/core/node/da_clients/src/celestia/generated/cosmos.base.v1beta1.rs
@@ -0,0 +1,19 @@
+// This file is @generated by prost-build.
+/// Coin defines a token with a denomination and an amount.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Coin {
+    #[prost(string, tag = "1")]
+    pub denom: ::prost::alloc::string::String,
+    #[prost(string, tag = "2")]
+    pub amount: ::prost::alloc::string::String,
+}
+impl ::prost::Name for Coin {
+    const NAME: &'static str = "Coin";
+    const PACKAGE: &'static str = "cosmos.base.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.base.v1beta1.Coin".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.base.v1beta1.Coin".into()
+    }
+}
diff --git a/core/node/da_clients/src/celestia/generated/cosmos.crypto.multisig.v1beta1.rs b/core/node/da_clients/src/celestia/generated/cosmos.crypto.multisig.v1beta1.rs
new file mode 100644
index 000000000000..c514b3739b21
--- /dev/null
+++ b/core/node/da_clients/src/celestia/generated/cosmos.crypto.multisig.v1beta1.rs
@@ -0,0 +1,40 @@
+// This file is @generated by prost-build.
+/// MultiSignature wraps the signatures from a multisig.LegacyAminoPubKey.
+/// See cosmos.tx.v1beta1.ModeInfo.Multi for how to specify which signers
+/// signed and with which modes.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct MultiSignature {
+    #[prost(bytes = "bytes", repeated, tag = "1")]
+    pub signatures: ::prost::alloc::vec::Vec<::prost::bytes::Bytes>,
+}
+impl ::prost::Name for MultiSignature {
+    const NAME: &'static str = "MultiSignature";
+    const PACKAGE: &'static str = "cosmos.crypto.multisig.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.crypto.multisig.v1beta1.MultiSignature".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.crypto.multisig.v1beta1.MultiSignature".into()
+    }
+}
+/// CompactBitArray is an implementation of a space efficient bit array.
+/// This is used to ensure that the encoded data takes up a minimal amount of
+/// space after proto encoding.
+/// This is not thread safe, and is not intended for concurrent usage.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CompactBitArray {
+    #[prost(uint32, tag = "1")]
+    pub extra_bits_stored: u32,
+    #[prost(bytes = "bytes", tag = "2")]
+    pub elems: ::prost::bytes::Bytes,
+}
+impl ::prost::Name for CompactBitArray {
+    const NAME: &'static str = "CompactBitArray";
+    const PACKAGE: &'static str = "cosmos.crypto.multisig.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.crypto.multisig.v1beta1.CompactBitArray".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.crypto.multisig.v1beta1.CompactBitArray".into()
+    }
+}
diff --git a/core/node/da_clients/src/celestia/generated/cosmos.crypto.secp256k1.rs b/core/node/da_clients/src/celestia/generated/cosmos.crypto.secp256k1.rs
new file mode 100644
index 000000000000..081aec09682b
--- /dev/null
+++ b/core/node/da_clients/src/celestia/generated/cosmos.crypto.secp256k1.rs
@@ -0,0 +1,21 @@
+// This file is @generated by prost-build.
+/// PubKey defines a secp256k1 public key.
+/// Key is the compressed form of the pubkey: the first byte is 0x02 if the
+/// y-coordinate is the lexicographically largest of the two associated with
+/// the x-coordinate, and 0x03 otherwise.
+/// This prefix is followed by the x-coordinate.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PubKey {
+    #[prost(bytes = "bytes", tag = "1")]
+    pub key: ::prost::bytes::Bytes,
+}
+impl ::prost::Name for PubKey {
+    const NAME: &'static str = "PubKey";
+    const PACKAGE: &'static str = "cosmos.crypto.secp256k1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.crypto.secp256k1.PubKey".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.crypto.secp256k1.PubKey".into()
+    }
+}
diff --git a/core/node/da_clients/src/celestia/generated/cosmos.tx.signing.v1beta1.rs b/core/node/da_clients/src/celestia/generated/cosmos.tx.signing.v1beta1.rs
new file mode 100644
index 000000000000..54f3fa9d00d7
--- /dev/null
+++ b/core/node/da_clients/src/celestia/generated/cosmos.tx.signing.v1beta1.rs
@@ -0,0 +1,72 @@
+// This file is @generated by prost-build.
+/// SignMode represents a signing mode with its own security guarantees.
+///
+/// This enum should be considered a registry of all known sign modes
+/// in the Cosmos ecosystem. Apps are not expected to support all known
+/// sign modes. Apps that would like to support custom sign modes are
+/// encouraged to open a small PR against this file to add a new case
+/// to this SignMode enum describing their sign mode so that different
+/// apps have a consistent version of this enum.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum SignMode {
+    /// SIGN_MODE_UNSPECIFIED specifies an unknown signing mode and will be
+    /// rejected.
+    Unspecified = 0,
+    /// SIGN_MODE_DIRECT specifies a signing mode which uses SignDoc and is
+    /// verified with raw bytes from Tx.
+    Direct = 1,
+    /// SIGN_MODE_TEXTUAL is a future signing mode that will verify some
+    /// human-readable textual representation on top of the binary representation
+    /// from SIGN_MODE_DIRECT. It is currently not supported.
+    Textual = 2,
+    /// SIGN_MODE_DIRECT_AUX specifies a signing mode which uses
+    /// SignDocDirectAux. As opposed to SIGN_MODE_DIRECT, this sign mode does not
+    /// require signers signing over other signers' `signer_info`. It also allows
+    /// for adding Tips in transactions.
+    ///
+    /// Since: cosmos-sdk 0.46
+    DirectAux = 3,
+    /// SIGN_MODE_LEGACY_AMINO_JSON is a backwards compatibility mode which uses
+    /// Amino JSON and will be removed in the future.
+    LegacyAminoJson = 127,
+    /// SIGN_MODE_EIP_191 specifies the sign mode for EIP 191 signing on the Cosmos
+    /// SDK. Ref: <https://eips.ethereum.org/EIPS/eip-191>
+    ///
+    /// Currently, SIGN_MODE_EIP_191 is registered as a SignMode enum variant,
+    /// but is not implemented on the SDK by default. To enable EIP-191, you need
+    /// to pass a custom `TxConfig` that has an implementation of
+    /// `SignModeHandler` for EIP-191. The SDK may decide to fully support
+    /// EIP-191 in the future.
+    ///
+    /// Since: cosmos-sdk 0.45.2
+    Eip191 = 191,
+}
+impl SignMode {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Unspecified => "SIGN_MODE_UNSPECIFIED",
+            Self::Direct => "SIGN_MODE_DIRECT",
+            Self::Textual => "SIGN_MODE_TEXTUAL",
+            Self::DirectAux => "SIGN_MODE_DIRECT_AUX",
+            Self::LegacyAminoJson => "SIGN_MODE_LEGACY_AMINO_JSON",
+            Self::Eip191 => "SIGN_MODE_EIP_191",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "SIGN_MODE_UNSPECIFIED" => Some(Self::Unspecified),
+            "SIGN_MODE_DIRECT" => Some(Self::Direct),
+            "SIGN_MODE_TEXTUAL" => Some(Self::Textual),
+            "SIGN_MODE_DIRECT_AUX" => Some(Self::DirectAux),
+            "SIGN_MODE_LEGACY_AMINO_JSON" => Some(Self::LegacyAminoJson),
+            "SIGN_MODE_EIP_191" => Some(Self::Eip191),
+            _ => None,
+        }
+    }
+}
diff --git a/core/node/da_clients/src/celestia/generated/cosmos.tx.v1beta1.rs b/core/node/da_clients/src/celestia/generated/cosmos.tx.v1beta1.rs
new file mode 100644
index 000000000000..7783eabcdbac
--- /dev/null
+++ b/core/node/da_clients/src/celestia/generated/cosmos.tx.v1beta1.rs
@@ -0,0 +1,553 @@
+// This file is @generated by prost-build.
+/// Tx is the standard type used for broadcasting transactions.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Tx {
+    /// body is the processable content of the transaction
+    #[prost(message, optional, tag = "1")]
+    pub body: ::core::option::Option<TxBody>,
+    /// auth_info is the authorization related content of the transaction,
+    /// specifically signers, signer modes and fee
+    #[prost(message, optional, tag = "2")]
+    pub auth_info: ::core::option::Option<AuthInfo>,
+    /// signatures is a list of signatures that matches the length and order of
+    /// AuthInfo's signer_infos to allow connecting signature meta information like
+    /// public key and signing mode by position.
+    #[prost(bytes = "bytes", repeated, tag = "3")]
+    pub signatures: ::prost::alloc::vec::Vec<::prost::bytes::Bytes>,
+}
+impl ::prost::Name for Tx {
+    const NAME: &'static str = "Tx";
+    const PACKAGE: &'static str = "cosmos.tx.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.tx.v1beta1.Tx".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.tx.v1beta1.Tx".into()
+    }
+}
+/// SignDoc is the type used for generating sign bytes for SIGN_MODE_DIRECT.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SignDoc {
+    /// body_bytes is protobuf serialization of a TxBody that matches the
+    /// representation in TxRaw.
+ #[prost(bytes = "bytes", tag = "1")] + pub body_bytes: ::prost::bytes::Bytes, + /// auth_info_bytes is a protobuf serialization of an AuthInfo that matches the + /// representation in TxRaw. + #[prost(bytes = "bytes", tag = "2")] + pub auth_info_bytes: ::prost::bytes::Bytes, + /// chain_id is the unique identifier of the chain this transaction targets. + /// It prevents signed transactions from being used on another chain by an + /// attacker + #[prost(string, tag = "3")] + pub chain_id: ::prost::alloc::string::String, + /// account_number is the account number of the account in state + #[prost(uint64, tag = "4")] + pub account_number: u64, +} +impl ::prost::Name for SignDoc { + const NAME: &'static str = "SignDoc"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.SignDoc".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.SignDoc".into() + } +} +/// TxBody is the body of a transaction that all signers sign over. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxBody { + /// messages is a list of messages to be executed. The required signers of + /// those messages define the number and order of elements in AuthInfo's + /// signer_infos and Tx's signatures. Each required signer address is added to + /// the list only the first time it occurs. + /// By convention, the first required signer (usually from the first message) + /// is referred to as the primary signer and pays the fee for the whole + /// transaction. + #[prost(message, repeated, tag = "1")] + pub messages: ::prost::alloc::vec::Vec<::pbjson_types::Any>, + /// memo is any arbitrary note/comment to be added to the transaction. + /// WARNING: in clients, any publicly exposed text should not be called memo, + /// but should be called `note` instead (see ). + #[prost(string, tag = "2")] + pub memo: ::prost::alloc::string::String, + /// timeout is the block height after which this transaction will not + /// be processed by the chain + #[prost(uint64, tag = "3")] + pub timeout_height: u64, + /// extension_options are arbitrary options that can be added by chains + /// when the default options are not sufficient. If any of these are present + /// and can't be handled, the transaction will be rejected + #[prost(message, repeated, tag = "1023")] + pub extension_options: ::prost::alloc::vec::Vec<::pbjson_types::Any>, + /// extension_options are arbitrary options that can be added by chains + /// when the default options are not sufficient. If any of these are present + /// and can't be handled, they will be ignored + #[prost(message, repeated, tag = "2047")] + pub non_critical_extension_options: ::prost::alloc::vec::Vec<::pbjson_types::Any>, +} +impl ::prost::Name for TxBody { + const NAME: &'static str = "TxBody"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + "cosmos.tx.v1beta1.TxBody".into() + } + fn type_url() -> ::prost::alloc::string::String { + "/cosmos.tx.v1beta1.TxBody".into() + } +} +/// AuthInfo describes the fee and signer modes that are used to sign a +/// transaction. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AuthInfo { + /// signer_infos defines the signing modes for the required signers. The number + /// and order of elements must match the required signers from TxBody's + /// messages. The first element is the primary signer and the one which pays + /// the fee. 
+    #[prost(message, repeated, tag = "1")]
+    pub signer_infos: ::prost::alloc::vec::Vec<SignerInfo>,
+    /// Fee is the fee and gas limit for the transaction. The first signer is the
+    /// primary signer and the one which pays the fee. The fee can be calculated
+    /// based on the cost of evaluating the body and doing signature verification
+    /// of the signers. This can be estimated via simulation.
+    #[prost(message, optional, tag = "2")]
+    pub fee: ::core::option::Option<Fee>,
+    /// Tip is the optional tip used for transactions fees paid in another denom.
+    ///
+    /// This field is ignored if the chain didn't enable tips, i.e. didn't add the
+    /// `TipDecorator` in its posthandler.
+    ///
+    /// Since: cosmos-sdk 0.46
+    #[prost(message, optional, tag = "3")]
+    pub tip: ::core::option::Option<Tip>,
+}
+impl ::prost::Name for AuthInfo {
+    const NAME: &'static str = "AuthInfo";
+    const PACKAGE: &'static str = "cosmos.tx.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.tx.v1beta1.AuthInfo".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.tx.v1beta1.AuthInfo".into()
+    }
+}
+/// SignerInfo describes the public key and signing mode of a single top-level
+/// signer.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SignerInfo {
+    /// public_key is the public key of the signer. It is optional for accounts
+    /// that already exist in state. If unset, the verifier can use the required
+    /// signer address for this position and lookup the public key.
+    #[prost(message, optional, tag = "1")]
+    pub public_key: ::core::option::Option<::pbjson_types::Any>,
+    /// mode_info describes the signing mode of the signer and is a nested
+    /// structure to support nested multisig pubkey's
+    #[prost(message, optional, tag = "2")]
+    pub mode_info: ::core::option::Option<ModeInfo>,
+    /// sequence is the sequence of the account, which describes the
+    /// number of committed transactions signed by a given address. It is used to
+    /// prevent replay attacks.
+    #[prost(uint64, tag = "3")]
+    pub sequence: u64,
+}
+impl ::prost::Name for SignerInfo {
+    const NAME: &'static str = "SignerInfo";
+    const PACKAGE: &'static str = "cosmos.tx.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.tx.v1beta1.SignerInfo".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.tx.v1beta1.SignerInfo".into()
+    }
+}
+/// ModeInfo describes the signing mode of a single or nested multisig signer.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ModeInfo {
+    /// sum is the oneof that specifies whether this represents a single or nested
+    /// multisig signer
+    #[prost(oneof = "mode_info::Sum", tags = "1, 2")]
+    pub sum: ::core::option::Option<mode_info::Sum>,
+}
+/// Nested message and enum types in `ModeInfo`.
+pub mod mode_info {
+    /// Single is the mode info for a single signer. It is structured as a message
+    /// to allow for additional fields such as locale for SIGN_MODE_TEXTUAL in the
+    /// future
+    #[derive(Clone, Copy, PartialEq, ::prost::Message)]
+    pub struct Single {
+        /// mode is the signing mode of the single signer
+        #[prost(enumeration = "super::super::signing::SignMode", tag = "1")]
+        pub mode: i32,
+    }
+    impl ::prost::Name for Single {
+        const NAME: &'static str = "Single";
+        const PACKAGE: &'static str = "cosmos.tx.v1beta1";
+        fn full_name() -> ::prost::alloc::string::String {
+            "cosmos.tx.v1beta1.ModeInfo.Single".into()
+        }
+        fn type_url() -> ::prost::alloc::string::String {
+            "/cosmos.tx.v1beta1.ModeInfo.Single".into()
+        }
+    }
+    /// Multi is the mode info for a multisig public key
+    #[derive(Clone, PartialEq, ::prost::Message)]
+    pub struct Multi {
+        /// bitarray specifies which keys within the multisig are signing
+        #[prost(message, optional, tag = "1")]
+        pub bitarray: ::core::option::Option<
+            super::super::super::crypto::multisig::CompactBitArray,
+        >,
+        /// mode_infos is the corresponding modes of the signers of the multisig
+        /// which could include nested multisig public keys
+        #[prost(message, repeated, tag = "2")]
+        pub mode_infos: ::prost::alloc::vec::Vec<super::ModeInfo>,
+    }
+    impl ::prost::Name for Multi {
+        const NAME: &'static str = "Multi";
+        const PACKAGE: &'static str = "cosmos.tx.v1beta1";
+        fn full_name() -> ::prost::alloc::string::String {
+            "cosmos.tx.v1beta1.ModeInfo.Multi".into()
+        }
+        fn type_url() -> ::prost::alloc::string::String {
+            "/cosmos.tx.v1beta1.ModeInfo.Multi".into()
+        }
+    }
+    /// sum is the oneof that specifies whether this represents a single or nested
+    /// multisig signer
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Sum {
+        /// single represents a single signer
+        #[prost(message, tag = "1")]
+        Single(Single),
+        /// multi represents a nested multisig signer
+        #[prost(message, tag = "2")]
+        Multi(Multi),
+    }
+}
+impl ::prost::Name for ModeInfo {
+    const NAME: &'static str = "ModeInfo";
+    const PACKAGE: &'static str = "cosmos.tx.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.tx.v1beta1.ModeInfo".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.tx.v1beta1.ModeInfo".into()
+    }
+}
+/// Fee includes the amount of coins paid in fees and the maximum
+/// gas to be used by the transaction. The ratio yields an effective "gasprice",
+/// which must be above some minimum to be accepted into the mempool.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Fee {
+    /// amount is the amount of coins to be paid as a fee
+    #[prost(message, repeated, tag = "1")]
+    pub amount: ::prost::alloc::vec::Vec<super::super::base::v1beta1::Coin>,
+    /// gas_limit is the maximum gas that can be used in transaction processing
+    /// before an out of gas error occurs
+    #[prost(uint64, tag = "2")]
+    pub gas_limit: u64,
+    /// if unset, the first signer is responsible for paying the fees. If set, the specified account must pay the fees.
+    /// the payer must be a tx signer (and thus have signed this field in AuthInfo).
+    /// setting this field does *not* change the ordering of required signers for the transaction.
+    #[prost(string, tag = "3")]
+    pub payer: ::prost::alloc::string::String,
+    /// if set, the fee payer (either the first signer or the value of the payer field) requests that a fee grant be used
+    /// to pay fees instead of the fee payer's own balance. If an appropriate fee grant does not exist or the chain does
+    /// not support fee grants, this will fail
+    #[prost(string, tag = "4")]
+    pub granter: ::prost::alloc::string::String,
+}
+impl ::prost::Name for Fee {
+    const NAME: &'static str = "Fee";
+    const PACKAGE: &'static str = "cosmos.tx.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.tx.v1beta1.Fee".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.tx.v1beta1.Fee".into()
+    }
+}
+/// Tip is the tip used for meta-transactions.
+///
+/// Since: cosmos-sdk 0.46
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Tip {
+    /// amount is the amount of the tip
+    #[prost(message, repeated, tag = "1")]
+    pub amount: ::prost::alloc::vec::Vec<super::super::base::v1beta1::Coin>,
+    /// tipper is the address of the account paying for the tip
+    #[prost(string, tag = "2")]
+    pub tipper: ::prost::alloc::string::String,
+}
+impl ::prost::Name for Tip {
+    const NAME: &'static str = "Tip";
+    const PACKAGE: &'static str = "cosmos.tx.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.tx.v1beta1.Tip".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.tx.v1beta1.Tip".into()
+    }
+}
+/// BroadcastTxRequest is the request type for the Service.BroadcastTxRequest
+/// RPC method.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BroadcastTxRequest {
+    /// tx_bytes is the raw transaction.
+    #[prost(bytes = "bytes", tag = "1")]
+    pub tx_bytes: ::prost::bytes::Bytes,
+    #[prost(enumeration = "BroadcastMode", tag = "2")]
+    pub mode: i32,
+}
+impl ::prost::Name for BroadcastTxRequest {
+    const NAME: &'static str = "BroadcastTxRequest";
+    const PACKAGE: &'static str = "cosmos.tx.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.tx.v1beta1.BroadcastTxRequest".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.tx.v1beta1.BroadcastTxRequest".into()
+    }
+}
+/// BroadcastTxResponse is the response type for the
+/// Service.BroadcastTx method.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BroadcastTxResponse {
+    /// tx_response is the queried TxResponses.
+    #[prost(message, optional, tag = "1")]
+    pub tx_response: ::core::option::Option<
+        super::super::base::abci::TxResponse,
+    >,
+}
+impl ::prost::Name for BroadcastTxResponse {
+    const NAME: &'static str = "BroadcastTxResponse";
+    const PACKAGE: &'static str = "cosmos.tx.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.tx.v1beta1.BroadcastTxResponse".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.tx.v1beta1.BroadcastTxResponse".into()
+    }
+}
+/// GetTxRequest is the request type for the Service.GetTx
+/// RPC method.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetTxRequest {
+    /// hash is the tx hash to query, encoded as a hex string.
+    #[prost(string, tag = "1")]
+    pub hash: ::prost::alloc::string::String,
+}
+impl ::prost::Name for GetTxRequest {
+    const NAME: &'static str = "GetTxRequest";
+    const PACKAGE: &'static str = "cosmos.tx.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.tx.v1beta1.GetTxRequest".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.tx.v1beta1.GetTxRequest".into()
+    }
+}
+/// GetTxResponse is the response type for the Service.GetTx method.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetTxResponse {
+    /// tx is the queried transaction.
+    #[prost(message, optional, tag = "1")]
+    pub tx: ::core::option::Option<Tx>,
+    /// tx_response is the queried TxResponses.
+    #[prost(message, optional, tag = "2")]
+    pub tx_response: ::core::option::Option<
+        super::super::base::abci::TxResponse,
+    >,
+}
+impl ::prost::Name for GetTxResponse {
+    const NAME: &'static str = "GetTxResponse";
+    const PACKAGE: &'static str = "cosmos.tx.v1beta1";
+    fn full_name() -> ::prost::alloc::string::String {
+        "cosmos.tx.v1beta1.GetTxResponse".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/cosmos.tx.v1beta1.GetTxResponse".into()
+    }
+}
+/// BroadcastMode specifies the broadcast mode for the TxService.Broadcast RPC method.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum BroadcastMode {
+    /// zero-value for mode ordering
+    Unspecified = 0,
+    /// BROADCAST_MODE_BLOCK defines a tx broadcasting mode where the client waits for
+    /// the tx to be committed in a block.
+    Block = 1,
+    /// BROADCAST_MODE_SYNC defines a tx broadcasting mode where the client waits for
+    /// a CheckTx execution response only.
+    Sync = 2,
+    /// BROADCAST_MODE_ASYNC defines a tx broadcasting mode where the client returns
+    /// immediately.
+    Async = 3,
+}
+impl BroadcastMode {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Self::Unspecified => "BROADCAST_MODE_UNSPECIFIED",
+            Self::Block => "BROADCAST_MODE_BLOCK",
+            Self::Sync => "BROADCAST_MODE_SYNC",
+            Self::Async => "BROADCAST_MODE_ASYNC",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "BROADCAST_MODE_UNSPECIFIED" => Some(Self::Unspecified),
+            "BROADCAST_MODE_BLOCK" => Some(Self::Block),
+            "BROADCAST_MODE_SYNC" => Some(Self::Sync),
+            "BROADCAST_MODE_ASYNC" => Some(Self::Async),
+            _ => None,
+        }
+    }
+}
+/// Generated client implementations.
+pub mod service_client {
+    #![allow(
+        unused_variables,
+        dead_code,
+        missing_docs,
+        clippy::wildcard_imports,
+        clippy::let_unit_value,
+    )]
+    use tonic::codegen::*;
+    use tonic::codegen::http::Uri;
+    /// Service defines a gRPC service for interacting with transactions.
+    #[derive(Debug, Clone)]
+    pub struct ServiceClient<T> {
+        inner: tonic::client::Grpc<T>,
+    }
+    impl ServiceClient<tonic::transport::Channel> {
+        /// Attempt to create a new client by connecting to a given endpoint.
+        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
+        where
+            D: TryInto<tonic::transport::Endpoint>,
+            D::Error: Into<StdError>,
+        {
+            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
+            Ok(Self::new(conn))
+        }
+    }
+    impl<T> ServiceClient<T>
+    where
+        T: tonic::client::GrpcService<tonic::body::BoxBody>,
+        T::Error: Into<StdError>,
+        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
+        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
+    {
+        pub fn new(inner: T) -> Self {
+            let inner = tonic::client::Grpc::new(inner);
+            Self { inner }
+        }
+        pub fn with_origin(inner: T, origin: Uri) -> Self {
+            let inner = tonic::client::Grpc::with_origin(inner, origin);
+            Self { inner }
+        }
+        pub fn with_interceptor<F>(
+            inner: T,
+            interceptor: F,
+        ) -> ServiceClient<InterceptedService<T, F>>
+        where
+            F: tonic::service::Interceptor,
+            T::ResponseBody: Default,
+            T: tonic::codegen::Service<
+                http::Request<tonic::body::BoxBody>,
+                Response = http::Response<
+                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
+                >,
+            >,
+            <T as tonic::codegen::Service<
+                http::Request<tonic::body::BoxBody>,
+            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
+        {
+            ServiceClient::new(InterceptedService::new(inner, interceptor))
+        }
+        /// Compress requests with the given encoding.
+        ///
+        /// This requires the server to support it otherwise it might respond with an
+        /// error.
+        #[must_use]
+        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.send_compressed(encoding);
+            self
+        }
+        /// Enable decompressing responses.
+        #[must_use]
+        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.accept_compressed(encoding);
+            self
+        }
+        /// Limits the maximum size of a decoded message.
+        ///
+        /// Default: `4MB`
+        #[must_use]
+        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_decoding_message_size(limit);
+            self
+        }
+        /// Limits the maximum size of an encoded message.
+        ///
+        /// Default: `usize::MAX`
+        #[must_use]
+        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_encoding_message_size(limit);
+            self
+        }
+        /// GetTx fetches a tx by hash.
+        pub async fn get_tx(
+            &mut self,
+            request: impl tonic::IntoRequest<super::GetTxRequest>,
+        ) -> std::result::Result<tonic::Response<super::GetTxResponse>, tonic::Status> {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::unknown(
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/cosmos.tx.v1beta1.Service/GetTx",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("cosmos.tx.v1beta1.Service", "GetTx"));
+            self.inner.unary(req, path, codec).await
+        }
+        /// BroadcastTx broadcasts a transaction.
+        pub async fn broadcast_tx(
+            &mut self,
+            request: impl tonic::IntoRequest<super::BroadcastTxRequest>,
+        ) -> std::result::Result<
+            tonic::Response<super::BroadcastTxResponse>,
+            tonic::Status,
+        > {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::unknown(
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/cosmos.tx.v1beta1.Service/BroadcastTx",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("cosmos.tx.v1beta1.Service", "BroadcastTx"));
+            self.inner.unary(req, path, codec).await
+        }
+    }
+}
diff --git a/core/node/da_clients/src/celestia/generated/tendermint.abci.rs b/core/node/da_clients/src/celestia/generated/tendermint.abci.rs
new file mode 100644
index 000000000000..ab3bbeb946f6
--- /dev/null
+++ b/core/node/da_clients/src/celestia/generated/tendermint.abci.rs
@@ -0,0 +1,42 @@
+// This file is @generated by prost-build.
+/// Event allows application developers to attach additional information to
+/// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx.
+/// Later, transactions may be queried using these events.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Event {
+    #[prost(string, tag = "1")]
+    pub r#type: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "2")]
+    pub attributes: ::prost::alloc::vec::Vec<EventAttribute>,
+}
+impl ::prost::Name for Event {
+    const NAME: &'static str = "Event";
+    const PACKAGE: &'static str = "tendermint.abci";
+    fn full_name() -> ::prost::alloc::string::String {
+        "tendermint.abci.Event".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/tendermint.abci.Event".into()
+    }
+}
+/// EventAttribute is a single key-value pair, associated with an event.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct EventAttribute {
+    #[prost(bytes = "bytes", tag = "1")]
+    pub key: ::prost::bytes::Bytes,
+    #[prost(bytes = "bytes", tag = "2")]
+    pub value: ::prost::bytes::Bytes,
+    /// nondeterministic
+    #[prost(bool, tag = "3")]
+    pub index: bool,
+}
+impl ::prost::Name for EventAttribute {
+    const NAME: &'static str = "EventAttribute";
+    const PACKAGE: &'static str = "tendermint.abci";
+    fn full_name() -> ::prost::alloc::string::String {
+        "tendermint.abci.EventAttribute".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/tendermint.abci.EventAttribute".into()
+    }
+}
diff --git a/core/node/da_clients/src/celestia/generated/tendermint.types.rs b/core/node/da_clients/src/celestia/generated/tendermint.types.rs
new file mode 100644
index 000000000000..000e3f2c1fbc
--- /dev/null
+++ b/core/node/da_clients/src/celestia/generated/tendermint.types.rs
@@ -0,0 +1,48 @@
+// This file is @generated by prost-build.
+/// Blob (named after binary large object) is a chunk of data submitted by a user
+/// to be published to the Celestia blockchain. The data of a Blob is published
+/// to a namespace and is encoded into shares based on the format specified by
+/// share_version.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Blob {
+    #[prost(bytes = "bytes", tag = "1")]
+    pub namespace_id: ::prost::bytes::Bytes,
+    #[prost(bytes = "bytes", tag = "2")]
+    pub data: ::prost::bytes::Bytes,
+    #[prost(uint32, tag = "3")]
+    pub share_version: u32,
+    #[prost(uint32, tag = "4")]
+    pub namespace_version: u32,
+}
+impl ::prost::Name for Blob {
+    const NAME: &'static str = "Blob";
+    const PACKAGE: &'static str = "tendermint.types";
+    fn full_name() -> ::prost::alloc::string::String {
+        "tendermint.types.Blob".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/tendermint.types.Blob".into()
+    }
+}
+/// BlobTx wraps an encoded sdk.Tx with a second field to contain blobs of data.
+/// The raw bytes of the blobs are not signed over, instead we verify each blob
+/// using the relevant MsgPayForBlobs that is signed over in the encoded sdk.Tx.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobTx {
+    #[prost(bytes = "bytes", tag = "1")]
+    pub tx: ::prost::bytes::Bytes,
+    #[prost(message, repeated, tag = "2")]
+    pub blobs: ::prost::alloc::vec::Vec<Blob>,
+    #[prost(string, tag = "3")]
+    pub type_id: ::prost::alloc::string::String,
+}
+impl ::prost::Name for BlobTx {
+    const NAME: &'static str = "BlobTx";
+    const PACKAGE: &'static str = "tendermint.types";
+    fn full_name() -> ::prost::alloc::string::String {
+        "tendermint.types.BlobTx".into()
+    }
+    fn type_url() -> ::prost::alloc::string::String {
+        "/tendermint.types.BlobTx".into()
+    }
+}
diff --git a/core/node/da_clients/src/celestia/mod.rs b/core/node/da_clients/src/celestia/mod.rs
new file mode 100644
index 000000000000..ce648531f282
--- /dev/null
+++ b/core/node/da_clients/src/celestia/mod.rs
@@ -0,0 +1,58 @@
+mod client;
+mod sdk;
+
+pub use self::client::CelestiaClient;
+
+pub mod celestia_proto {
+    include!("generated/celestia.blob.v1.rs");
+}
+
+pub mod cosmos {
+    pub mod auth {
+        include!("generated/cosmos.auth.v1beta1.rs");
+    }
+
+    pub mod base {
+        pub mod abci {
+            include!("generated/cosmos.base.abci.v1beta1.rs");
+        }
+
+        pub mod node {
+            include!("generated/cosmos.base.node.v1beta1.rs");
+        }
+
+        pub mod v1beta1 {
+            include!("generated/cosmos.base.v1beta1.rs");
+        }
+    }
+
+    pub mod tx {
+        pub mod signing {
+            include!("generated/cosmos.tx.signing.v1beta1.rs");
+        }
+
+        pub mod v1beta1 {
+            include!("generated/cosmos.tx.v1beta1.rs");
+        }
+    }
+
+    pub mod crypto {
+        pub mod multisig {
+            include!("generated/cosmos.crypto.multisig.v1beta1.rs");
+        }
+
+        pub mod secp256k1 {
+            include!("generated/cosmos.crypto.secp256k1.rs");
+        }
+    }
+}
+
+pub mod tendermint {
+    pub mod abci {
+        include!("generated/tendermint.abci.rs");
+    }
+
+    pub mod types {
+        include!("generated/tendermint.types.rs");
+    }
+}
diff --git a/core/node/da_clients/src/celestia/sdk.rs b/core/node/da_clients/src/celestia/sdk.rs
new file mode 100644
index 000000000000..5fd9aea79f07
--- /dev/null
+++ b/core/node/da_clients/src/celestia/sdk.rs
@@ -0,0 +1,602 @@
+use std::{
+    fmt::{Display, Formatter, Result},
+    str::FromStr,
+    time::{Duration, Instant},
+};
+
+use celestia_types::Blob;
+use prost::{bytes::Bytes, Message, Name};
+use secp256k1::{PublicKey, Secp256k1, SecretKey};
+use sha2::Digest;
+use tonic::transport::Channel;
+
+use super::{
+    celestia_proto::{
+        query_client::QueryClient as BlobQueryClient, MsgPayForBlobs,
+        QueryParamsRequest as QueryBlobParamsRequest,
+    },
+    cosmos::{
+        auth::{
+            query_client::QueryClient as AuthQueryClient, BaseAccount, QueryAccountRequest,
+            QueryParamsRequest as QueryAuthParamsRequest,
+        },
+        base::{
+            node::{
+                service_client::ServiceClient as MinGasPriceClient,
+                ConfigRequest as MinGasPriceRequest,
+            },
+            v1beta1::Coin,
+        },
+        crypto::secp256k1 as ec_proto,
+        tx::v1beta1::{
+            mode_info::{Single, Sum},
+            service_client::ServiceClient as TxClient,
+            AuthInfo, BroadcastMode, BroadcastTxRequest, Fee, GetTxRequest, ModeInfo, SignDoc,
+            SignerInfo, Tx, TxBody,
+        },
+    },
+    tendermint::types::{Blob as PbBlob, BlobTx},
+};
+
+const UNITS_SUFFIX: &str = "utia";
+pub const ADDRESS_LENGTH: usize = 20;
+const ACCOUNT_ADDRESS_PREFIX: bech32::Hrp = bech32::Hrp::parse_unchecked("celestia");
+const BLOB_TX_TYPE_ID: &str = "BLOB";
+
+#[derive(Clone)]
+pub(crate) struct RawCelestiaClient {
+    grpc_channel: Channel,
+    address: String,
+    chain_id: String,
+    signing_key: SecretKey,
+}
+
+impl RawCelestiaClient {
+    pub(crate) fn new(
+        grpc_channel: Channel,
+        private_key: String,
+        chain_id: String,
+    ) -> anyhow::Result<Self> {
+        let signing_key = SecretKey::from_str(&private_key)
+            .map_err(|e| anyhow::anyhow!("Failed to parse private key: {}", e))?;
+        let address = get_address(signing_key.public_key(&Secp256k1::new()))?;
+
+        Ok(Self {
+            grpc_channel,
+            address,
+            chain_id,
+            signing_key,
+        })
+    }
+
+    /// Prepares a blob transaction for the given blobs.
+    pub(crate) async fn prepare(&self, blobs: Vec<Blob>) -> anyhow::Result<BlobTx> {
+        let (gas_per_blob_byte, tx_size_cost_per_byte, min_gas_price, base_account) = tokio::try_join!(
+            self.get_gas_per_blob_byte(),
+            self.fetch_tx_size_cost_per_byte(),
+            self.fetch_min_gas_price(),
+            self.fetch_account(),
+        )?;
+
+        let msg_pay_for_blobs = new_msg_pay_for_blobs(blobs.as_slice(), self.address.clone())?;
+
+        let gas_limit = estimate_gas(
+            &msg_pay_for_blobs.blob_sizes,
+            gas_per_blob_byte,
+            tx_size_cost_per_byte,
+        );
+        let fee = calculate_fee(min_gas_price, gas_limit);
+
+        let signed_tx = new_signed_tx(
+            &msg_pay_for_blobs,
+            &base_account,
+            gas_limit,
+            fee,
+            self.chain_id.clone(),
+            &self.signing_key,
+        );
+
+        Ok(new_blob_tx(&signed_tx, blobs.iter()))
+    }
+
+    /// Submits the blob transaction to the node and returns the height of the block in which it
+    /// was included.
+    pub(super) async fn submit(
+        &self,
+        blob_tx_hash: BlobTxHash,
+        blob_tx: BlobTx,
+    ) -> anyhow::Result<u64> {
+        let mut client: TxClient<Channel> = TxClient::new(self.grpc_channel.clone());
+        let hex_encoded_tx_hash = self.broadcast_tx(&mut client, blob_tx).await?;
+        if hex_encoded_tx_hash != blob_tx_hash.clone().hex() {
+            tracing::error!(
+                "tx hash {} returned from celestia app is not the same as \
+                the locally calculated one {}; submission file has invalid data",
+                hex_encoded_tx_hash,
+                blob_tx_hash
+            );
+        }
+        tracing::info!(tx_hash = %hex_encoded_tx_hash, "broadcast blob transaction succeeded");
+
+        let height = self
+            .confirm_submission(&mut client, hex_encoded_tx_hash)
+            .await;
+        Ok(height)
+    }
+
+    /// Fetches the gas cost per byte for blobs from the node.
+    async fn get_gas_per_blob_byte(&self) -> anyhow::Result<u32> {
+        let mut blob_query_client = BlobQueryClient::new(self.grpc_channel.clone());
+        let response = blob_query_client.params(QueryBlobParamsRequest {}).await;
+
+        let params = response
+            .map_err(|status| {
+                anyhow::format_err!(
+                    "failed to get blob params, code: {}, message: {}",
+                    status.code(),
+                    status.message()
+                )
+            })?
+            .into_inner()
+            .params
+            .ok_or_else(|| anyhow::anyhow!("EmptyBlobParams"))?;
+
+        Ok(params.gas_per_blob_byte)
+    }
+
+    /// Fetches the transaction size cost per byte from the node.
+    async fn fetch_tx_size_cost_per_byte(&self) -> anyhow::Result<u64> {
+        let mut auth_query_client = AuthQueryClient::new(self.grpc_channel.clone());
+        let response = auth_query_client.params(QueryAuthParamsRequest {}).await;
+
+        let params = response
+            .map_err(|status| {
+                anyhow::format_err!(
+                    "failed to get auth params, code: {}, message: {}",
+                    status.code(),
+                    status.message()
+                )
+            })?
+            .into_inner()
+            .params
+            .ok_or_else(|| anyhow::anyhow!("EmptyAuthParams"))?;
+
+        Ok(params.tx_size_cost_per_byte)
+    }
+
+    /// Fetches the minimum gas price from the node.
+    async fn fetch_min_gas_price(&self) -> anyhow::Result<f64> {
+        let mut min_gas_price_client = MinGasPriceClient::new(self.grpc_channel.clone());
+        let response = min_gas_price_client.config(MinGasPriceRequest {}).await;
+
+        let min_gas_price_with_suffix = response
+            .map_err(|status| {
+                anyhow::format_err!(
+                    "failed to get price params, code: {}, message: {}",
+                    status.code(),
+                    status.message()
+                )
+            })?
+            .into_inner()
+            .minimum_gas_price;
+
+        let min_gas_price_str = min_gas_price_with_suffix
+            .strip_suffix(UNITS_SUFFIX)
+            .ok_or_else(|| {
+                anyhow::anyhow!(
+                    "MinGasPrice bad suffix, min_gas_price: {}, expected_suffix: {}",
+                    min_gas_price_with_suffix.clone(),
+                    UNITS_SUFFIX
+                )
+            })?;
+
+        min_gas_price_str.parse::<f64>().map_err(|source| {
+            anyhow::anyhow!(
+                "Failed to parse min gas price, min_gas_price: {}, err: {}",
+                min_gas_price_str,
+                source,
+            )
+        })
+    }
+
+    /// Fetches the account info for the current address.
+    async fn fetch_account(&self) -> anyhow::Result<BaseAccount> {
+        let mut auth_query_client = AuthQueryClient::new(self.grpc_channel.clone());
+        let request = QueryAccountRequest {
+            address: self.address.clone(),
+        };
+
+        let account_info = auth_query_client.account(request).await.map_err(|status| {
+            anyhow::anyhow!(
+                "failed to get account info, code: {}, message: {}",
+                status.code(),
+                status.message()
+            )
+        })?;
+
+        let account_as_any = account_info
+            .into_inner()
+            .account
+            .ok_or_else(|| anyhow::anyhow!("empty account info"))?;
+        let expected_type_url = BaseAccount::type_url();
+
+        if expected_type_url == account_as_any.type_url {
+            return BaseAccount::decode(&*account_as_any.value)
+                .map_err(|error| anyhow::anyhow!("failed to decode account info: {}", error));
+        }
+
+        Err(anyhow::anyhow!(
+            "unexpected account type, expected: {}, got: {}",
+            expected_type_url,
+            account_as_any.type_url
+        ))
+    }
+
+    /// Broadcasts the transaction and returns the transaction hash.
+    async fn broadcast_tx(
+        &self,
+        client: &mut TxClient<Channel>,
+        blob_tx: BlobTx,
+    ) -> anyhow::Result<String> {
+        let request = BroadcastTxRequest {
+            tx_bytes: Bytes::from(blob_tx.encode_to_vec()),
+            mode: i32::from(BroadcastMode::Sync),
+        };
+
+        let mut tx_response = client
+            .broadcast_tx(request)
+            .await
+            .map_err(|status| {
+                anyhow::anyhow!(
+                    "failed to broadcast the tx, code: {}, message: {}",
+                    status.code(),
+                    status.message()
+                )
+            })?
+            .into_inner()
+            .tx_response
+            .ok_or_else(|| anyhow::anyhow!("empty broadcast tx response"))?;
+
+        if tx_response.code != 0 {
+            return Err(anyhow::format_err!(
+                "failed to broadcast the tx, tx_hash: {}, code: {}, namespace: {}, log: {}",
+                tx_response.txhash,
+                tx_response.code,
+                tx_response.codespace,
+                tx_response.raw_log,
+            ));
+        }
+
+        tx_response.txhash.make_ascii_lowercase();
+        Ok(tx_response.txhash)
+    }
+
+    /// Waits for the transaction to be included in a block and returns the height of that block.
+    async fn confirm_submission(
+        &self,
+        client: &mut TxClient<Channel>,
+        hex_encoded_tx_hash: String,
+    ) -> u64 {
+        // The min seconds to sleep after receiving a GetTx response and sending the next request.
+        const MIN_POLL_INTERVAL_SECS: u64 = 1;
+        // The max seconds to sleep after receiving a GetTx response and sending the next request.
+        const MAX_POLL_INTERVAL_SECS: u64 = 12;
+        // How long to wait after starting `confirm_submission` before starting to log errors.
+        const START_LOGGING_DELAY: Duration = Duration::from_secs(12);
+        // The minimum duration between logging errors.
+        const LOG_ERROR_INTERVAL: Duration = Duration::from_secs(5);
+
+        let start = Instant::now();
+        let mut logged_at = start;
+
+        let mut log_if_due = |maybe_error: Option<anyhow::Error>| {
+            if start.elapsed() <= START_LOGGING_DELAY || logged_at.elapsed() <= LOG_ERROR_INTERVAL {
+                return;
+            }
+            let reason = maybe_error
+                .map_or(anyhow::anyhow!("transaction still pending"), |error| {
+                    anyhow::anyhow!("transaction still pending, error: {}", error)
+                });
+            tracing::warn!(
+                %reason,
+                tx_hash = %hex_encoded_tx_hash,
+                elapsed_seconds = start.elapsed().as_secs_f32(),
+                "waiting to confirm blob submission"
+            );
+            logged_at = Instant::now();
+        };
+
+        let mut sleep_secs = MIN_POLL_INTERVAL_SECS;
+        loop {
+            tokio::time::sleep(Duration::from_secs(sleep_secs)).await;
+            let res = self
+                .clone()
+                .get_tx(client, hex_encoded_tx_hash.clone())
+                .await;
+            match res {
+                Ok(Some(height)) => return height,
+                Ok(None) => {
+                    sleep_secs = MIN_POLL_INTERVAL_SECS;
+                    log_if_due(None);
+                }
+                Err(error) => {
+                    sleep_secs =
+                        std::cmp::min(sleep_secs.saturating_mul(2), MAX_POLL_INTERVAL_SECS);
+                    log_if_due(Some(error));
+                }
+            }
+        }
+    }
+
+    /// Returns the height of the block in which the transaction was included (if it was).
+    async fn get_tx(
+        self,
+        client: &mut TxClient<Channel>,
+        hex_encoded_tx_hash: String,
+    ) -> anyhow::Result<Option<u64>> {
+        let request = GetTxRequest {
+            hash: hex_encoded_tx_hash,
+        };
+        let response = client.get_tx(request).await;
+
+        let ok_response = match response {
+            Ok(resp) => resp,
+            Err(status) => {
+                if status.code() == tonic::Code::NotFound {
+                    tracing::trace!(msg = status.message(), "transaction still pending");
+                    return Ok(None);
+                }
+                return Err(anyhow::anyhow!(
+                    "failed to get tx, code: {}, message: {}",
+                    status.code(),
+                    status.message()
+                ));
+            }
+        };
+        let tx_response = ok_response
+            .into_inner()
+            .tx_response
+            .ok_or_else(|| anyhow::anyhow!("Empty get tx response"))?;
+        if tx_response.code != 0 {
+            return Err(anyhow::anyhow!(
+                "failed to get tx, tx_hash: {}, code: {}, namespace: {}, log: {}",
+                tx_response.txhash,
+                tx_response.code,
+                tx_response.codespace,
+                tx_response.raw_log,
+            ));
+        }
+        if tx_response.height == 0 {
+            tracing::trace!(tx_hash = %tx_response.txhash, "transaction still pending");
+            return Ok(None);
+        }
+
+        let height = u64::try_from(tx_response.height).map_err(|_| {
+            anyhow::anyhow!("GetTxResponseNegativeBlockHeight: {}", tx_response.height)
+        })?;
+
+        tracing::debug!(tx_hash = %tx_response.txhash, height, "transaction succeeded");
+        Ok(Some(height))
+    }
+}
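For orientation, and not part of the diff itself: the intended call sequence through `RawCelestiaClient`, presumably what the (separate, not shown here) `client.rs` drives, is roughly the sketch below. The `channel`, `private_key`, `chain_id` and `blobs` bindings are assumed inputs, not names from this PR:

```rust
// Hypothetical wiring; all four inputs would come from the DA client
// configuration and the batch being dispatched.
let client = RawCelestiaClient::new(channel, private_key, chain_id)?;

// Build the PayForBlobs transaction, remember its hash, then broadcast it and
// poll until it lands in a block; `submit` returns the inclusion height.
let blob_tx = client.prepare(blobs).await?;
let blob_tx_hash = BlobTxHash::compute(&blob_tx);
let height = client.submit(blob_tx_hash, blob_tx).await?;
```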
+
+/// Returns a `BlobTx` for the given signed tx and blobs.
+fn new_blob_tx<'a>(signed_tx: &Tx, blobs: impl Iterator<Item = &'a Blob>) -> BlobTx {
+    let blobs = blobs
+        .map(|blob| PbBlob {
+            namespace_id: Bytes::from(blob.namespace.id().to_vec()),
+            namespace_version: u32::from(blob.namespace.version()),
+            data: Bytes::from(blob.data.clone()),
+            share_version: u32::from(blob.share_version),
+        })
+        .collect();
+    BlobTx {
+        tx: Bytes::from(signed_tx.encode_to_vec()),
+        blobs,
+        type_id: BLOB_TX_TYPE_ID.to_string(),
+    }
+}
+
+/// Returns a signed tx for the given message, account and metadata.
+fn new_signed_tx(
+    msg_pay_for_blobs: &MsgPayForBlobs,
+    base_account: &BaseAccount,
+    gas_limit: u64,
+    fee: u64,
+    chain_id: String,
+    signing_key: &SecretKey,
+) -> Tx {
+    const SIGNING_MODE_INFO: Option<ModeInfo> = Some(ModeInfo {
+        sum: Some(Sum::Single(Single { mode: 1 })),
+    });
+
+    let fee_coin = Coin {
+        denom: UNITS_SUFFIX.to_string(),
+        amount: fee.to_string(),
+    };
+    let fee = Fee {
+        amount: vec![fee_coin],
+        gas_limit,
+        ..Fee::default()
+    };
+
+    let public_key = ec_proto::PubKey {
+        key: Bytes::from(
+            signing_key
+                .public_key(&Secp256k1::new())
+                .serialize()
+                .to_vec(),
+        ),
+    };
+    let public_key_as_any = pbjson_types::Any {
+        type_url: ec_proto::PubKey::type_url(),
+        value: public_key.encode_to_vec().into(),
+    };
+    let auth_info = AuthInfo {
+        signer_infos: vec![SignerInfo {
+            public_key: Some(public_key_as_any),
+            mode_info: SIGNING_MODE_INFO,
+            sequence: base_account.sequence,
+        }],
+        fee: Some(fee),
+        tip: None,
+    };
+
+    let msg = pbjson_types::Any {
+        type_url: MsgPayForBlobs::type_url(),
+        value: msg_pay_for_blobs.encode_to_vec().into(),
+    };
+    let tx_body = TxBody {
+        messages: vec![msg],
+        ..TxBody::default()
+    };
+
+    let bytes_to_sign = SignDoc {
+        body_bytes: Bytes::from(tx_body.encode_to_vec()),
+        auth_info_bytes: Bytes::from(auth_info.encode_to_vec()),
+        chain_id,
+        account_number: base_account.account_number,
+    }
+    .encode_to_vec();
+    let hashed_bytes: [u8; 32] = sha2::Sha256::digest(bytes_to_sign).into();
+    let signature = secp256k1::Secp256k1::new().sign_ecdsa(
+        &secp256k1::Message::from_slice(&hashed_bytes[..]).unwrap(), // unwrap is safe here because we know the length of the hashed bytes
+        signing_key,
+    );
+    Tx {
+        body: Some(tx_body),
+        auth_info: Some(auth_info),
+        signatures: vec![Bytes::from(signature.serialize_compact().to_vec())],
+    }
+}
+
+/// Returns the fee for the signed tx.
+fn calculate_fee(min_gas_price: f64, gas_limit: u64) -> u64 {
+    let calculated_fee = (min_gas_price * gas_limit as f64).ceil() as u64;
+    tracing::info!(
+        "calculated fee: {}, min_gas_price: {}, gas_limit: {}",
+        calculated_fee,
+        min_gas_price,
+        gas_limit
+    );
+
+    calculated_fee
+}
+
+fn estimate_gas(blob_sizes: &[u32], gas_per_blob_byte: u32, tx_size_cost_per_byte: u64) -> u64 {
+    // From https://github.com/celestiaorg/celestia-app/blob/v1.4.0/pkg/appconsts/global_consts.go#L28
+    const SHARE_SIZE: u64 = 512;
+    // From https://github.com/celestiaorg/celestia-app/blob/v1.4.0/pkg/appconsts/global_consts.go#L55
+    const CONTINUATION_COMPACT_SHARE_CONTENT_SIZE: u32 = 482;
+    // From https://github.com/celestiaorg/celestia-app/blob/v1.4.0/pkg/appconsts/global_consts.go#L59
+    const FIRST_SPARSE_SHARE_CONTENT_SIZE: u32 = 478;
+    // From https://github.com/celestiaorg/celestia-app/blob/v1.4.0/x/blob/types/payforblob.go#L40
+    const PFB_GAS_FIXED_COST: u64 = 75_000;
+    // From https://github.com/celestiaorg/celestia-app/blob/v1.4.0/x/blob/types/payforblob.go#L44
+    const BYTES_PER_BLOB_INFO: u64 = 70;
+
+    // From https://github.com/celestiaorg/celestia-app/blob/v1.4.0/pkg/shares/share_sequence.go#L126
+    //
+    // `blob_len` is the size in bytes of one blob's `data` field.
+    fn sparse_shares_needed(blob_len: u32) -> u64 {
+        if blob_len == 0 {
+            return 0;
+        }
+
+        if blob_len < FIRST_SPARSE_SHARE_CONTENT_SIZE {
+            return 1;
+        }
+
+        // Use `u64` here to avoid overflow while adding below.
+        let mut bytes_available = u64::from(FIRST_SPARSE_SHARE_CONTENT_SIZE);
+        let mut shares_needed = 1_u64;
+        while bytes_available < u64::from(blob_len) {
+            bytes_available = bytes_available
+                .checked_add(u64::from(CONTINUATION_COMPACT_SHARE_CONTENT_SIZE))
+                .expect(
+                    "this can't overflow, as on each iteration `bytes_available < u32::MAX`, and \
+                     we're adding at most `u32::MAX` to it",
+                );
+            shares_needed = shares_needed.checked_add(1).expect(
+                "this can't overflow, as the loop cannot execute for `u64::MAX` iterations",
+            );
+        }
+        shares_needed
+    }
+
+    let total_shares_used: u64 = blob_sizes.iter().copied().map(sparse_shares_needed).sum();
+    let blob_count = blob_sizes.len().try_into().unwrap_or(u64::MAX);
+
+    let shares_gas = total_shares_used
+        .saturating_mul(SHARE_SIZE)
+        .saturating_mul(u64::from(gas_per_blob_byte));
+    let blob_info_gas = tx_size_cost_per_byte
+        .saturating_mul(BYTES_PER_BLOB_INFO)
+        .saturating_mul(blob_count);
+
+    shares_gas
+        .saturating_add(blob_info_gas)
+        .saturating_add(PFB_GAS_FIXED_COST)
+}
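To make the arithmetic above concrete, here is a sanity-check sketch. The parameter values are assumptions on my part (they roughly match celestia-app defaults at the time of writing); the real `gas_per_blob_byte`, `tx_size_cost_per_byte` and minimum gas price are fetched from the node at runtime:

```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn gas_and_fee_estimation_sanity_check() {
        // A 479-byte blob needs two sparse shares: 478 bytes fit into the
        // first share, the remaining byte spills into one continuation share.
        let gas = estimate_gas(&[479], 8, 10);
        // 2 shares * 512 bytes * 8 gas + 10 gas * 70 bytes * 1 blob + 75_000 fixed
        assert_eq!(gas, 2 * 512 * 8 + 10 * 70 + 75_000);

        // With an assumed 0.002 utia/gas floor, the fee is rounded up to a
        // whole number of utia: ceil(83_892 * 0.002) = 168.
        assert_eq!(calculate_fee(0.002, gas), 168);
    }
}
```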
+
+/// Prepares a `MsgPayForBlobs` message for the given blobs.
+fn new_msg_pay_for_blobs(blobs: &[Blob], signer: String) -> anyhow::Result<MsgPayForBlobs> {
+    let mut blob_sizes = Vec::with_capacity(blobs.len());
+    let mut namespaces = Vec::with_capacity(blobs.len());
+    let mut share_commitments = Vec::with_capacity(blobs.len());
+    let mut share_versions = Vec::with_capacity(blobs.len());
+    for blob in blobs {
+        blob_sizes.push(blob.data.len());
+        namespaces.push(Bytes::from(blob.namespace.as_bytes().to_vec()));
+        share_commitments.push(Bytes::from(blob.commitment.0.to_vec()));
+        share_versions.push(u32::from(blob.share_version));
+    }
+
+    let blob_sizes = blob_sizes
+        .into_iter()
+        .map(|blob_size| {
+            u32::try_from(blob_size)
+                .map_err(|_| anyhow::anyhow!("blob too large, size: {}", blob_size))
+        })
+        .collect::<anyhow::Result<Vec<_>>>()?;
+
+    Ok(MsgPayForBlobs {
+        signer,
+        namespaces,
+        blob_sizes,
+        share_commitments,
+        share_versions,
+    })
+}
+
+fn get_address(public_key: PublicKey) -> anyhow::Result<String> {
+    use ripemd::{Digest, Ripemd160};
+
+    let sha_digest = sha2::Sha256::digest(public_key.serialize());
+    let ripemd_digest = Ripemd160::digest(&sha_digest[..]);
+    let mut bytes = [0u8; ADDRESS_LENGTH];
+    bytes.copy_from_slice(&ripemd_digest[..ADDRESS_LENGTH]);
+
+    Ok(bech32::encode::<bech32::Bech32>(
+        ACCOUNT_ADDRESS_PREFIX,
+        bytes.as_slice(),
+    )?)
+}
+
+#[derive(Clone, Debug)]
+pub(super) struct BlobTxHash([u8; 32]);
+
+impl BlobTxHash {
+    pub(super) fn compute(blob_tx: &BlobTx) -> Self {
+        Self(sha2::Sha256::digest(&blob_tx.tx).into())
+    }
+
+    pub(super) fn hex(self) -> String {
+        hex::encode(self.0)
+    }
+}
+
+impl Display for BlobTxHash {
+    fn fmt(&self, formatter: &mut Formatter<'_>) -> Result {
+        write!(formatter, "{}", hex::encode(self.0))
+    }
+}
diff --git a/core/node/da_clients/src/eigen/README.md b/core/node/da_clients/src/eigen/README.md
new file mode 100644
index 000000000000..634b4eb58780
--- /dev/null
+++ b/core/node/da_clients/src/eigen/README.md
@@ -0,0 +1,35 @@
+# EigenDA client
+
+---
+
+This is an implementation of an EigenDA client capable of sending blobs to the DA layer. It uses authenticated
+requests, though the auth headers are effectively mocked in the current API implementation.
+
+The files in `generated` are produced by compiling the `.proto` files from the EigenDA repo using the following
+function:
+
+```rust
+pub fn compile_protos() {
+    let fds = protox::compile(
+        [
+            "proto/common.proto",
+            "proto/disperser.proto",
+        ],
+        ["."],
+    )
+    .expect("protox failed to build");
+
+    tonic_build::configure()
+        .build_client(true)
+        .build_server(false)
+        .skip_protoc_run()
+        .out_dir("generated")
+        .compile_fds(fds)
+        .unwrap();
+}
+```
+
+The proto files are intentionally not vendored here, to avoid confusion if they fall out of date: the EigenDA
+[repo](https://github.com/Layr-Labs/eigenda/tree/master/api/proto) is the source of truth for them.
+
+The `generated` folder is a temporary solution until EigenDA ships a library with either generated protobuf
+bindings or, preferably, a full Rust client implementation.
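As a usage note, and an assumption on my part rather than something this PR pins down: `compile_protos` is meant to be run manually as a one-off generator (with `protox` and `tonic-build` as dependencies and the vendored `.proto` files under `proto/`), with the refreshed output committed under `generated/`, mirroring the Celestia setup above:

```rust
// Hypothetical one-off entry point for regenerating the bindings; run it from
// the crate root, then commit the refreshed files in `generated/`.
fn main() {
    compile_protos();
}
```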
diff --git a/core/node/da_clients/src/eigen/client.rs b/core/node/da_clients/src/eigen/client.rs
new file mode 100644
index 000000000000..d977620526aa
--- /dev/null
+++ b/core/node/da_clients/src/eigen/client.rs
@@ -0,0 +1,65 @@
+use std::{str::FromStr, sync::Arc};
+
+use async_trait::async_trait;
+use secp256k1::SecretKey;
+use subxt_signer::ExposeSecret;
+use zksync_config::{configs::da_client::eigen::EigenSecrets, EigenConfig};
+use zksync_da_client::{
+    types::{DAError, DispatchResponse, InclusionData},
+    DataAvailabilityClient,
+};
+
+use super::sdk::RawEigenClient;
+use crate::utils::to_non_retriable_da_error;
+
+#[derive(Debug, Clone)]
+pub struct EigenClient {
+    client: Arc<RawEigenClient>,
+}
+
+impl EigenClient {
+    pub async fn new(config: EigenConfig, secrets: EigenSecrets) -> anyhow::Result<Self> {
+        let private_key = SecretKey::from_str(secrets.private_key.0.expose_secret().as_str())
+            .map_err(|e| anyhow::anyhow!("Failed to parse private key: {}", e))?;
+
+        Ok(EigenClient {
+            client: Arc::new(
+                RawEigenClient::new(
+                    config.rpc_node_url,
+                    config.inclusion_polling_interval_ms,
+                    private_key,
+                )
+                .await?,
+            ),
+        })
+    }
+}
+
+#[async_trait]
+impl DataAvailabilityClient for EigenClient {
+    async fn dispatch_blob(
+        &self,
+        _: u32, // batch number
+        data: Vec<u8>,
+    ) -> Result<DispatchResponse, DAError> {
+        let blob_id = self
+            .client
+            .dispatch_blob(data)
+            .await
+            .map_err(to_non_retriable_da_error)?;
+
+        Ok(DispatchResponse::from(blob_id))
+    }
+
+    async fn get_inclusion_data(&self, _: &str) -> Result<Option<InclusionData>, DAError> {
+        Ok(Some(InclusionData { data: vec![] }))
+    }
+
+    fn clone_boxed(&self) -> Box<dyn DataAvailabilityClient> {
+        Box::new(self.clone())
+    }
+
+    fn blob_size_limit(&self) -> Option<usize> {
+        Some(1920 * 1024) // 2 MiB minus a 128 KiB buffer
+    }
+}
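For orientation, a minimal sketch of how this client might be exercised end to end. The config and secret values are
placeholders (assumptions, not part of this diff), and `dispatch_blob` ignores its batch-number argument, as noted
above:

```rust
// Hypothetical smoke test; assumes a reachable disperser RPC and a valid key.
// `EigenConfig` / `EigenSecrets` construction is elided since their fields are
// defined elsewhere in this PR.
async fn eigen_smoke(config: EigenConfig, secrets: EigenSecrets) -> anyhow::Result<()> {
    let client = EigenClient::new(config, secrets).await?;
    let response = client
        .dispatch_blob(0, vec![0xAB; 1024])
        .await
        .map_err(|da_err| anyhow::anyhow!("dispatch failed: {:#}", da_err.error))?;
    println!("dispatched blob id: {}", response.blob_id);
    Ok(())
}
```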
diff --git a/core/node/da_clients/src/eigen/generated/common.rs b/core/node/da_clients/src/eigen/generated/common.rs
new file mode 100644
index 000000000000..0599b9af4127
--- /dev/null
+++ b/core/node/da_clients/src/eigen/generated/common.rs
@@ -0,0 +1,63 @@
+// This file is @generated by prost-build.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct G1Commitment {
+    /// The X coordinate of the KZG commitment. This is the raw byte representation of the field element.
+    #[prost(bytes = "vec", tag = "1")]
+    pub x: ::prost::alloc::vec::Vec<u8>,
+    /// The Y coordinate of the KZG commitment. This is the raw byte representation of the field element.
+    #[prost(bytes = "vec", tag = "2")]
+    pub y: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct G2Commitment {
+    /// The A0 element of the X coordinate of G2 point.
+    #[prost(bytes = "vec", tag = "1")]
+    pub x_a0: ::prost::alloc::vec::Vec<u8>,
+    /// The A1 element of the X coordinate of G2 point.
+    #[prost(bytes = "vec", tag = "2")]
+    pub x_a1: ::prost::alloc::vec::Vec<u8>,
+    /// The A0 element of the Y coordinate of G2 point.
+    #[prost(bytes = "vec", tag = "3")]
+    pub y_a0: ::prost::alloc::vec::Vec<u8>,
+    /// The A1 element of the Y coordinate of G2 point.
+    #[prost(bytes = "vec", tag = "4")]
+    pub y_a1: ::prost::alloc::vec::Vec<u8>,
+}
+/// BlobCommitment represents the commitment of a specific blob, containing its
+/// KZG commitment, degree proof, the actual degree, and data length in number of symbols.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobCommitment {
+    #[prost(message, optional, tag = "1")]
+    pub commitment: ::core::option::Option<G1Commitment>,
+    #[prost(message, optional, tag = "2")]
+    pub length_commitment: ::core::option::Option<G2Commitment>,
+    #[prost(message, optional, tag = "3")]
+    pub length_proof: ::core::option::Option<G2Commitment>,
+    #[prost(uint32, tag = "4")]
+    pub data_length: u32,
+}
+/// BlobCertificate is what gets attested by the network
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobCertificate {
+    #[prost(uint32, tag = "1")]
+    pub version: u32,
+    #[prost(bytes = "vec", tag = "2")]
+    pub blob_key: ::prost::alloc::vec::Vec<u8>,
+    #[prost(message, optional, tag = "3")]
+    pub blob_commitment: ::core::option::Option<BlobCommitment>,
+    #[prost(uint32, repeated, tag = "4")]
+    pub quorum_numbers: ::prost::alloc::vec::Vec<u32>,
+    #[prost(uint32, tag = "5")]
+    pub reference_block_number: u32,
+}
+/// A chunk of a blob.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ChunkData {
+    #[prost(bytes = "vec", tag = "1")]
+    pub data: ::prost::alloc::vec::Vec<u8>,
+}
diff --git a/core/node/da_clients/src/eigen/generated/disperser.rs b/core/node/da_clients/src/eigen/generated/disperser.rs
new file mode 100644
index 000000000000..7e94d910ecb7
--- /dev/null
+++ b/core/node/da_clients/src/eigen/generated/disperser.rs
@@ -0,0 +1,517 @@
+// This file is @generated by prost-build.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AuthenticatedRequest {
+    #[prost(oneof = "authenticated_request::Payload", tags = "1, 2")]
+    pub payload: ::core::option::Option<authenticated_request::Payload>,
+}
+/// Nested message and enum types in `AuthenticatedRequest`.
+pub mod authenticated_request {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Payload {
+        #[prost(message, tag = "1")]
+        DisperseRequest(super::DisperseBlobRequest),
+        #[prost(message, tag = "2")]
+        AuthenticationData(super::AuthenticationData),
+    }
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AuthenticatedReply {
+    #[prost(oneof = "authenticated_reply::Payload", tags = "1, 2")]
+    pub payload: ::core::option::Option<authenticated_reply::Payload>,
+}
+/// Nested message and enum types in `AuthenticatedReply`.
+pub mod authenticated_reply {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Payload {
+        #[prost(message, tag = "1")]
+        BlobAuthHeader(super::BlobAuthHeader),
+        #[prost(message, tag = "2")]
+        DisperseReply(super::DisperseBlobReply),
+    }
+}
+/// BlobAuthHeader contains information about the blob for the client to verify and sign.
+/// - Once payments are enabled, the BlobAuthHeader will contain the KZG commitment to the blob, which the client
+/// will verify and sign. Having the client verify the KZG commitment instead of calculating it avoids
+/// the need for the client to have the KZG structured reference string (SRS), which can be large.
+/// The signed KZG commitment prevents the disperser from sending a different blob to the DA Nodes
+/// than the one the client sent.
+/// - In the meantime, the BlobAuthHeader contains a simple challenge parameter that is used to prevent
+/// replay attacks in the event that a signature is leaked.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobAuthHeader {
+    #[prost(uint32, tag = "1")]
+    pub challenge_parameter: u32,
+}
+/// AuthenticationData contains the signature of the BlobAuthHeader.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AuthenticationData {
+    #[prost(bytes = "vec", tag = "1")]
+    pub authentication_data: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DisperseBlobRequest {
+    /// The data to be dispersed.
+    /// The size of data must be <= 2MiB. Every 32-byte chunk of data is interpreted as an integer in big-endian format
+    /// where the lower address has more significant bits. The integer must stay in the valid range to be interpreted
+    /// as a field element on the bn254 curve. The valid range is
+    /// 0 <= x < 21888242871839275222246405745257275088548364400416034343698204186575808495617,
+    /// containing slightly less than 254 bits and more than 253 bits. If any one of the 32-byte chunks is outside the range,
+    /// the whole request is deemed invalid and rejected.
+    #[prost(bytes = "vec", tag = "1")]
+    pub data: ::prost::alloc::vec::Vec<u8>,
+    /// The quorums to which the blob will be sent, in addition to the required quorums which are configured
+    /// on the EigenDA smart contract. If required quorums are included here, an error will be returned.
+    /// The disperser will ensure that the encoded blobs for each quorum are all processed
+    /// within the same batch.
+    #[prost(uint32, repeated, tag = "2")]
+    pub custom_quorum_numbers: ::prost::alloc::vec::Vec<u32>,
+    /// The account ID of the client. This should be a hex-encoded string of the ECDSA public key
+    /// corresponding to the key used by the client to sign the BlobAuthHeader.
+    #[prost(string, tag = "3")]
+    pub account_id: ::prost::alloc::string::String,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DisperseBlobReply {
+    /// The status of the blob associated with the request_id.
+    #[prost(enumeration = "BlobStatus", tag = "1")]
+    pub result: i32,
+    /// The request ID generated by the disperser.
+    /// Once a request is accepted (although not processed), a unique request ID will be
+    /// generated.
+    /// Two different DisperseBlobRequests (determined by the hash of the DisperseBlobRequest)
+    /// will have different IDs, and the same DisperseBlobRequest sent repeatedly at different
+    /// times will also have different IDs.
+    /// The client should use this ID to query the processing status of the request (via
+    /// the GetBlobStatus API).
+    #[prost(bytes = "vec", tag = "2")]
+    pub request_id: ::prost::alloc::vec::Vec<u8>,
+}
+/// BlobStatusRequest is used to query the status of a blob.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobStatusRequest {
+    #[prost(bytes = "vec", tag = "1")]
+    pub request_id: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobStatusReply {
+    /// The status of the blob.
+    #[prost(enumeration = "BlobStatus", tag = "1")]
+    pub status: i32,
+    /// The blob info needed for clients to confirm the blob against the EigenDA contracts.
+    #[prost(message, optional, tag = "2")]
+    pub info: ::core::option::Option<BlobInfo>,
+}
+/// RetrieveBlobRequest contains parameters to retrieve the blob.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RetrieveBlobRequest {
+    #[prost(bytes = "vec", tag = "1")]
+    pub batch_header_hash: ::prost::alloc::vec::Vec<u8>,
+    #[prost(uint32, tag = "2")]
+    pub blob_index: u32,
+}
+/// RetrieveBlobReply contains the retrieved blob data
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RetrieveBlobReply {
+    #[prost(bytes = "vec", tag = "1")]
+    pub data: ::prost::alloc::vec::Vec<u8>,
+}
+/// BlobInfo contains information needed to confirm the blob against the EigenDA contracts
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobInfo {
+    #[prost(message, optional, tag = "1")]
+    pub blob_header: ::core::option::Option<BlobHeader>,
+    #[prost(message, optional, tag = "2")]
+    pub blob_verification_proof: ::core::option::Option<BlobVerificationProof>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobHeader {
+    /// KZG commitment of the blob.
+    #[prost(message, optional, tag = "1")]
+    pub commitment: ::core::option::Option<super::common::G1Commitment>,
+    /// The length of the blob in symbols (each symbol is 32 bytes).
+    #[prost(uint32, tag = "2")]
+    pub data_length: u32,
+    /// The params of the quorums that this blob participates in.
+    #[prost(message, repeated, tag = "3")]
+    pub blob_quorum_params: ::prost::alloc::vec::Vec<BlobQuorumParam>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobQuorumParam {
+    /// The ID of the quorum.
+    #[prost(uint32, tag = "1")]
+    pub quorum_number: u32,
+    /// The max percentage of stake within the quorum that can be held by or delegated
+    /// to adversarial operators. Currently, this and the next parameter are standardized
+    /// across the quorum using values read from the EigenDA contracts.
+    #[prost(uint32, tag = "2")]
+    pub adversary_threshold_percentage: u32,
+    /// The min percentage of stake that must attest in order to consider
+    /// the dispersal successful.
+    #[prost(uint32, tag = "3")]
+    pub confirmation_threshold_percentage: u32,
+    /// The length of each chunk.
+    #[prost(uint32, tag = "4")]
+    pub chunk_length: u32,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlobVerificationProof {
+    /// batch_id is an incremental ID assigned to a batch by EigenDAServiceManager
+    #[prost(uint32, tag = "1")]
+    pub batch_id: u32,
+    /// The index of the blob in the batch (which is logically an ordered list of blobs).
+    #[prost(uint32, tag = "2")]
+    pub blob_index: u32,
+    #[prost(message, optional, tag = "3")]
+    pub batch_metadata: ::core::option::Option<BatchMetadata>,
+    /// inclusion_proof is a merkle proof for a blob header's inclusion in a batch
+    #[prost(bytes = "vec", tag = "4")]
+    pub inclusion_proof: ::prost::alloc::vec::Vec<u8>,
+    /// indexes of quorums in BatchHeader.quorum_numbers that match the quorums in BlobHeader.blob_quorum_params
+    /// Ex. BlobHeader.blob_quorum_params = [
+    ///     {
+    ///         quorum_number = 0,
+    ///         ...
+    ///     },
+    ///     {
+    ///         quorum_number = 3,
+    ///         ...
+    ///     },
+    ///     {
+    ///         quorum_number = 5,
+    ///         ...
+    ///     },
+    /// ]
+    /// BatchHeader.quorum_numbers = \[0, 5, 3\] => 0x000503
+    /// Then, quorum_indexes = \[0, 2, 1\] => 0x000201
+    #[prost(bytes = "vec", tag = "5")]
+    pub quorum_indexes: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BatchMetadata {
+    #[prost(message, optional, tag = "1")]
+    pub batch_header: ::core::option::Option<BatchHeader>,
+    /// The hash of all public keys of the operators that did not sign the batch.
+    #[prost(bytes = "vec", tag = "2")]
+    pub signatory_record_hash: ::prost::alloc::vec::Vec<u8>,
+    /// The fee payment paid by users for dispersing this batch. It's the bytes
+    /// representation of a big.Int value.
+    #[prost(bytes = "vec", tag = "3")]
+    pub fee: ::prost::alloc::vec::Vec<u8>,
+    /// The Ethereum block number at which the batch is confirmed onchain.
+    #[prost(uint32, tag = "4")]
+    pub confirmation_block_number: u32,
+    /// This is the hash of the ReducedBatchHeader defined onchain, see:
+    ///
+    /// This is the message that the operators will sign their signatures on.
+    #[prost(bytes = "vec", tag = "5")]
+    pub batch_header_hash: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BatchHeader {
+    /// The root of the merkle tree with the hashes of blob headers as leaves.
+    #[prost(bytes = "vec", tag = "1")]
+    pub batch_root: ::prost::alloc::vec::Vec<u8>,
+    /// All quorums associated with blobs in this batch. Sorted in ascending order.
+    /// Ex. \[0, 2, 1\] => 0x000102
+    #[prost(bytes = "vec", tag = "2")]
+    pub quorum_numbers: ::prost::alloc::vec::Vec<u8>,
+    /// The percentage of stake that has signed for this batch.
+    /// The quorum_signed_percentages\[i\] is percentage for the quorum_numbers\[i\].
+    #[prost(bytes = "vec", tag = "3")]
+    pub quorum_signed_percentages: ::prost::alloc::vec::Vec<u8>,
+    /// The Ethereum block number at which the batch was created.
+    /// The Disperser will encode and disperse the blobs based on the onchain info
+    /// (e.g. operator stakes) at this block number.
+    #[prost(uint32, tag = "4")]
+    pub reference_block_number: u32,
+}
+/// BlobStatus represents the status of a blob.
+/// The status of a blob is updated as the blob is processed by the disperser.
+/// The status of a blob can be queried by the client using the GetBlobStatus API.
+/// Intermediate states are states that the blob can be in while being processed, and it can be updated to a different state:
+/// - PROCESSING
+/// - DISPERSING
+/// - CONFIRMED
+/// Terminal states are states that will not be updated to a different state:
+/// - FAILED
+/// - FINALIZED
+/// - INSUFFICIENT_SIGNATURES
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum BlobStatus {
+    Unknown = 0,
+    /// PROCESSING means that the blob is currently being processed by the disperser
+    Processing = 1,
+    /// CONFIRMED means that the blob has been dispersed to DA Nodes and the dispersed
+    /// batch containing the blob has been confirmed onchain
+    Confirmed = 2,
+    /// FAILED means that the blob has failed permanently (for reasons other than insufficient
+    /// signatures, which is a separate state)
+    Failed = 3,
+    /// FINALIZED means that the block containing the blob's confirmation transaction has been finalized on Ethereum
+    Finalized = 4,
+    /// INSUFFICIENT_SIGNATURES means that the confirmation threshold for the blob was not met
+    /// for at least one quorum.
+    InsufficientSignatures = 5,
+    /// DISPERSING means that the blob is currently being dispersed to DA Nodes and being confirmed onchain
+    Dispersing = 6,
+}
+impl BlobStatus {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            BlobStatus::Unknown => "UNKNOWN",
+            BlobStatus::Processing => "PROCESSING",
+            BlobStatus::Confirmed => "CONFIRMED",
+            BlobStatus::Failed => "FAILED",
+            BlobStatus::Finalized => "FINALIZED",
+            BlobStatus::InsufficientSignatures => "INSUFFICIENT_SIGNATURES",
+            BlobStatus::Dispersing => "DISPERSING",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "UNKNOWN" => Some(Self::Unknown),
+            "PROCESSING" => Some(Self::Processing),
+            "CONFIRMED" => Some(Self::Confirmed),
+            "FAILED" => Some(Self::Failed),
+            "FINALIZED" => Some(Self::Finalized),
+            "INSUFFICIENT_SIGNATURES" => Some(Self::InsufficientSignatures),
+            "DISPERSING" => Some(Self::Dispersing),
+            _ => None,
+        }
+    }
+}
+/// Generated client implementations.
+pub mod disperser_client {
+    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
+    use tonic::codegen::*;
+    use tonic::codegen::http::Uri;
+    /// Disperser defines the public APIs for dispersing blobs.
+    #[derive(Debug, Clone)]
+    pub struct DisperserClient<T> {
+        inner: tonic::client::Grpc<T>,
+    }
+    impl DisperserClient<tonic::transport::Channel> {
+        /// Attempt to create a new client by connecting to a given endpoint.
+        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
+        where
+            D: TryInto<tonic::transport::Endpoint>,
+            D::Error: Into<StdError>,
+        {
+            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
+            Ok(Self::new(conn))
+        }
+    }
+    impl<T> DisperserClient<T>
+    where
+        T: tonic::client::GrpcService<tonic::body::BoxBody>,
+        T::Error: Into<StdError>,
+        T::ResponseBody: Body<Data = Bytes> + Send + 'static,
+        <T::ResponseBody as Body>::Error: Into<StdError> + Send,
+    {
+        pub fn new(inner: T) -> Self {
+            let inner = tonic::client::Grpc::new(inner);
+            Self { inner }
+        }
+        pub fn with_origin(inner: T, origin: Uri) -> Self {
+            let inner = tonic::client::Grpc::with_origin(inner, origin);
+            Self { inner }
+        }
+        pub fn with_interceptor<F>(
+            inner: T,
+            interceptor: F,
+        ) -> DisperserClient<InterceptedService<T, F>>
+        where
+            F: tonic::service::Interceptor,
+            T::ResponseBody: Default,
+            T: tonic::codegen::Service<
+                http::Request<tonic::body::BoxBody>,
+                Response = http::Response<
+                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
+                >,
+            >,
+            <T as tonic::codegen::Service<
+                http::Request<tonic::body::BoxBody>,
+            >>::Error: Into<StdError> + Send + Sync,
+        {
+            DisperserClient::new(InterceptedService::new(inner, interceptor))
+        }
+        /// Compress requests with the given encoding.
+        ///
+        /// This requires the server to support it otherwise it might respond with an
+        /// error.
+        #[must_use]
+        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.send_compressed(encoding);
+            self
+        }
+        /// Enable decompressing responses.
+        #[must_use]
+        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.accept_compressed(encoding);
+            self
+        }
+        /// Limits the maximum size of a decoded message.
+        ///
+        /// Default: `4MB`
+        #[must_use]
+        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_decoding_message_size(limit);
+            self
+        }
+        /// Limits the maximum size of an encoded message.
+        ///
+        /// Default: `usize::MAX`
+        #[must_use]
+        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_encoding_message_size(limit);
+            self
+        }
+        /// This API accepts blobs to disperse from clients.
+        /// This executes the dispersal async, i.e. it returns once the request
+        /// is accepted. The client could use the GetBlobStatus() API to poll the
+        /// processing status of the blob.
+        pub async fn disperse_blob(
+            &mut self,
+            request: impl tonic::IntoRequest<super::DisperseBlobRequest>,
+        ) -> std::result::Result<
+            tonic::Response<super::DisperseBlobReply>,
+            tonic::Status,
+        > {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::new(
+                        tonic::Code::Unknown,
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/disperser.Disperser/DisperseBlob",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("disperser.Disperser", "DisperseBlob"));
+            self.inner.unary(req, path, codec).await
+        }
+        /// DisperseBlobAuthenticated is similar to DisperseBlob, except that it requires the
+        /// client to authenticate itself via the AuthenticationData message. The protocol is as follows:
+        /// 1. The client sends a DisperseBlobAuthenticated request with the DisperseBlobRequest message
+        /// 2. The Disperser sends back a BlobAuthHeader message containing information for the client to
+        /// verify and sign.
+        /// 3. The client verifies the BlobAuthHeader and sends back the signed BlobAuthHeader in an
+        /// AuthenticationData message.
+        /// 4. The Disperser verifies the signature and returns a DisperseBlobReply message.
+        pub async fn disperse_blob_authenticated(
+            &mut self,
+            request: impl tonic::IntoStreamingRequest<
+                Message = super::AuthenticatedRequest,
+            >,
+        ) -> std::result::Result<
+            tonic::Response<tonic::codec::Streaming<super::AuthenticatedReply>>,
+            tonic::Status,
+        > {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::new(
+                        tonic::Code::Unknown,
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/disperser.Disperser/DisperseBlobAuthenticated",
+            );
+            let mut req = request.into_streaming_request();
+            req.extensions_mut()
+                .insert(
+                    GrpcMethod::new("disperser.Disperser", "DisperseBlobAuthenticated"),
+                );
+            self.inner.streaming(req, path, codec).await
+        }
+        /// This API is meant to be polled for the blob status.
+        pub async fn get_blob_status(
+            &mut self,
+            request: impl tonic::IntoRequest<super::BlobStatusRequest>,
+        ) -> std::result::Result<
+            tonic::Response<super::BlobStatusReply>,
+            tonic::Status,
+        > {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::new(
+                        tonic::Code::Unknown,
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/disperser.Disperser/GetBlobStatus",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("disperser.Disperser", "GetBlobStatus"));
+            self.inner.unary(req, path, codec).await
+        }
+        /// This retrieves the requested blob from the Disperser's backend.
+        /// This is a more efficient way to retrieve blobs than directly retrieving
+        /// from the DA Nodes (see detail about this approach in
+        /// api/proto/retriever/retriever.proto).
+        /// The blob should have been initially dispersed via this Disperser service
+        /// for this API to work.
+        pub async fn retrieve_blob(
+            &mut self,
+            request: impl tonic::IntoRequest<super::RetrieveBlobRequest>,
+        ) -> std::result::Result<
+            tonic::Response<super::RetrieveBlobReply>,
+            tonic::Status,
+        > {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::new(
+                        tonic::Code::Unknown,
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/disperser.Disperser/RetrieveBlob",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("disperser.Disperser", "RetrieveBlob"));
+            self.inner.unary(req, path, codec).await
+        }
+    }
+}
diff --git a/core/node/da_clients/src/eigen/mod.rs b/core/node/da_clients/src/eigen/mod.rs
new file mode 100644
index 000000000000..699eae894246
--- /dev/null
+++ b/core/node/da_clients/src/eigen/mod.rs
@@ -0,0 +1,14 @@
+mod client;
+mod sdk;
+
+pub use self::client::EigenClient;
+
+#[allow(clippy::all)]
+pub(crate) mod disperser {
+    include!("generated/disperser.rs");
+}
+
+#[allow(clippy::all)]
+pub(crate) mod common {
+    include!("generated/common.rs");
+}
diff --git a/core/node/da_clients/src/eigen/sdk.rs b/core/node/da_clients/src/eigen/sdk.rs
new file mode 100644
index 000000000000..7ab7ea3ce33b
--- /dev/null
+++ b/core/node/da_clients/src/eigen/sdk.rs
@@ -0,0 +1,246 @@
+use std::{str::FromStr, time::Duration};
+
+use secp256k1::{ecdsa::RecoverableSignature, SecretKey};
+use tokio::sync::mpsc;
+use tokio_stream::{wrappers::ReceiverStream, StreamExt};
+use tonic::{
+    transport::{Channel, ClientTlsConfig, Endpoint},
+    Streaming,
+};
+
+use crate::eigen::{
+    disperser,
+    disperser::{
+        authenticated_request::Payload::{AuthenticationData, DisperseRequest},
+        disperser_client::DisperserClient,
+        AuthenticatedReply, BlobAuthHeader, BlobVerificationProof, DisperseBlobReply,
+    },
+};
+
+#[derive(Debug, Clone)]
+pub struct RawEigenClient {
+    client: DisperserClient<Channel>,
+    polling_interval: Duration,
+    private_key: SecretKey,
+    account_id: String,
+}
+
+pub(crate) const DATA_CHUNK_SIZE: usize = 32;
+
+impl RawEigenClient {
+    pub(crate) const BUFFER_SIZE: usize = 1000;
+
+    pub async fn new(
+        rpc_node_url: String,
+        inclusion_polling_interval_ms: u64,
+        private_key: SecretKey,
+    ) -> anyhow::Result<Self> {
+        let endpoint =
+            Endpoint::from_str(rpc_node_url.as_str())?.tls_config(ClientTlsConfig::new())?;
+        let client = DisperserClient::connect(endpoint)
+            .await
+            .map_err(|e| anyhow::anyhow!("Failed to connect to Disperser server: {}", e))?;
+        let polling_interval = Duration::from_millis(inclusion_polling_interval_ms);
+
+        let account_id = get_account_id(&private_key);
+
+        Ok(RawEigenClient {
+            client,
+            polling_interval,
+            private_key,
+            account_id,
+        })
+    }
+
+    pub async fn dispatch_blob(&self, data: Vec<u8>) -> anyhow::Result<String> {
+        let mut client_clone = self.client.clone();
+        let (tx, rx) = mpsc::channel(Self::BUFFER_SIZE);
+
+        let response_stream = client_clone.disperse_blob_authenticated(ReceiverStream::new(rx));
+        let padded_data = convert_by_padding_empty_byte(&data);
+
+        // 1. send DisperseBlobRequest
+        self.disperse_data(padded_data, &tx).await?;
+
+        // this await is blocked until the first response on the stream, so we only await
+        // after sending the `DisperseBlobRequest`
+        let mut response_stream = response_stream.await?.into_inner();
+
+        // 2. receive BlobAuthHeader
+        let blob_auth_header = self.receive_blob_auth_header(&mut response_stream).await?;
+
+        // 3. sign and send BlobAuthHeader
+        self.submit_authentication_data(blob_auth_header.clone(), &tx)
+            .await?;
+
+        // 4. receive DisperseBlobReply
+        let reply = response_stream
+            .next()
+            .await
+            .ok_or_else(|| anyhow::anyhow!("No response from server"))?
+            .unwrap()
+            .payload
+            .ok_or_else(|| anyhow::anyhow!("No payload in response"))?;
+
+        let disperser::authenticated_reply::Payload::DisperseReply(disperse_reply) = reply else {
+            return Err(anyhow::anyhow!("Unexpected response from server"));
+        };
+
+        // 5. poll for blob status until it reaches the Confirmed state
+        let verification_proof = self
+            .await_for_inclusion(client_clone, disperse_reply)
+            .await?;
+        let blob_id = format!(
+            "{}:{}",
+            verification_proof.batch_id, verification_proof.blob_index
+        );
+        tracing::info!("Blob dispatch confirmed, blob id: {}", blob_id);
+
+        Ok(blob_id)
+    }
+
+    async fn disperse_data(
+        &self,
+        data: Vec<u8>,
+        tx: &mpsc::Sender<disperser::AuthenticatedRequest>,
+    ) -> anyhow::Result<()> {
+        let req = disperser::AuthenticatedRequest {
+            payload: Some(DisperseRequest(disperser::DisperseBlobRequest {
+                data,
+                custom_quorum_numbers: vec![],
+                account_id: self.account_id.clone(),
+            })),
+        };
+
+        tx.send(req)
+            .await
+            .map_err(|e| anyhow::anyhow!("Failed to send DisperseBlobRequest: {}", e))
+    }
+
+    async fn submit_authentication_data(
+        &self,
+        blob_auth_header: BlobAuthHeader,
+        tx: &mpsc::Sender<disperser::AuthenticatedRequest>,
+    ) -> anyhow::Result<()> {
+        // TODO: replace challenge_parameter with actual auth header when it is available
+        let digest = zksync_basic_types::web3::keccak256(
+            &blob_auth_header.challenge_parameter.to_be_bytes(),
+        );
+        let signature: RecoverableSignature = secp256k1::Secp256k1::signing_only()
+            .sign_ecdsa_recoverable(
+                &secp256k1::Message::from_slice(&digest[..])?,
+                &self.private_key,
+            );
+        let (recovery_id, sig) = signature.serialize_compact();
+
+        let mut signature = Vec::with_capacity(65);
+        signature.extend_from_slice(&sig);
+        signature.push(recovery_id.to_i32() as u8);
+
+        let req = disperser::AuthenticatedRequest {
+            payload: Some(AuthenticationData(disperser::AuthenticationData {
+                authentication_data: signature,
+            })),
+        };
+
+        tx.send(req)
+            .await
+            .map_err(|e| anyhow::anyhow!("Failed to send AuthenticationData: {}", e))
+    }
+
+    async fn receive_blob_auth_header(
+        &self,
+        response_stream: &mut Streaming<AuthenticatedReply>,
+    ) -> anyhow::Result<BlobAuthHeader> {
+        let reply = response_stream
+            .next()
+            .await
+            .ok_or_else(|| anyhow::anyhow!("No response from server"))?;
+
+        let Ok(reply) = reply else {
+            return Err(anyhow::anyhow!("Err from server: {:?}", reply));
+        };
+
+        let reply = reply
+            .payload
+            .ok_or_else(|| anyhow::anyhow!("No payload in response"))?;
+
+        if let disperser::authenticated_reply::Payload::BlobAuthHeader(blob_auth_header) = reply {
+            Ok(blob_auth_header)
+        } else {
+            Err(anyhow::anyhow!("Unexpected response from server"))
+        }
+    }
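+
+    // NB: `await_for_inclusion` below polls `get_blob_status` in an infinite loop;
+    // there is no internal timeout, so if a blob never reaches a terminal status,
+    // cancellation has to come from the caller of `dispatch_blob`.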
+    async fn await_for_inclusion(
+        &self,
+        mut client: DisperserClient<Channel>,
+        disperse_blob_reply: DisperseBlobReply,
+    ) -> anyhow::Result<BlobVerificationProof> {
+        let polling_request = disperser::BlobStatusRequest {
+            request_id: disperse_blob_reply.request_id,
+        };
+
+        loop {
+            tokio::time::sleep(self.polling_interval).await;
+            let resp = client
+                .get_blob_status(polling_request.clone())
+                .await?
+                .into_inner();
+
+            match disperser::BlobStatus::try_from(resp.status)? {
+                disperser::BlobStatus::Processing | disperser::BlobStatus::Dispersing => {}
+                disperser::BlobStatus::Failed => {
+                    return Err(anyhow::anyhow!("Blob dispatch failed"))
+                }
+                disperser::BlobStatus::InsufficientSignatures => {
+                    return Err(anyhow::anyhow!("Insufficient signatures"))
+                }
+                disperser::BlobStatus::Confirmed | disperser::BlobStatus::Finalized => {
+                    let verification_proof = resp
+                        .info
+                        .ok_or_else(|| anyhow::anyhow!("No blob header in response"))?
+                        .blob_verification_proof
+                        .ok_or_else(|| anyhow::anyhow!("No blob verification proof in response"))?;
+
+                    return Ok(verification_proof);
+                }
+
+                _ => return Err(anyhow::anyhow!("Received unknown blob status")),
+            }
+        }
+    }
+}
+
+fn get_account_id(secret_key: &SecretKey) -> String {
+    let public_key =
+        secp256k1::PublicKey::from_secret_key(&secp256k1::Secp256k1::new(), secret_key);
+    let hex = hex::encode(public_key.serialize_uncompressed());
+
+    format!("0x{}", hex)
+}
+
+fn convert_by_padding_empty_byte(data: &[u8]) -> Vec<u8> {
+    // Each 32-byte chunk sent to EigenDA must decode to a valid bn254 field element
+    // (see the `DisperseBlobRequest.data` docs), so only 31 payload bytes are packed
+    // per chunk, with a leading 0x00 byte keeping every chunk below the field modulus.
+    let parse_size = DATA_CHUNK_SIZE - 1;
+
+    // Calculate the number of chunks
+    let data_len = (data.len() + parse_size - 1) / parse_size;
+
+    // Pre-allocate `valid_data` with enough space for all chunks
+    let mut valid_data = vec![0u8; data_len * DATA_CHUNK_SIZE];
+    let mut valid_end = data_len * DATA_CHUNK_SIZE;
+
+    for (i, chunk) in data.chunks(parse_size).enumerate() {
+        let offset = i * DATA_CHUNK_SIZE;
+        valid_data[offset] = 0x00; // Set first byte of each chunk to 0x00 for big-endian compliance
+
+        let copy_end = offset + 1 + chunk.len();
+        valid_data[offset + 1..copy_end].copy_from_slice(chunk);
+
+        if i == data_len - 1 && chunk.len() < parse_size {
+            valid_end = offset + 1 + chunk.len();
+        }
+    }
+
+    valid_data.truncate(valid_end);
+    valid_data
+}
diff --git a/core/node/da_clients/src/lib.rs b/core/node/da_clients/src/lib.rs
index 48311ce4c3f2..8a4c565a650a 100644
--- a/core/node/da_clients/src/lib.rs
+++ b/core/node/da_clients/src/lib.rs
@@ -1,3 +1,6 @@
 pub mod avail;
+pub mod celestia;
+pub mod eigen;
 pub mod no_da;
 pub mod object_store;
+mod utils;
diff --git a/core/node/da_clients/src/no_da.rs b/core/node/da_clients/src/no_da.rs
index 2710c9ce9d9b..db0557510ed2 100644
--- a/core/node/da_clients/src/no_da.rs
+++ b/core/node/da_clients/src/no_da.rs
@@ -15,7 +15,7 @@ impl DataAvailabilityClient for NoDAClient {
     }
 
     async fn get_inclusion_data(&self, _: &str) -> Result<Option<InclusionData>, DAError> {
-        return Ok(Some(InclusionData::default()));
+        Ok(Some(InclusionData::default()))
     }
 
     fn clone_boxed(&self) -> Box<dyn DataAvailabilityClient> {
diff --git a/core/node/da_clients/src/utils.rs b/core/node/da_clients/src/utils.rs
new file mode 100644
index 000000000000..d717d41f0e03
--- /dev/null
+++ b/core/node/da_clients/src/utils.rs
@@ -0,0 +1,15 @@
+use zksync_da_client::types::DAError;
+
+pub fn to_non_retriable_da_error(error: impl Into<anyhow::Error>) -> DAError {
+    DAError {
+        error: error.into(),
+        is_retriable: false,
+    }
+}
+
+pub fn to_retriable_da_error(error: impl Into<anyhow::Error>) -> DAError {
+    DAError {
+        error: error.into(),
+        is_retriable: true,
+    }
+}
diff --git a/core/node/da_dispatcher/Cargo.toml b/core/node/da_dispatcher/Cargo.toml
index 8a10d6813a5a..57d00cabaaa8 100644
--- a/core/node/da_dispatcher/Cargo.toml
+++ b/core/node/da_dispatcher/Cargo.toml
@@ -14,7 +14,6 @@ categories.workspace = true
 [dependencies]
 vise.workspace = true
 zksync_dal.workspace = true
-zksync_utils.workspace = true
 zksync_config.workspace = true
 zksync_types.workspace = true
 zksync_da_client.workspace = true
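The two `DAError` helpers added in `utils.rs` above give the DA clients a uniform way to tag failures for the
dispatcher's retry logic. A minimal sketch of the intended call pattern; the wrapper function and its classification
rule are illustrative assumptions, not part of this diff:

```rust
use zksync_da_client::types::DAError;

// Illustrative wrapper: transport-level failures are worth retrying, while
// validation failures (e.g. an oversized blob) are treated as permanent.
fn classify_dispatch_error(err: anyhow::Error, is_transient: bool) -> DAError {
    if is_transient {
        to_retriable_da_error(err)
    } else {
        to_non_retriable_da_error(err)
    }
}
```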
diff --git a/core/node/da_dispatcher/src/da_dispatcher.rs b/core/node/da_dispatcher/src/da_dispatcher.rs
index f8e6f6b31723..2cdde9951be9 100644
--- a/core/node/da_dispatcher/src/da_dispatcher.rs
+++ b/core/node/da_dispatcher/src/da_dispatcher.rs
@@ -137,6 +137,8 @@ impl DataAvailabilityDispatcher {
         };
 
         let inclusion_data = if self.config.use_dummy_inclusion_data() {
+            Some(InclusionData { data: vec![] })
+        } else {
             self.client
                 .get_inclusion_data(blob_info.blob_id.as_str())
                 .await
@@ -146,10 +148,6 @@ impl DataAvailabilityDispatcher {
                         blob_info.blob_id, blob_info.l1_batch_number
                     )
                 })?
-        } else {
-            // if the inclusion verification is disabled, we don't need to wait for the inclusion
-            // data before committing the batch, so simply return an empty vector
-            Some(InclusionData { data: vec![] })
         };
 
         let Some(inclusion_data) = inclusion_data else {
diff --git a/core/node/db_pruner/src/tests.rs b/core/node/db_pruner/src/tests.rs
index a5458e996e1e..99fbada423dc 100644
--- a/core/node/db_pruner/src/tests.rs
+++ b/core/node/db_pruner/src/tests.rs
@@ -122,6 +122,7 @@ async fn insert_l2_blocks(
         virtual_blocks: 0,
         gas_limit: 0,
         logs_bloom: Default::default(),
+        pubdata_params: Default::default(),
     };
 
     conn.blocks_dal()
diff --git a/core/node/eth_sender/Cargo.toml b/core/node/eth_sender/Cargo.toml
index a7aa88c3550e..a33536baa986 100644
--- a/core/node/eth_sender/Cargo.toml
+++ b/core/node/eth_sender/Cargo.toml
@@ -17,7 +17,6 @@
 zksync_dal.workspace = true
 zksync_config.workspace = true
 zksync_contracts.workspace = true
 zksync_eth_client.workspace = true
-zksync_utils.workspace = true
 zksync_l1_contract_interface.workspace = true
 zksync_object_store.workspace = true
 zksync_prover_interface.workspace = true
diff --git a/core/node/eth_sender/src/aggregated_operations.rs b/core/node/eth_sender/src/aggregated_operations.rs
index 2dfaf5942659..5271d42d3b75 100644
--- a/core/node/eth_sender/src/aggregated_operations.rs
+++ b/core/node/eth_sender/src/aggregated_operations.rs
@@ -3,13 +3,17 @@ use std::ops;
 use zksync_l1_contract_interface::i_executor::methods::{ExecuteBatches, ProveBatches};
 use zksync_types::{
     aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata,
-    pubdata_da::PubdataDA, L1BatchNumber, ProtocolVersionId,
+    pubdata_da::PubdataSendingMode, L1BatchNumber, ProtocolVersionId,
 };
 
 #[allow(clippy::large_enum_variant)]
 #[derive(Debug, Clone)]
 pub enum AggregatedOperation {
-    Commit(L1BatchWithMetadata, Vec<L1BatchWithMetadata>, PubdataDA),
+    Commit(
+        L1BatchWithMetadata,
+        Vec<L1BatchWithMetadata>,
+        PubdataSendingMode,
+    ),
     PublishProofOnchain(ProveBatches),
     Execute(ExecuteBatches),
 }
diff --git a/core/node/eth_sender/src/aggregator.rs b/core/node/eth_sender/src/aggregator.rs
index 4045e9ca3d80..432804a21b2e 100644
--- a/core/node/eth_sender/src/aggregator.rs
+++ b/core/node/eth_sender/src/aggregator.rs
@@ -11,7 +11,7 @@ use zksync_types::{
     commitment::{L1BatchCommitmentMode, L1BatchWithMetadata},
     helpers::unix_timestamp_ms,
     protocol_version::{L1VerifierConfig, ProtocolSemanticVersion},
-    pubdata_da::PubdataDA,
+    pubdata_da::PubdataSendingMode,
     L1BatchNumber, ProtocolVersionId,
 };
 
@@ -36,7 +36,7 @@ pub struct Aggregator {
     /// means no wait is needed: nonces will still provide the correct ordering of
     /// transactions.
operate_4844_mode: bool, - pubdata_da: PubdataDA, + pubdata_da: PubdataSendingMode, commitment_mode: L1BatchCommitmentMode, } @@ -47,8 +47,7 @@ impl Aggregator { operate_4844_mode: bool, commitment_mode: L1BatchCommitmentMode, ) -> Self { - let pubdata_da = config.pubdata_sending_mode.into(); - + let pubdata_da = config.pubdata_sending_mode; Self { commit_criteria: vec![ Box::from(NumberCriterion { @@ -476,7 +475,7 @@ impl Aggregator { } } - pub fn pubdata_da(&self) -> PubdataDA { + pub fn pubdata_da(&self) -> PubdataSendingMode { self.pubdata_da } diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index a08d16f456a9..ac9ed4aaaadb 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -19,7 +19,7 @@ use zksync_types::{ ethabi::{Function, Token}, l2_to_l1_log::UserL2ToL1Log, protocol_version::{L1VerifierConfig, PACKED_SEMVER_MINOR_MASK}, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, settlement::SettlementMode, web3::{contract::Error as Web3ContractError, BlockNumber}, Address, L2ChainId, ProtocolVersionId, SLChainId, H256, U256, @@ -505,11 +505,12 @@ impl EthTxAggregator { ) }; - let l1_batch_for_sidecar = if PubdataDA::Blobs == self.aggregator.pubdata_da() { - Some(l1_batches[0].clone()) - } else { - None - }; + let l1_batch_for_sidecar = + if PubdataSendingMode::Blobs == self.aggregator.pubdata_da() { + Some(l1_batches[0].clone()) + } else { + None + }; Self::encode_commit_data(encoding_fn, &commit_data, l1_batch_for_sidecar) } diff --git a/core/node/eth_sender/src/eth_tx_manager.rs b/core/node/eth_sender/src/eth_tx_manager.rs index 7de91a3b7736..6992bea1007c 100644 --- a/core/node/eth_sender/src/eth_tx_manager.rs +++ b/core/node/eth_sender/src/eth_tx_manager.rs @@ -1,4 +1,7 @@ -use std::{sync::Arc, time::Duration}; +use std::{ + sync::Arc, + time::{Duration, SystemTime}, +}; use tokio::sync::watch; use zksync_config::configs::eth_sender::SenderConfig; @@ -9,7 +12,6 @@ use zksync_eth_client::{ use zksync_node_fee_model::l1_gas_price::TxParamsProvider; use zksync_shared_metrics::BlockL1Stage; use zksync_types::{eth_sender::EthTx, Address, L1BlockNumber, H256, U256}; -use zksync_utils::time::seconds_since_epoch; use super::{metrics::METRICS, EthSenderError}; use crate::{ @@ -501,9 +503,13 @@ impl EthTxManager { ); let tx_type_label = tx.tx_type.into(); METRICS.l1_gas_used[&tx_type_label].observe(gas_used.low_u128() as f64); - METRICS.l1_tx_mined_latency[&tx_type_label].observe(Duration::from_secs( - seconds_since_epoch() - tx.created_at_timestamp, - )); + + let duration_since_epoch = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("incorrect system time"); + let tx_latency = + duration_since_epoch.saturating_sub(Duration::from_secs(tx.created_at_timestamp)); + METRICS.l1_tx_mined_latency[&tx_type_label].observe(tx_latency); let sent_at_block = storage .eth_sender_dal() diff --git a/core/node/eth_sender/src/metrics.rs b/core/node/eth_sender/src/metrics.rs index 462fe3ed6e59..571837036045 100644 --- a/core/node/eth_sender/src/metrics.rs +++ b/core/node/eth_sender/src/metrics.rs @@ -1,12 +1,14 @@ //! Metrics for the Ethereum sender component. 
-use std::{fmt, time::Duration}; +use std::{ + fmt, + time::{Duration, SystemTime}, +}; use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics}; use zksync_dal::{Connection, Core, CoreDal}; use zksync_shared_metrics::{BlockL1Stage, BlockStage, APP_METRICS}; use zksync_types::{aggregated_operations::AggregatedActionType, eth_sender::EthTx}; -use zksync_utils::time::seconds_since_epoch; use crate::abstract_l1_interface::{L1BlockNumbers, OperatorType}; @@ -143,10 +145,13 @@ impl EthSenderMetrics { return; } + let duration_since_epoch = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("incorrect system time"); for statistics in l1_batches_statistics { - APP_METRICS.block_latency[&stage].observe(Duration::from_secs( - seconds_since_epoch() - statistics.timestamp, - )); + let block_latency = + duration_since_epoch.saturating_sub(Duration::from_secs(statistics.timestamp)); + APP_METRICS.block_latency[&stage].observe(block_latency); APP_METRICS.processed_txs[&stage.into()] .inc_by(statistics.l2_tx_count as u64 + statistics.l1_tx_count as u64); APP_METRICS.processed_l1_txs[&stage.into()].inc_by(statistics.l1_tx_count as u64); diff --git a/core/node/eth_sender/src/publish_criterion.rs b/core/node/eth_sender/src/publish_criterion.rs index 52d861ce0af6..30f0820b148a 100644 --- a/core/node/eth_sender/src/publish_criterion.rs +++ b/core/node/eth_sender/src/publish_criterion.rs @@ -8,7 +8,7 @@ use zksync_types::{ aggregated_operations::AggregatedActionType, commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, ethabi, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, L1BatchNumber, }; @@ -202,7 +202,7 @@ impl L1BatchPublishCriterion for GasCriterion { pub struct DataSizeCriterion { pub op: AggregatedActionType, pub data_limit: usize, - pub pubdata_da: PubdataDA, + pub pubdata_da: PubdataSendingMode, pub commitment_mode: L1BatchCommitmentMode, } diff --git a/core/node/eth_sender/src/tester.rs b/core/node/eth_sender/src/tester.rs index 86a8c477f9fe..646df1dc1a7b 100644 --- a/core/node/eth_sender/src/tester.rs +++ b/core/node/eth_sender/src/tester.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use zksync_config::{ - configs::eth_sender::{ProofSendingMode, PubdataSendingMode, SenderConfig}, + configs::eth_sender::{ProofSendingMode, SenderConfig}, ContractsConfig, EthConfig, GasAdjusterConfig, }; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; @@ -12,7 +12,7 @@ use zksync_node_test_utils::{create_l1_batch, l1_batch_metadata_to_commitment_ar use zksync_object_store::MockObjectStore; use zksync_types::{ aggregated_operations::AggregatedActionType, block::L1BatchHeader, - commitment::L1BatchCommitmentMode, eth_sender::EthTx, pubdata_da::PubdataDA, + commitment::L1BatchCommitmentMode, eth_sender::EthTx, pubdata_da::PubdataSendingMode, settlement::SettlementMode, Address, L1BatchNumber, ProtocolVersion, H256, }; @@ -485,9 +485,9 @@ impl EthSenderTester { pub async fn save_commit_tx(&mut self, l1_batch_number: L1BatchNumber) -> EthTx { assert_eq!(l1_batch_number, self.next_l1_batch_number_to_commit); let pubdata_mode = if self.pubdata_sending_mode == PubdataSendingMode::Blobs { - PubdataDA::Blobs + PubdataSendingMode::Blobs } else { - PubdataDA::Calldata + PubdataSendingMode::Calldata }; let operation = AggregatedOperation::Commit( l1_batch_with_metadata( diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index 9e844a8b8537..8e5032a69cfc 100644 --- a/core/node/eth_sender/src/tests.rs +++ 
b/core/node/eth_sender/src/tests.rs @@ -126,6 +126,10 @@ pub(crate) fn default_l1_batch_metadata() -> L1BatchMetadata { events_queue_commitment: Some(H256::zero()), bootloader_initial_content_commitment: Some(H256::zero()), state_diffs_compressed: vec![], + state_diff_hash: Some(H256::default()), + local_root: Some(H256::default()), + aggregation_root: Some(H256::default()), + da_inclusion_data: Some(vec![]), } } diff --git a/core/node/eth_watch/src/lib.rs b/core/node/eth_watch/src/lib.rs index a832733b3559..4185878d2ac4 100644 --- a/core/node/eth_watch/src/lib.rs +++ b/core/node/eth_watch/src/lib.rs @@ -140,7 +140,7 @@ impl EthWatch { let finalized_block = client.finalized_block_number().await?; let from_block = storage - .processed_events_dal() + .eth_watcher_dal() .get_or_set_next_block_to_process( processor.event_type(), chain_id, @@ -180,7 +180,7 @@ impl EthWatch { }; storage - .processed_events_dal() + .eth_watcher_dal() .update_next_block_to_process( processor.event_type(), chain_id, diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index d9faf7b664e6..12ac8bdbf3f7 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -620,6 +620,9 @@ async fn get_all_db_txs(storage: &mut Connection<'_, Core>) -> Vec .sync_mempool(&[], &[], 0, 0, 1000) .await .unwrap() + .into_iter() + .map(|x| x.0) + .collect() } fn tx_into_log(tx: L1Tx) -> Log { diff --git a/core/node/fee_model/Cargo.toml b/core/node/fee_model/Cargo.toml index 8760b97d9db3..a84a7c5c2173 100644 --- a/core/node/fee_model/Cargo.toml +++ b/core/node/fee_model/Cargo.toml @@ -16,9 +16,7 @@ zksync_types.workspace = true zksync_dal.workspace = true zksync_config.workspace = true zksync_eth_client.workspace = true -zksync_utils.workspace = true zksync_web3_decl.workspace = true -bigdecimal.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index 27cdc7f5d5e0..6fce46f77225 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -6,9 +6,12 @@ use std::{ }; use tokio::sync::watch; -use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; +use zksync_config::GasAdjusterConfig; use zksync_eth_client::EthFeeInterface; -use zksync_types::{commitment::L1BatchCommitmentMode, L1_GAS_PER_PUBDATA_BYTE, U256}; +use zksync_types::{ + commitment::L1BatchCommitmentMode, pubdata_da::PubdataSendingMode, L1_GAS_PER_PUBDATA_BYTE, + U256, +}; use zksync_web3_decl::client::{DynClient, L1, L2}; use self::metrics::METRICS; @@ -85,8 +88,8 @@ impl GasAdjuster { anyhow::ensure!(client.gateway_mode, "Must be L2 client in L2 mode"); anyhow::ensure!( - matches!(pubdata_sending_mode, PubdataSendingMode::RelayedL2Calldata), - "Only relayed L2 calldata is available for L2 mode, got: {pubdata_sending_mode:?}" + matches!(pubdata_sending_mode, PubdataSendingMode::RelayedL2Calldata | PubdataSendingMode::Custom), + "Only relayed L2 calldata or Custom is available for L2 mode, got: {pubdata_sending_mode:?}" ); } else { anyhow::ensure!(!client.gateway_mode, "Must be L1 client in L1 mode"); diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs index 47023203de0e..ab649e2d7c90 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs +++ 
b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs @@ -1,9 +1,11 @@ use std::{collections::VecDeque, sync::RwLockReadGuard}; use test_casing::test_casing; -use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; +use zksync_config::GasAdjusterConfig; use zksync_eth_client::{clients::MockSettlementLayer, BaseFees}; -use zksync_types::{commitment::L1BatchCommitmentMode, settlement::SettlementMode}; +use zksync_types::{ + commitment::L1BatchCommitmentMode, pubdata_da::PubdataSendingMode, settlement::SettlementMode, +}; use zksync_web3_decl::client::L2; use super::{GasAdjuster, GasStatistics, GasStatisticsInner}; diff --git a/core/node/fee_model/src/lib.rs b/core/node/fee_model/src/lib.rs index fe4f6a27ce29..380a279cccc1 100644 --- a/core/node/fee_model/src/lib.rs +++ b/core/node/fee_model/src/lib.rs @@ -3,14 +3,9 @@ use std::{fmt, fmt::Debug, sync::Arc}; use anyhow::Context as _; use async_trait::async_trait; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_types::{ - fee_model::{ - BaseTokenConversionRatio, BatchFeeInput, FeeModelConfig, FeeModelConfigV2, FeeParams, - FeeParamsV1, FeeParamsV2, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput, - }, - U256, +use zksync_types::fee_model::{ + BaseTokenConversionRatio, BatchFeeInput, FeeModelConfig, FeeParams, FeeParamsV1, FeeParamsV2, }; -use zksync_utils::ceil_div_u256; use crate::l1_gas_price::GasAdjuster; @@ -34,13 +29,7 @@ pub trait BatchFeeModelInputProvider: fmt::Debug + 'static + Send + Sync { l1_pubdata_price_scale_factor: f64, ) -> anyhow::Result { let params = self.get_fee_model_params(); - Ok( - ::default_batch_fee_input_scaled( - params, - l1_gas_price_scale_factor, - l1_pubdata_price_scale_factor, - ), - ) + Ok(params.scale(l1_gas_price_scale_factor, l1_pubdata_price_scale_factor)) } /// Returns the fee model parameters using the denomination of the base token used (WEI for ETH). @@ -48,27 +37,6 @@ pub trait BatchFeeModelInputProvider: fmt::Debug + 'static + Send + Sync { } impl dyn BatchFeeModelInputProvider { - /// Provides the default implementation of `get_batch_fee_input_scaled()` given [`FeeParams`]. - pub fn default_batch_fee_input_scaled( - params: FeeParams, - l1_gas_price_scale_factor: f64, - l1_pubdata_price_scale_factor: f64, - ) -> BatchFeeInput { - match params { - FeeParams::V1(params) => BatchFeeInput::L1Pegged(compute_batch_fee_model_input_v1( - params, - l1_gas_price_scale_factor, - )), - FeeParams::V2(params) => BatchFeeInput::PubdataIndependent( - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2( - params, - l1_gas_price_scale_factor, - l1_pubdata_price_scale_factor, - )), - ), - } - } - /// Returns the batch fee input as-is, i.e. without any scaling for the L1 gas and pubdata prices. pub async fn get_batch_fee_input(&self) -> anyhow::Result { self.get_batch_fee_input_scaled(1.0, 1.0).await @@ -168,122 +136,6 @@ impl BatchFeeModelInputProvider for ApiFeeInputProvider { } } -/// Calculates the batch fee input based on the main node parameters. -/// This function uses the `V1` fee model, i.e. where the pubdata price does not include the proving costs. 
-fn compute_batch_fee_model_input_v1( - params: FeeParamsV1, - l1_gas_price_scale_factor: f64, -) -> L1PeggedBatchFeeModelInput { - let l1_gas_price = (params.l1_gas_price as f64 * l1_gas_price_scale_factor) as u64; - - L1PeggedBatchFeeModelInput { - l1_gas_price, - fair_l2_gas_price: params.config.minimal_l2_gas_price, - } -} - -/// Calculates the batch fee input based on the main node parameters. -/// This function uses the `V2` fee model, i.e. where the pubdata price does not include the proving costs. -fn compute_batch_fee_model_input_v2( - params: FeeParamsV2, - l1_gas_price_scale_factor: f64, - l1_pubdata_price_scale_factor: f64, -) -> PubdataIndependentBatchFeeModelInput { - let config = params.config(); - let l1_gas_price = params.l1_gas_price(); - let l1_pubdata_price = params.l1_pubdata_price(); - - let FeeModelConfigV2 { - minimal_l2_gas_price, - compute_overhead_part, - pubdata_overhead_part, - batch_overhead_l1_gas, - max_gas_per_batch, - max_pubdata_per_batch, - } = config; - - // Firstly, we scale the gas price and pubdata price in case it is needed. - let l1_gas_price = (l1_gas_price as f64 * l1_gas_price_scale_factor) as u64; - let l1_pubdata_price = (l1_pubdata_price as f64 * l1_pubdata_price_scale_factor) as u64; - - // While the final results of the calculations are not expected to have any overflows, the intermediate computations - // might, so we use U256 for them. - let l1_batch_overhead_wei = U256::from(l1_gas_price) * U256::from(batch_overhead_l1_gas); - - let fair_l2_gas_price = { - // Firstly, we calculate which part of the overall overhead each unit of L2 gas should cover. - let l1_batch_overhead_per_gas = - ceil_div_u256(l1_batch_overhead_wei, U256::from(max_gas_per_batch)); - - // Then, we multiply by the `compute_overhead_part` to get the overhead for the computation for each gas. - // Also, this means that if we almost never close batches because of compute, the `compute_overhead_part` should be zero and so - // it is possible that the computation costs include for no overhead. - let gas_overhead_wei = - (l1_batch_overhead_per_gas.as_u64() as f64 * compute_overhead_part) as u64; - - // We sum up the minimal L2 gas price (i.e. the raw prover/compute cost of a single L2 gas) and the overhead for batch being closed. - minimal_l2_gas_price + gas_overhead_wei - }; - - let fair_pubdata_price = { - // Firstly, we calculate which part of the overall overhead each pubdata byte should cover. - let l1_batch_overhead_per_pubdata = - ceil_div_u256(l1_batch_overhead_wei, U256::from(max_pubdata_per_batch)); - - // Then, we multiply by the `pubdata_overhead_part` to get the overhead for each pubdata byte. - // Also, this means that if we almost never close batches because of pubdata, the `pubdata_overhead_part` should be zero and so - // it is possible that the pubdata costs include no overhead. - let pubdata_overhead_wei = - (l1_batch_overhead_per_pubdata.as_u64() as f64 * pubdata_overhead_part) as u64; - - // We sum up the raw L1 pubdata price (i.e. the expected price of publishing a single pubdata byte) and the overhead for batch being closed. - l1_pubdata_price + pubdata_overhead_wei - }; - - PubdataIndependentBatchFeeModelInput { - l1_gas_price, - fair_l2_gas_price, - fair_pubdata_price, - } -} - -/// Bootloader places limitations on fair_l2_gas_price and fair_pubdata_price. -/// (MAX_ALLOWED_FAIR_L2_GAS_PRICE and MAX_ALLOWED_FAIR_PUBDATA_PRICE in bootloader code respectively) -/// Server needs to clip this prices in order to allow chain continues operation at a loss. 
The alternative -/// would be to stop accepting the transactions until the conditions improve. -/// TODO (PE-153): to be removed when bootloader limitation is removed -fn clip_batch_fee_model_input_v2( - fee_model: PubdataIndependentBatchFeeModelInput, -) -> PubdataIndependentBatchFeeModelInput { - /// MAX_ALLOWED_FAIR_L2_GAS_PRICE - const MAXIMUM_L2_GAS_PRICE: u64 = 10_000_000_000_000; - /// MAX_ALLOWED_FAIR_PUBDATA_PRICE - const MAXIMUM_PUBDATA_PRICE: u64 = 1_000_000_000_000_000; - PubdataIndependentBatchFeeModelInput { - l1_gas_price: fee_model.l1_gas_price, - fair_l2_gas_price: if fee_model.fair_l2_gas_price < MAXIMUM_L2_GAS_PRICE { - fee_model.fair_l2_gas_price - } else { - tracing::warn!( - "Fair l2 gas price {} exceeds maximum. Limitting to {}", - fee_model.fair_l2_gas_price, - MAXIMUM_L2_GAS_PRICE - ); - MAXIMUM_L2_GAS_PRICE - }, - fair_pubdata_price: if fee_model.fair_pubdata_price < MAXIMUM_PUBDATA_PRICE { - fee_model.fair_pubdata_price - } else { - tracing::warn!( - "Fair pubdata price {} exceeds maximum. Limitting to {}", - fee_model.fair_pubdata_price, - MAXIMUM_PUBDATA_PRICE - ); - MAXIMUM_PUBDATA_PRICE - }, - } -} - /// Mock [`BatchFeeModelInputProvider`] implementation that returns a constant value. /// Intended to be used in tests only. #[derive(Debug)] @@ -307,308 +159,17 @@ mod tests { use std::num::NonZeroU64; use l1_gas_price::GasAdjusterClient; - use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; + use zksync_config::GasAdjusterConfig; use zksync_eth_client::{clients::MockSettlementLayer, BaseFees}; - use zksync_types::{commitment::L1BatchCommitmentMode, fee_model::BaseTokenConversionRatio}; + use zksync_types::{ + commitment::L1BatchCommitmentMode, + fee_model::{BaseTokenConversionRatio, FeeModelConfigV2}, + pubdata_da::PubdataSendingMode, + U256, + }; use super::*; - // To test that overflow never happens, we'll use giant L1 gas price, i.e. - // almost realistic very large value of 100k gwei. Since it is so large, we'll also - // use it for the L1 pubdata price. - const GWEI: u64 = 1_000_000_000; - const GIANT_L1_GAS_PRICE: u64 = 100_000 * GWEI; - - // As a small L2 gas price we'll use the value of 1 wei. - const SMALL_L1_GAS_PRICE: u64 = 1; - - #[test] - fn test_compute_batch_fee_model_input_v2_giant_numbers() { - let config = FeeModelConfigV2 { - minimal_l2_gas_price: GIANT_L1_GAS_PRICE, - // We generally don't expect those values to be larger than 1. Still, in theory the operator - // may need to set higher values in extreme cases. - compute_overhead_part: 5.0, - pubdata_overhead_part: 5.0, - // The batch overhead would likely never grow beyond that - batch_overhead_l1_gas: 1_000_000, - // Let's imagine that for some reason the limit is relatively small - max_gas_per_batch: 50_000_000, - // The pubdata will likely never go below that - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - GIANT_L1_GAS_PRICE, - GIANT_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - // We'll use scale factor of 3.0 - let input = compute_batch_fee_model_input_v2(params, 3.0, 3.0); - - assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE * 3); - assert_eq!(input.fair_l2_gas_price, 130_000_000_000_000); - assert_eq!(input.fair_pubdata_price, 15_300_000_000_000_000); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_small_numbers() { - // Here we assume that the operator wants to make the lives of users as cheap as possible. 
-
-    #[test]
-    fn test_compute_batch_fee_model_input_v2_small_numbers() {
-        // Here we assume that the operator wants to make transactions as cheap as possible for users.
-        let config = FeeModelConfigV2 {
-            minimal_l2_gas_price: SMALL_L1_GAS_PRICE,
-            compute_overhead_part: 0.0,
-            pubdata_overhead_part: 0.0,
-            batch_overhead_l1_gas: 0,
-            max_gas_per_batch: 50_000_000,
-            max_pubdata_per_batch: 100_000,
-        };
-
-        let params = FeeParamsV2::new(
-            config,
-            SMALL_L1_GAS_PRICE,
-            SMALL_L1_GAS_PRICE,
-            BaseTokenConversionRatio::default(),
-        );
-
-        let input =
-            clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0));
-
-        assert_eq!(input.l1_gas_price, SMALL_L1_GAS_PRICE);
-        assert_eq!(input.fair_l2_gas_price, SMALL_L1_GAS_PRICE);
-        assert_eq!(input.fair_pubdata_price, SMALL_L1_GAS_PRICE);
-    }
-
-    #[test]
-    fn test_compute_batch_fee_model_input_v2_only_pubdata_overhead() {
-        // Here we use a sensible config where only pubdata is used to close the batch
-        let config = FeeModelConfigV2 {
-            minimal_l2_gas_price: 100_000_000_000,
-            compute_overhead_part: 0.0,
-            pubdata_overhead_part: 1.0,
-            batch_overhead_l1_gas: 700_000,
-            max_gas_per_batch: 500_000_000,
-            max_pubdata_per_batch: 100_000,
-        };
-
-        let params = FeeParamsV2::new(
-            config,
-            GIANT_L1_GAS_PRICE,
-            GIANT_L1_GAS_PRICE,
-            BaseTokenConversionRatio::default(),
-        );
-
-        let input =
-            clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0));
-        assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE);
-        // The fair L2 gas price is identical to the minimal one.
-        assert_eq!(input.fair_l2_gas_price, 100_000_000_000);
-        // The fair pubdata price is the minimal one plus the overhead.
-        assert_eq!(input.fair_pubdata_price, 800_000_000_000_000);
-    }
-
-    #[test]
-    fn test_compute_batch_fee_model_input_v2_only_compute_overhead() {
-        // Here we use a sensible config where only compute is used to close the batch
-        let config = FeeModelConfigV2 {
-            minimal_l2_gas_price: 100_000_000_000,
-            compute_overhead_part: 1.0,
-            pubdata_overhead_part: 0.0,
-            batch_overhead_l1_gas: 700_000,
-            max_gas_per_batch: 500_000_000,
-            max_pubdata_per_batch: 100_000,
-        };
-
-        let params = FeeParamsV2::new(
-            config,
-            GIANT_L1_GAS_PRICE,
-            GIANT_L1_GAS_PRICE,
-            BaseTokenConversionRatio::default(),
-        );
-
-        let input = compute_batch_fee_model_input_v2(params, 1.0, 1.0);
-        assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE);
-        // The fair L2 gas price is equal to the minimal one plus the overhead.
-        assert_eq!(input.fair_l2_gas_price, 240_000_000_000);
-        // The fair pubdata price is equal to the original one.
-        assert_eq!(input.fair_pubdata_price, GIANT_L1_GAS_PRICE);
-    }
-
-    #[test]
-    fn test_compute_batch_fee_model_input_v2_param_tweaking() {
-        // In this test we check that each param behaves as expected
-        let base_config = FeeModelConfigV2 {
-            minimal_l2_gas_price: 100_000_000_000,
-            compute_overhead_part: 0.5,
-            pubdata_overhead_part: 0.5,
-            batch_overhead_l1_gas: 700_000,
-            max_gas_per_batch: 500_000_000,
-            max_pubdata_per_batch: 100_000,
-        };
-
-        let base_params = FeeParamsV2::new(
-            base_config,
-            1_000_000_000,
-            1_000_000_000,
-            BaseTokenConversionRatio::default(),
-        );
-
-        let base_input = compute_batch_fee_model_input_v2(base_params, 1.0, 1.0);
-
-        let base_input_larger_l1_gas_price = compute_batch_fee_model_input_v2(
-            FeeParamsV2::new(
-                base_config,
-                2_000_000_000, // double the L1 gas price
-                1_000_000_000,
-                BaseTokenConversionRatio::default(),
-            ),
-            1.0,
-            1.0,
-        );
-        let base_input_scaled_l1_gas_price =
-            compute_batch_fee_model_input_v2(base_params, 2.0, 1.0);
-        assert_eq!(
-            base_input_larger_l1_gas_price, base_input_scaled_l1_gas_price,
-            "Scaling has the correct effect for the L1 gas price"
-        );
-        assert!(
-            base_input.fair_l2_gas_price < base_input_larger_l1_gas_price.fair_l2_gas_price,
-            "L1 gas price increase raises L2 gas price"
-        );
-        assert!(
-            base_input.fair_pubdata_price < base_input_larger_l1_gas_price.fair_pubdata_price,
-            "L1 gas price increase raises pubdata price"
-        );
-
-        let base_input_larger_pubdata_price = compute_batch_fee_model_input_v2(
-            FeeParamsV2::new(
-                base_config,
-                1_000_000_000,
-                2_000_000_000, // double the L1 pubdata price
-                BaseTokenConversionRatio::default(),
-            ),
-            1.0,
-            1.0,
-        );
-        let base_input_scaled_pubdata_price =
-            compute_batch_fee_model_input_v2(base_params, 1.0, 2.0);
-        assert_eq!(
-            base_input_larger_pubdata_price, base_input_scaled_pubdata_price,
-            "Scaling has the correct effect for the pubdata price"
-        );
-        assert_eq!(
-            base_input.fair_l2_gas_price, base_input_larger_pubdata_price.fair_l2_gas_price,
-            "L1 pubdata increase has no effect on L2 gas price"
-        );
-        assert!(
-            base_input.fair_pubdata_price < base_input_larger_pubdata_price.fair_pubdata_price,
-            "Pubdata price increase raises pubdata price"
-        );
-
-        let base_input_larger_max_gas = compute_batch_fee_model_input_v2(
-            FeeParamsV2::new(
-                FeeModelConfigV2 {
-                    max_gas_per_batch: base_config.max_gas_per_batch * 2,
-                    ..base_config
-                },
-                base_params.l1_gas_price(),
-                base_params.l1_pubdata_price(),
-                BaseTokenConversionRatio::default(),
-            ),
-            1.0,
-            1.0,
-        );
-        assert!(
-            base_input.fair_l2_gas_price > base_input_larger_max_gas.fair_l2_gas_price,
-            "Max gas increase lowers L2 gas price"
-        );
-        assert_eq!(
-            base_input.fair_pubdata_price, base_input_larger_max_gas.fair_pubdata_price,
-            "Max gas increase has no effect on pubdata price"
-        );
-
-        let base_input_larger_max_pubdata = compute_batch_fee_model_input_v2(
-            FeeParamsV2::new(
-                FeeModelConfigV2 {
-                    max_pubdata_per_batch: base_config.max_pubdata_per_batch * 2,
-                    ..base_config
-                },
-                base_params.l1_gas_price(),
-                base_params.l1_pubdata_price(),
-                BaseTokenConversionRatio::default(),
-            ),
-            1.0,
-            1.0,
-        );
-        assert_eq!(
-            base_input.fair_l2_gas_price, base_input_larger_max_pubdata.fair_l2_gas_price,
-            "Max pubdata increase has no effect on L2 gas price"
-        );
-        assert!(
-            base_input.fair_pubdata_price > base_input_larger_max_pubdata.fair_pubdata_price,
-            "Max pubdata increase lowers pubdata price"
-        );
-    }
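// The two tests below exercise `clip_batch_fee_model_input_v2`. The caps it enforces are
// MAXIMUM_L2_GAS_PRICE = 10^13 wei (10_000 gwei) and MAXIMUM_PUBDATA_PRICE = 10^15 wei
// (1_000_000 gwei), mirroring the bootloader's MAX_ALLOWED_FAIR_L2_GAS_PRICE and
// MAX_ALLOWED_FAIR_PUBDATA_PRICE; this is exactly what the `10_000 * GWEI` and
// `1_000_000 * GWEI` expected values assert.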
-
-    #[test]
-    fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_l1_gas() {
-        // In this test we check that the gas price limit works as expected
-        let config = FeeModelConfigV2 {
-            minimal_l2_gas_price: 100 * GWEI,
-            compute_overhead_part: 0.5,
-            pubdata_overhead_part: 0.5,
-            batch_overhead_l1_gas: 700_000,
-            max_gas_per_batch: 500_000_000,
-            max_pubdata_per_batch: 100_000,
-        };
-
-        let l1_gas_price = 1_000_000_000 * GWEI;
-        let params = FeeParamsV2::new(
-            config,
-            l1_gas_price,
-            GIANT_L1_GAS_PRICE,
-            BaseTokenConversionRatio::default(),
-        );
-
-        let input =
-            clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0));
-        assert_eq!(input.l1_gas_price, l1_gas_price);
-        // The fair L2 gas price is identical to the maximum
-        assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI);
-        assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI);
-    }
-
-    #[test]
-    fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_conversion_rate() {
-        // In this test we check that the gas price limit works as expected
-        let config = FeeModelConfigV2 {
-            minimal_l2_gas_price: GWEI,
-            compute_overhead_part: 0.5,
-            pubdata_overhead_part: 0.5,
-            batch_overhead_l1_gas: 700_000,
-            max_gas_per_batch: 500_000_000,
-            max_pubdata_per_batch: 100_000,
-        };
-
-        let params = FeeParamsV2::new(
-            config,
-            GWEI,
-            2 * GWEI,
-            BaseTokenConversionRatio {
-                numerator: NonZeroU64::new(3_000_000).unwrap(),
-                denominator: NonZeroU64::new(1).unwrap(),
-            },
-        );
-
-        let input =
-            clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0));
-        assert_eq!(input.l1_gas_price, 3_000_000 * GWEI);
-        // The fair L2 gas price is identical to the maximum
-        assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI);
-        assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI);
-    }
-
     #[derive(Debug, Clone)]
     struct DummyTokenRatioProvider {
         ratio: BaseTokenConversionRatio,
diff --git a/core/node/genesis/Cargo.toml b/core/node/genesis/Cargo.toml
index 71c4c45e9e38..d625d7186bdf 100644
--- a/core/node/genesis/Cargo.toml
+++ b/core/node/genesis/Cargo.toml
@@ -20,7 +20,6 @@ zksync_contracts.workspace = true
 zksync_eth_client.workspace = true
 zksync_merkle_tree.workspace = true
 zksync_system_constants.workspace = true
-zksync_utils.workspace = true
 
 tokio = { workspace = true, features = ["time"] }
 anyhow.workspace = true
diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs
index 3e4c0ee30b94..0a0e77d97f95 100644
--- a/core/node/genesis/src/lib.rs
+++ b/core/node/genesis/src/lib.rs
@@ -17,16 +17,17 @@ use zksync_multivm::utils::get_max_gas_per_pubdata_byte;
 use zksync_system_constants::PRIORITY_EXPIRATION;
 use zksync_types::{
     block::{BlockGasCount, DeployedContract, L1BatchHeader, L2BlockHasher, L2BlockHeader},
+    bytecode::BytecodeHash,
     commitment::{CommitmentInput, L1BatchCommitment},
     fee_model::BatchFeeInput,
     protocol_upgrade::decode_set_chain_id_event,
     protocol_version::{L1VerifierConfig, ProtocolSemanticVersion},
     system_contracts::get_system_smart_contracts,
+    u256_to_h256,
     web3::{BlockNumber, FilterBuilder},
     AccountTreeId, Address, Bloom, L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId,
     ProtocolVersion, ProtocolVersionId, StorageKey, H256, U256,
 };
-use zksync_utils::{bytecode::hash_bytecode, u256_to_h256};
 
 use crate::utils::{
     add_eth_token, get_deduped_log_queries, get_storage_logs,
@@ -409,6 +410,7 @@ pub async fn create_genesis_l1_batch(
         virtual_blocks: 0,
         gas_limit: 0,
         logs_bloom: Bloom::zero(),
+        pubdata_params: Default::default(),
     };
 
     let mut transaction = storage.start_transaction().await?;
@@ -445,7 +447,12 @@ pub async fn create_genesis_l1_batch(
     let factory_deps = system_contracts
         .iter()
-        .map(|c| (hash_bytecode(&c.bytecode), c.bytecode.clone()))
+        .map(|c| {
+            (
+                BytecodeHash::for_bytecode(&c.bytecode).value(),
+                c.bytecode.clone(),
+            )
+        })
         .collect();
 
     insert_base_system_contracts_to_factory_deps(&mut transaction, base_system_contracts).await?;
diff --git a/core/node/genesis/src/utils.rs b/core/node/genesis/src/utils.rs
index 6042513537cd..a51f49a166a2 100644
--- a/core/node/genesis/src/utils.rs
+++ b/core/node/genesis/src/utils.rs
@@ -5,18 +5,19 @@ use zksync_contracts::BaseSystemContracts;
 use zksync_dal::{Connection, Core, CoreDal};
 use zksync_multivm::{
     circuit_sequencer_api_latest::sort_storage_access::sort_storage_access_queries,
-    zk_evm_latest::aux_structures::{LogQuery as MultiVmLogQuery, Timestamp as MultiVMTimestamp},
+    zk_evm_latest::aux_structures::{LogQuery as MultiVmLogQuery, Timestamp as MultiVmTimestamp},
 };
 use zksync_system_constants::{DEFAULT_ERA_CHAIN_ID, ETHEREUM_ADDRESS};
 use zksync_types::{
     block::{DeployedContract, L1BatchTreeData},
+    bytecode::BytecodeHash,
     commitment::L1BatchCommitment,
-    get_code_key, get_known_code_key, get_system_context_init_logs,
+    get_code_key, get_known_code_key, get_system_context_init_logs, h256_to_u256,
     tokens::{TokenInfo, TokenMetadata},
+    u256_to_h256,
     zk_evm_types::{LogQuery, Timestamp},
     AccountTreeId, L1BatchNumber, L2BlockNumber, L2ChainId, StorageKey, StorageLog, H256,
 };
-use zksync_utils::{be_words_to_bytes, bytecode::hash_bytecode, h256_to_u256, u256_to_h256};
 
 use crate::GenesisError;
 
@@ -50,7 +51,7 @@ pub(super) fn get_storage_logs(system_contracts: &[DeployedContract]) -> Vec<StorageLog> {
     let known_codes_logs: Vec<_> = system_contracts
         .iter()
         .map(|contract| {
-            let hash = hash_bytecode(&contract.bytecode);
+            let hash = BytecodeHash::for_bytecode(&contract.bytecode).value();
             let known_code_key = get_known_code_key(&hash);
             let marked_known_value = H256::from_low_u64_be(1u64);
 
@@ -62,7 +63,7 @@ pub(super) fn get_storage_logs(system_contracts: &[DeployedContract]) -> Vec<StorageLog> {
     let deployed_contracts_logs: Vec<_> = system_contracts
         .iter()
         .map(|contract| {
-            let hash = hash_bytecode(&contract.bytecode);
+            let hash = BytecodeHash::for_bytecode(&contract.bytecode).value();
             let code_key = get_code_key(contract.account_id.address());
             StorageLog::new_write_log(code_key, hash)
         })
@@ -83,7 +84,7 @@ pub(super) fn get_deduped_log_queries(storage_logs: &[StorageLog]) -> Vec<LogQuery> {
…
+impl Serialize for HexNodeKey {
+    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+        serializer.serialize_str(&self.0.to_string())
+    }
+}
+
+impl<'de> Deserialize<'de> for HexNodeKey {
+    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
+        struct HexNodeKeyVisitor;
+
+        impl de::Visitor<'_> for HexNodeKeyVisitor {
+            type Value = HexNodeKey;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+                formatter.write_str("hex-encoded versioned key like `123:c0ffee`")
+            }
+
+            fn visit_str<E: de::Error>(self, v: &str) -> Result<Self::Value, E> {
+                v.parse().map(HexNodeKey).map_err(de::Error::custom)
+            }
+        }
+
+        deserializer.deserialize_str(HexNodeKeyVisitor)
+    }
+}
+
+#[derive(Debug, Serialize)]
+struct ApiLeafNode {
+    full_key: H256,
+    value_hash: H256,
+    leaf_index: u64,
+}
+
+#[derive(Debug, Serialize)]
+struct ApiChildRef {
+    hash: ValueHash,
+    version: u64,
+    is_leaf: bool,
+}
+
+#[derive(Debug, Serialize)]
+#[serde(transparent)]
+struct ApiInternalNode(HashMap<char, ApiChildRef>);
+
+#[derive(Debug, Serialize)]
+struct ApiRawNode {
+    raw: web3::Bytes,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    leaf: Option<ApiLeafNode>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    internal: Option<ApiInternalNode>,
+}
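// With these `serde` attributes, a fetched node serializes roughly as (illustrative):
// {"raw": "0x…", "internal": {"0": {"hash": "0x…", "version": 1, "is_leaf": false}, …}}
// for internal nodes, with a `leaf` object taking the place of `internal` for leaves;
// nibbles become single hex chars via the mapping in the `From` impl below.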
+
+impl From<RawNode> for ApiRawNode {
+    fn from(node: RawNode) -> Self {
+        Self {
+            raw: web3::Bytes(node.raw),
+            leaf: node.leaf.map(|leaf| ApiLeafNode {
+                full_key: u256_to_h256(leaf.full_key),
+                value_hash: leaf.value_hash,
+                leaf_index: leaf.leaf_index,
+            }),
+            internal: node.internal.map(|internal| {
+                ApiInternalNode(
+                    internal
+                        .children()
+                        .map(|(nibble, child_ref)| {
+                            let nibble = if nibble < 10 {
+                                b'0' + nibble
+                            } else {
+                                b'a' + nibble - 10
+                            };
+                            (
+                                char::from(nibble),
+                                ApiChildRef {
+                                    hash: child_ref.hash,
+                                    version: child_ref.version,
+                                    is_leaf: child_ref.is_leaf,
+                                },
+                            )
+                        })
+                        .collect(),
+                )
+            }),
+        }
+    }
+}
+
+#[derive(Debug, Deserialize)]
+struct TreeNodesRequest {
+    keys: Vec<HexNodeKey>,
+}
+
+#[derive(Debug, Serialize)]
+struct TreeNodesResponse {
+    nodes: HashMap<HexNodeKey, ApiRawNode>,
+}
+
+#[derive(Debug, Deserialize)]
+struct StaleKeysRequest {
+    l1_batch_number: L1BatchNumber,
+}
+
+#[derive(Debug, Serialize)]
+struct StaleKeysResponse {
+    stale_keys: Vec<HexNodeKey>,
+}
+
 /// Server-side tree API error.
 #[derive(Debug)]
 enum TreeApiServerError {
@@ -343,6 +457,46 @@ impl AsyncTreeReader {
         Ok(Json(response))
     }
 
+    async fn get_nodes_handler(
+        State(this): State<Self>,
+        Json(request): Json<TreeNodesRequest>,
+    ) -> Json<TreeNodesResponse> {
+        let latency = API_METRICS.latency[&MerkleTreeApiMethod::GetNodes].start();
+        let keys: Vec<_> = request.keys.iter().map(|key| key.0).collect();
+        let nodes = this.clone().raw_nodes(keys).await;
+        let nodes = request
+            .keys
+            .into_iter()
+            .zip(nodes)
+            .filter_map(|(key, node)| Some((key, node?.into())))
+            .collect();
+        let response = TreeNodesResponse { nodes };
+        latency.observe();
+        Json(response)
+    }
+
+    async fn get_stale_keys_handler(
+        State(this): State<Self>,
+        Json(request): Json<StaleKeysRequest>,
+    ) -> Json<StaleKeysResponse> {
+        let latency = API_METRICS.latency[&MerkleTreeApiMethod::GetStaleKeys].start();
+        let stale_keys = this.clone().raw_stale_keys(request.l1_batch_number).await;
+        let stale_keys = stale_keys.into_iter().map(HexNodeKey).collect();
+        latency.observe();
+        Json(StaleKeysResponse { stale_keys })
+    }
+
+    async fn bogus_stale_keys_handler(
+        State(this): State<Self>,
+        Json(request): Json<StaleKeysRequest>,
+    ) -> Json<StaleKeysResponse> {
+        let latency = API_METRICS.latency[&MerkleTreeApiMethod::GetBogusStaleKeys].start();
+        let stale_keys = this.clone().bogus_stale_keys(request.l1_batch_number).await;
+        let stale_keys = stale_keys.into_iter().map(HexNodeKey).collect();
+        latency.observe();
+        Json(StaleKeysResponse { stale_keys })
+    }
+
     async fn create_api_server(
         self,
         bind_address: &SocketAddr,
@@ -353,6 +507,15 @@ impl AsyncTreeReader {
         let app = Router::new()
             .route("/", routing::get(Self::info_handler))
             .route("/proofs", routing::post(Self::get_proofs_handler))
+            .route("/debug/nodes", routing::post(Self::get_nodes_handler))
+            .route(
+                "/debug/stale-keys",
+                routing::post(Self::get_stale_keys_handler),
+            )
+            .route(
+                "/debug/stale-keys/bogus",
+                routing::post(Self::bogus_stale_keys_handler),
+            )
             .with_state(self);
 
         let listener = tokio::net::TcpListener::bind(bind_address)
@@ -369,8 +532,8 @@ impl AsyncTreeReader {
                 }
                 tracing::info!("Stop signal received, Merkle tree API server is shutting down");
             })
-            .await
-            .context("Merkle tree API server failed")?;
+        .await
+        .context("Merkle tree API server failed")?;
 
         tracing::info!("Merkle tree API server shut down");
         Ok(())
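// A sketch of exercising the new debug routes (hypothetical host/port; keys are
// `<version>:<hex nibble path>`, so "0:" addresses the root at version 0):
//
//     curl -X POST http://localhost:3072/debug/nodes \
//         -H 'Content-Type: application/json' -d '{"keys": ["0:", "0:0"]}'
//     curl -X POST http://localhost:3072/debug/stale-keys \
//         -H 'Content-Type: application/json' -d '{"l1_batch_number": 1}'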
diff --git a/core/node/metadata_calculator/src/api_server/tests.rs b/core/node/metadata_calculator/src/api_server/tests.rs
index 42a3152e6b53..9bb994cb4163 100644
--- a/core/node/metadata_calculator/src/api_server/tests.rs
+++ b/core/node/metadata_calculator/src/api_server/tests.rs
@@ -72,11 +72,82 @@ async fn merkle_tree_api() {
     assert_eq!(err.version_count, 6);
     assert_eq!(err.missing_version, 10);
 
+    let raw_nodes_response = api_client
+        .inner
+        .post(format!("http://{local_addr}/debug/nodes"))
+        .json(&serde_json::json!({ "keys": ["0:", "0:0"] }))
+        .send()
+        .await
+        .unwrap()
+        .error_for_status()
+        .unwrap();
+    let raw_nodes_response: serde_json::Value = raw_nodes_response.json().await.unwrap();
+    assert_raw_nodes_response(&raw_nodes_response);
+
+    let raw_stale_keys_response = api_client
+        .inner
+        .post(format!("http://{local_addr}/debug/stale-keys"))
+        .json(&serde_json::json!({ "l1_batch_number": 1 }))
+        .send()
+        .await
+        .unwrap()
+        .error_for_status()
+        .unwrap();
+    let raw_stale_keys_response: serde_json::Value = raw_stale_keys_response.json().await.unwrap();
+    assert_raw_stale_keys_response(&raw_stale_keys_response);
+
+    let raw_stale_keys_response = api_client
+        .inner
+        .post(format!("http://{local_addr}/debug/stale-keys/bogus"))
+        .json(&serde_json::json!({ "l1_batch_number": 1 }))
+        .send()
+        .await
+        .unwrap()
+        .error_for_status()
+        .unwrap();
+    let raw_stale_keys_response: serde_json::Value = raw_stale_keys_response.json().await.unwrap();
+    assert_eq!(
+        raw_stale_keys_response,
+        serde_json::json!({ "stale_keys": [] })
+    );
+
     // Stop the calculator and the tree API server.
     stop_sender.send_replace(true);
     api_server_task.await.unwrap().unwrap();
 }
 
+fn assert_raw_nodes_response(response: &serde_json::Value) {
+    let response = response.as_object().expect("not an object");
+    let response = response["nodes"].as_object().expect("not an object");
+    let root = response["0:"].as_object().expect("not an object");
+    assert!(
+        root.len() == 2 && root.contains_key("internal") && root.contains_key("raw"),
+        "{root:#?}"
+    );
+    let root = root["internal"].as_object().expect("not an object");
+    for key in root.keys() {
+        assert_eq!(key.len(), 1, "{key}");
+        let key = key.as_bytes()[0];
+        assert_matches!(key, b'0'..=b'9' | b'a'..=b'f');
+    }
+
+    let node = response["0:0"].as_object().expect("not an object");
+    assert!(
+        node.len() == 2 && node.contains_key("internal") && node.contains_key("raw"),
+        "{node:#?}"
+    );
+}
+
+fn assert_raw_stale_keys_response(response: &serde_json::Value) {
+    let response = response.as_object().expect("not an object");
+    let stale_keys = response["stale_keys"].as_array().expect("not an array");
+    assert!(!stale_keys.is_empty()); // At least the root is always obsoleted
+    for stale_key in stale_keys {
+        let stale_key = stale_key.as_str().expect("not a string");
+        stale_key.parse::<NodeKey>().unwrap();
+    }
+}
+
 #[tokio::test]
 async fn api_client_connection_error() {
     // Use an address that will definitely fail on a timeout.
diff --git a/core/node/metadata_calculator/src/helpers.rs b/core/node/metadata_calculator/src/helpers.rs
index b6989afb179f..b8d02067f8ea 100644
--- a/core/node/metadata_calculator/src/helpers.rs
+++ b/core/node/metadata_calculator/src/helpers.rs
@@ -22,6 +22,8 @@ use zksync_health_check::{CheckHealth, Health, HealthStatus, ReactiveHealthCheck
 use zksync_merkle_tree::{
     domain::{TreeMetadata, ZkSyncTree, ZkSyncTreeReader},
     recovery::{MerkleTreeRecovery, PersistenceThreadHandle},
+    repair::StaleKeysRepairTask,
+    unstable::{NodeKey, RawNode},
     Database, Key, MerkleTreeColumnFamily, NoVersionError, RocksDBWrapper, TreeEntry,
     TreeEntryWithProof, TreeInstruction,
 };
@@ -35,7 +37,7 @@ use zksync_types::{
 use super::{
     metrics::{LoadChangesStage, TreeUpdateStage, METRICS},
     pruning::PruningHandles,
-    MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig,
+    MerkleTreeReaderConfig, MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig,
 };
 
 /// General information about the Merkle tree.
@@ -176,6 +178,40 @@ fn create_db_sync(config: &MetadataCalculatorConfig) -> anyhow::Result<RocksDBWrapper> {
…
+pub(super) async fn create_readonly_db(config: MerkleTreeReaderConfig) -> anyhow::Result<RocksDBWrapper> {
+    tokio::task::spawn_blocking(move || {
+        let MerkleTreeReaderConfig {
+            db_path,
+            max_open_files,
+            multi_get_chunk_size,
+            block_cache_capacity,
+            include_indices_and_filters_in_block_cache,
+        } = config;
+
+        tracing::info!(
+            "Initializing Merkle tree database at `{db_path}` (max open files: {max_open_files:?}) with {multi_get_chunk_size} multi-get chunk size, \
+             {block_cache_capacity}B block cache (indices & filters included: {include_indices_and_filters_in_block_cache:?})"
+        );
+        let mut db = RocksDB::with_options(
+            db_path.as_ref(),
+            RocksDBOptions {
+                block_cache_capacity: Some(block_cache_capacity),
+                include_indices_and_filters_in_block_cache,
+                max_open_files,
+                ..RocksDBOptions::default()
+            }
+        )?;
+        if cfg!(test) {
+            db = db.with_sync_writes();
+        }
+        Ok(RocksDBWrapper::from(db))
+    })
+    .await
+    .context("panicked creating Merkle tree RocksDB")?
+}
+
 /// Wrapper around the "main" tree implementation used by [`MetadataCalculator`].
 ///
 /// Async methods provided by this wrapper are not cancel-safe! This is probably not an issue;
@@ -307,6 +343,13 @@ pub struct AsyncTreeReader {
 }
 
 impl AsyncTreeReader {
+    pub(super) fn new(db: RocksDBWrapper, mode: MerkleTreeMode) -> anyhow::Result<Self> {
+        Ok(Self {
+            inner: ZkSyncTreeReader::new(db)?,
+            mode,
+        })
+    }
+
     fn downgrade(&self) -> WeakAsyncTreeReader {
         WeakAsyncTreeReader {
             db: self.inner.db().clone().into_inner().downgrade(),
@@ -366,6 +409,31 @@ impl AsyncTreeReader {
             .await
             .unwrap()
     }
+
+    pub(crate) async fn raw_nodes(self, keys: Vec<NodeKey>) -> Vec<Option<RawNode>> {
+        tokio::task::spawn_blocking(move || self.inner.raw_nodes(&keys))
+            .await
+            .unwrap()
+    }
+
+    pub(crate) async fn raw_stale_keys(self, l1_batch_number: L1BatchNumber) -> Vec<NodeKey> {
+        tokio::task::spawn_blocking(move || self.inner.raw_stale_keys(l1_batch_number))
+            .await
+            .unwrap()
+    }
+
+    pub(crate) async fn bogus_stale_keys(self, l1_batch_number: L1BatchNumber) -> Vec<NodeKey> {
+        let version = l1_batch_number.0.into();
+        tokio::task::spawn_blocking(move || {
+            StaleKeysRepairTask::bogus_stale_keys(self.inner.db(), version)
+        })
+        .await
+        .unwrap()
+    }
+
+    pub(crate) fn into_db(self) -> RocksDBWrapper {
+        self.inner.into_db()
+    }
 }
 
 /// Version of async tree reader that holds a weak reference to RocksDB. Used in [`MerkleTreeHealthCheck`].
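// A sketch of plausible values for the `MerkleTreeReaderConfig` introduced in the next
// file (all numbers illustrative, not recommendations):
//
//     let config = MerkleTreeReaderConfig {
//         db_path: "./db/main/tree".to_owned(),
//         max_open_files: None,
//         multi_get_chunk_size: 500,
//         block_cache_capacity: 128 << 20, // 128 MiB
//         include_indices_and_filters_in_block_cache: false,
//     };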
diff --git a/core/node/metadata_calculator/src/lib.rs b/core/node/metadata_calculator/src/lib.rs
index 451090694b2c..dddb53b4c52f 100644
--- a/core/node/metadata_calculator/src/lib.rs
+++ b/core/node/metadata_calculator/src/lib.rs
@@ -26,13 +26,16 @@ use self::{
 pub use self::{
     helpers::{AsyncTreeReader, LazyAsyncTreeReader, MerkleTreeInfo},
     pruning::MerkleTreePruningTask,
+    repair::StaleKeysRepairTask,
 };
+use crate::helpers::create_readonly_db;
 
 pub mod api_server;
 mod helpers;
 mod metrics;
 mod pruning;
 mod recovery;
+mod repair;
 #[cfg(test)]
 pub(crate) mod tests;
 mod updater;
@@ -202,6 +205,11 @@ impl MetadataCalculator {
         MerkleTreePruningTask::new(pruning_handles, self.pool.clone(), poll_interval)
     }
 
+    /// This method should be called once.
+    pub fn stale_keys_repair_task(&self) -> StaleKeysRepairTask {
+        StaleKeysRepairTask::new(self.tree_reader())
+    }
+
     async fn create_tree(&self) -> anyhow::Result<GenericAsyncTree> {
         self.health_updater
             .update(MerkleTreeHealth::Initialization.into());
@@ -264,3 +272,55 @@ impl MetadataCalculator {
             .await
     }
 }
+
+/// Configuration of [`TreeReaderTask`].
+#[derive(Debug, Clone)]
+pub struct MerkleTreeReaderConfig {
+    /// Filesystem path to the RocksDB instance that stores the tree.
+    pub db_path: String,
+    /// Maximum number of files concurrently opened by RocksDB. Useful to fit into OS limits; can be used
+    /// as a rudimentary way to control RAM usage of the tree.
+    pub max_open_files: Option<NonZeroU32>,
+    /// Chunk size for multi-get operations. Can speed up loading data for the Merkle tree on some environments,
+    /// but the effects vary wildly depending on the setup (e.g., the filesystem used).
+    pub multi_get_chunk_size: usize,
+    /// Capacity of the RocksDB block cache in bytes. Reasonable values range from ~100 MiB to several GB.
+    pub block_cache_capacity: usize,
+    /// If specified, RocksDB indices and Bloom filters will be managed by the block cache, rather than
+    /// being loaded entirely into RAM on RocksDB initialization. The block cache capacity should be increased
+    /// correspondingly; otherwise, RocksDB performance can significantly degrade.
+    pub include_indices_and_filters_in_block_cache: bool,
+}
+
+/// Alternative to [`MetadataCalculator`] that provides readonly access to the Merkle tree.
+#[derive(Debug)]
+pub struct TreeReaderTask {
+    config: MerkleTreeReaderConfig,
+    tree_reader: watch::Sender<Option<AsyncTreeReader>>,
+}
+
+impl TreeReaderTask {
+    /// Creates a new task with the provided configuration.
+    pub fn new(config: MerkleTreeReaderConfig) -> Self {
+        Self {
+            config,
+            tree_reader: watch::channel(None).0,
+        }
+    }
+
+    /// Returns a reference to the tree reader.
+    pub fn tree_reader(&self) -> LazyAsyncTreeReader {
+        LazyAsyncTreeReader(self.tree_reader.subscribe())
+    }
+
+    /// Runs this task. The task exits on error, or when the tree reader is successfully initialized.
+    pub async fn run(self, mut stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
+        let db = tokio::select! {
+            db_result = create_readonly_db(self.config) => db_result?,
+            _ = stop_receiver.changed() => return Ok(()),
+        };
+        let reader = AsyncTreeReader::new(db, MerkleTreeMode::Lightweight)?;
+        self.tree_reader.send_replace(Some(reader));
+        Ok(())
+    }
+}
diff --git a/core/node/metadata_calculator/src/metrics.rs b/core/node/metadata_calculator/src/metrics.rs
index 7eb49b95afd4..c6d7094ef839 100644
--- a/core/node/metadata_calculator/src/metrics.rs
+++ b/core/node/metadata_calculator/src/metrics.rs
@@ -1,6 +1,6 @@
 //! Metrics for `MetadataCalculator`.
-use std::time::{Duration, Instant}; +use std::time::{Duration, Instant, SystemTime}; use vise::{ Buckets, DurationAsSecs, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Info, @@ -9,7 +9,6 @@ use vise::{ use zksync_config::configs::database::MerkleTreeMode; use zksync_shared_metrics::{BlockStage, APP_METRICS}; use zksync_types::block::L1BatchHeader; -use zksync_utils::time::seconds_since_epoch; use super::{MetadataCalculator, MetadataCalculatorConfig}; @@ -187,6 +186,11 @@ impl MetadataCalculator { total_logs: usize, start: Instant, ) { + let (Some(first_header), Some(last_header)) = (batch_headers.first(), batch_headers.last()) + else { + return; + }; + let elapsed = start.elapsed(); METRICS.update_tree_latency.observe(elapsed); if total_logs > 0 { @@ -205,17 +209,20 @@ impl MetadataCalculator { METRICS.log_batch.observe(total_logs); METRICS.blocks_batch.observe(batch_headers.len()); - let first_batch_number = batch_headers.first().unwrap().number.0; - let last_batch_number = batch_headers.last().unwrap().number.0; + let first_batch_number = first_header.number.0; + let last_batch_number = last_header.number.0; tracing::info!( "L1 batches #{:?} processed in tree", first_batch_number..=last_batch_number ); APP_METRICS.block_number[&BlockStage::Tree].set(last_batch_number.into()); + let duration_since_epoch = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("incorrect system time"); let latency = - seconds_since_epoch().saturating_sub(batch_headers.first().unwrap().timestamp); - APP_METRICS.block_latency[&BlockStage::Tree].observe(Duration::from_secs(latency)); + duration_since_epoch.saturating_sub(Duration::from_secs(first_header.timestamp)); + APP_METRICS.block_latency[&BlockStage::Tree].observe(latency); } } diff --git a/core/node/metadata_calculator/src/pruning.rs b/core/node/metadata_calculator/src/pruning.rs index abbf9bf6865a..4ac05e55c302 100644 --- a/core/node/metadata_calculator/src/pruning.rs +++ b/core/node/metadata_calculator/src/pruning.rs @@ -304,6 +304,7 @@ mod tests { extend_db_state_from_l1_batch( &mut storage, snapshot_recovery.l1_batch_number + 1, + snapshot_recovery.l2_block_number + 1, new_logs, ) .await; diff --git a/core/node/metadata_calculator/src/recovery/mod.rs b/core/node/metadata_calculator/src/recovery/mod.rs index dcbc0a68af92..ce7207471791 100644 --- a/core/node/metadata_calculator/src/recovery/mod.rs +++ b/core/node/metadata_calculator/src/recovery/mod.rs @@ -32,16 +32,14 @@ use std::{ }; use anyhow::Context as _; +use async_trait::async_trait; use futures::future; use tokio::sync::{watch, Mutex, Semaphore}; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_health_check::HealthUpdater; use zksync_merkle_tree::TreeEntry; use zksync_shared_metrics::{SnapshotRecoveryStage, APP_METRICS}; -use zksync_types::{ - snapshots::{uniform_hashed_keys_chunk, SnapshotRecoveryStatus}, - L2BlockNumber, H256, -}; +use zksync_types::{snapshots::uniform_hashed_keys_chunk, L1BatchNumber, L2BlockNumber, H256}; use super::{ helpers::{AsyncTree, AsyncTreeRecovery, GenericAsyncTree, MerkleTreeHealth}, @@ -54,12 +52,13 @@ mod tests; /// Handler of recovery life cycle events. This functionality is encapsulated in a trait to be able /// to control recovery behavior in tests. 
+#[async_trait]
 trait HandleRecoveryEvent: fmt::Debug + Send + Sync {
     fn recovery_started(&mut self, _chunk_count: u64, _recovered_chunk_count: u64) {
         // Default implementation does nothing
     }
 
-    fn chunk_recovered(&self) {
+    async fn chunk_recovered(&self) {
         // Default implementation does nothing
     }
 }
@@ -82,6 +81,7 @@ impl<'a> RecoveryHealthUpdater<'a> {
     }
 }
 
+#[async_trait]
 impl HandleRecoveryEvent for RecoveryHealthUpdater<'_> {
     fn recovery_started(&mut self, chunk_count: u64, recovered_chunk_count: u64) {
         self.chunk_count = chunk_count;
@@ -91,7 +91,7 @@ impl HandleRecoveryEvent for RecoveryHealthUpdater<'_> {
             .set(recovered_chunk_count);
     }
 
-    fn chunk_recovered(&self) {
+    async fn chunk_recovered(&self) {
         let recovered_chunk_count = self.recovered_chunk_count.fetch_add(1, Ordering::SeqCst) + 1;
         let chunks_left = self.chunk_count.saturating_sub(recovered_chunk_count);
         tracing::info!(
@@ -110,34 +110,68 @@ impl HandleRecoveryEvent for RecoveryHealthUpdater<'_> {
 }
 
 #[derive(Debug, Clone, Copy)]
-struct SnapshotParameters {
+struct InitParameters {
+    l1_batch: L1BatchNumber,
     l2_block: L2BlockNumber,
-    expected_root_hash: H256,
+    expected_root_hash: Option<H256>,
     log_count: u64,
     desired_chunk_size: u64,
 }
 
-impl SnapshotParameters {
+impl InitParameters {
     async fn new(
         pool: &ConnectionPool<Core>,
-        recovery: &SnapshotRecoveryStatus,
         config: &MetadataCalculatorRecoveryConfig,
-    ) -> anyhow::Result<Self> {
-        let l2_block = recovery.l2_block_number;
-        let expected_root_hash = recovery.l1_batch_root_hash;
-
+    ) -> anyhow::Result<Option<Self>> {
         let mut storage = pool.connection_tagged("metadata_calculator").await?;
+        let recovery_status = storage
+            .snapshot_recovery_dal()
+            .get_applied_snapshot_status()
+            .await?;
+        let pruning_info = storage.pruning_dal().get_pruning_info().await?;
+
+        let (l1_batch, l2_block);
+        let mut expected_root_hash = None;
+        match (recovery_status, pruning_info.last_hard_pruned_l2_block) {
+            (Some(recovery), None) => {
+                tracing::warn!(
+                    "Snapshot recovery {recovery:?} is present on the node, but pruning info is empty; assuming no pruning happened"
+                );
+                l1_batch = recovery.l1_batch_number;
+                l2_block = recovery.l2_block_number;
+                expected_root_hash = Some(recovery.l1_batch_root_hash);
+            }
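// Summary of the four (snapshot recovery, pruning) cases resolved by this `match`:
//   (Some, None)  -> init from the snapshot; the root hash can be verified.
//   (Some, Some)  -> init from whichever of the pruned / snapshot L2 blocks is later;
//                    the root hash is only verifiable if nothing past the snapshot batch
//                    was pruned.
//   (None, Some)  -> init from the pruned boundary; there is no root hash to check against.
//   (None, None)  -> no recovery info at all; the caller starts the tree from genesis.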
+            (Some(recovery), Some(pruned_l2_block)) => {
+                // We have both recovery and some pruning on top of it.
+                l2_block = pruned_l2_block.max(recovery.l2_block_number);
+                l1_batch = pruning_info
+                    .last_hard_pruned_l1_batch
+                    .with_context(|| format!("malformed pruning info: {pruning_info:?}"))?;
+                if l1_batch == recovery.l1_batch_number {
+                    expected_root_hash = Some(recovery.l1_batch_root_hash);
+                }
+            }
+            (None, Some(pruned_l2_block)) => {
+                l2_block = pruned_l2_block;
+                l1_batch = pruning_info
+                    .last_hard_pruned_l1_batch
+                    .with_context(|| format!("malformed pruning info: {pruning_info:?}"))?;
+            }
+            (None, None) => return Ok(None),
+        };
+
         let log_count = storage
             .storage_logs_dal()
             .get_storage_logs_row_count(l2_block)
             .await?;
 
-        Ok(Self {
+        Ok(Some(Self {
+            l1_batch,
             l2_block,
             expected_root_hash,
             log_count,
             desired_chunk_size: config.desired_chunk_size,
-        })
+        }))
     }
 
     fn chunk_count(&self) -> u64 {
@@ -168,29 +202,27 @@ impl GenericAsyncTree {
         stop_receiver: &watch::Receiver<bool>,
     ) -> anyhow::Result<Option<AsyncTree>> {
         let started_at = Instant::now();
-        let (tree, snapshot_recovery) = match self {
+        let (tree, init_params) = match self {
             Self::Ready(tree) => return Ok(Some(tree)),
             Self::Recovering(tree) => {
-                let snapshot_recovery = get_snapshot_recovery(main_pool).await?.context(
+                let params = InitParameters::new(main_pool, config).await?.context(
                     "Merkle tree is recovering, but Postgres doesn't contain snapshot recovery information",
                 )?;
                 let recovered_version = tree.recovered_version();
                 anyhow::ensure!(
-                    u64::from(snapshot_recovery.l1_batch_number.0) == recovered_version,
-                    "Snapshot L1 batch in Postgres ({snapshot_recovery:?}) differs from the recovered Merkle tree version \
+                    u64::from(params.l1_batch.0) == recovered_version,
+                    "Snapshot L1 batch in Postgres ({params:?}) differs from the recovered Merkle tree version \
                      ({recovered_version})"
                 );
-                tracing::info!("Resuming tree recovery with status: {snapshot_recovery:?}");
-                (tree, snapshot_recovery)
+                tracing::info!("Resuming tree recovery with status: {params:?}");
+                (tree, params)
             }
             Self::Empty { db, mode } => {
-                if let Some(snapshot_recovery) = get_snapshot_recovery(main_pool).await? {
-                    tracing::info!(
-                        "Starting Merkle tree recovery with status {snapshot_recovery:?}"
-                    );
-                    let l1_batch = snapshot_recovery.l1_batch_number;
+                if let Some(params) = InitParameters::new(main_pool, config).await? {
+                    tracing::info!("Starting Merkle tree recovery with status {params:?}");
+                    let l1_batch = params.l1_batch;
                     let tree = AsyncTreeRecovery::new(db, l1_batch.0.into(), mode, config)?;
-                    (tree, snapshot_recovery)
+                    (tree, params)
                 } else {
                     // Start the tree from scratch. The genesis block will be filled in `TreeUpdater::loop_updating_tree()`.
                    return Ok(Some(AsyncTree::new(db, mode)?));
                }
            }
        };
 
-        let snapshot = SnapshotParameters::new(main_pool, &snapshot_recovery, config).await?;
         tracing::debug!(
-            "Obtained snapshot parameters: {snapshot:?} based on recovery configuration {config:?}"
+            "Obtained recovery init parameters: {init_params:?} based on recovery configuration {config:?}"
         );
         let recovery_options = RecoveryOptions {
-            chunk_count: snapshot.chunk_count(),
+            chunk_count: init_params.chunk_count(),
             concurrency_limit: recovery_pool.max_size() as usize,
             events: Box::new(RecoveryHealthUpdater::new(health_updater)),
         };
         let tree = tree
-            .recover(snapshot, recovery_options, &recovery_pool, stop_receiver)
+            .recover(init_params, recovery_options, &recovery_pool, stop_receiver)
             .await?;
         if tree.is_some() {
             // Only report latency if recovery wasn't canceled
@@ -223,12 +254,12 @@ impl GenericAsyncTree {
 impl AsyncTreeRecovery {
     async fn recover(
         mut self,
-        snapshot: SnapshotParameters,
+        init_params: InitParameters,
         mut options: RecoveryOptions<'_>,
         pool: &ConnectionPool<Core>,
         stop_receiver: &watch::Receiver<bool>,
     ) -> anyhow::Result<Option<AsyncTree>> {
-        self.ensure_desired_chunk_size(snapshot.desired_chunk_size)
+        self.ensure_desired_chunk_size(init_params.desired_chunk_size)
             .await?;
 
         let start_time = Instant::now();
@@ -237,13 +268,15 @@ impl AsyncTreeRecovery {
             .map(|chunk_id| uniform_hashed_keys_chunk(chunk_id, chunk_count))
             .collect();
         tracing::info!(
-            "Recovering Merkle tree from Postgres snapshot in {chunk_count} chunks with max concurrency {}",
+            "Recovering Merkle tree from Postgres snapshot in {chunk_count} chunks with max concurrency {}. \
+             Be aware that enabling node pruning during recovery will probably result in a recovery error; always disable pruning \
+             until recovery is complete",
             options.concurrency_limit
         );
 
         let mut storage = pool.connection_tagged("metadata_calculator").await?;
         let remaining_chunks = self
-            .filter_chunks(&mut storage, snapshot.l2_block, &chunks)
+            .filter_chunks(&mut storage, init_params.l2_block, &chunks)
             .await?;
         drop(storage);
         options
@@ -261,9 +294,10 @@ impl AsyncTreeRecovery {
                 .acquire()
                 .await
                 .context("semaphore is never closed")?;
-            if Self::recover_key_chunk(&tree, snapshot.l2_block, chunk, pool, stop_receiver).await?
+            if Self::recover_key_chunk(&tree, init_params.l2_block, chunk, pool, stop_receiver)
+                .await?
             {
-                options.events.chunk_recovered();
+                options.events.chunk_recovered().await;
             }
             anyhow::Ok(())
        });
@@ -279,13 +313,18 @@ impl AsyncTreeRecovery {
 
         let finalize_latency = RECOVERY_METRICS.latency[&RecoveryStage::Finalize].start();
         let actual_root_hash = tree.root_hash().await;
-        anyhow::ensure!(
-            actual_root_hash == snapshot.expected_root_hash,
-            "Root hash of recovered tree {actual_root_hash:?} differs from expected root hash {:?}. \
-             If pruning is enabled and the tree is initialized some time after node recovery, \
-             this is caused by snapshot storage logs getting pruned; this setup is currently not supported",
-            snapshot.expected_root_hash
-        );
+        if let Some(expected_root_hash) = init_params.expected_root_hash {
+            anyhow::ensure!(
+                actual_root_hash == expected_root_hash,
+                "Root hash of recovered tree {actual_root_hash:?} differs from expected root hash {expected_root_hash:?}"
+            );
+        }
+
+        // Check pruning info one last time before finalizing the tree.
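// Pruning runs concurrently with recovery: a batch hard-pruned after its chunk was loaded
// would silently leave stale entries in the tree. That is why the same `check_pruning_info`
// guard also runs in `recover_key_chunk` after each chunk load below.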
+        let mut storage = pool.connection_tagged("metadata_calculator").await?;
+        Self::check_pruning_info(&mut storage, init_params.l2_block).await?;
+        drop(storage);
+
+        let tree = tree.finalize().await?;
         finalize_latency.observe();
         tracing::info!(
@@ -340,6 +379,21 @@ impl AsyncTreeRecovery {
         Ok(output)
     }
 
+    async fn check_pruning_info(
+        storage: &mut Connection<'_, Core>,
+        snapshot_l2_block: L2BlockNumber,
+    ) -> anyhow::Result<()> {
+        let pruning_info = storage.pruning_dal().get_pruning_info().await?;
+        if let Some(last_hard_pruned_l2_block) = pruning_info.last_hard_pruned_l2_block {
+            anyhow::ensure!(
+                last_hard_pruned_l2_block == snapshot_l2_block,
+                "Additional data was pruned compared to tree recovery L2 block #{snapshot_l2_block}: {pruning_info:?}. \
+                 Continuing recovery is impossible; to recover the tree, drop its RocksDB directory, stop pruning and restart recovery"
+            );
+        }
+        Ok(())
+    }
+
     /// Returns `Ok(true)` if the chunk was recovered, `Ok(false)` if the recovery process was interrupted.
     async fn recover_key_chunk(
         tree: &Mutex<AsyncTreeRecovery>,
         snapshot_l2_block: L2BlockNumber,
@@ -363,7 +417,9 @@ impl AsyncTreeRecovery {
             .storage_logs_dal()
             .get_tree_entries_for_l2_block(snapshot_l2_block, key_chunk.clone())
             .await?;
+        Self::check_pruning_info(&mut storage, snapshot_l2_block).await?;
         drop(storage);
+
         let entries_latency = entries_latency.observe();
         tracing::debug!(
             "Loaded {} entries for chunk {key_chunk:?} in {entries_latency:?}",
@@ -414,13 +470,3 @@ impl AsyncTreeRecovery {
         Ok(true)
     }
 }
-
-async fn get_snapshot_recovery(
-    pool: &ConnectionPool<Core>,
-) -> anyhow::Result<Option<SnapshotRecoveryStatus>> {
-    let mut storage = pool.connection_tagged("metadata_calculator").await?;
-    Ok(storage
-        .snapshot_recovery_dal()
-        .get_applied_snapshot_status()
-        .await?)
-}
diff --git a/core/node/metadata_calculator/src/recovery/tests.rs b/core/node/metadata_calculator/src/recovery/tests.rs
index 3861e8a5a84e..4b2ba578a5b6 100644
--- a/core/node/metadata_calculator/src/recovery/tests.rs
+++ b/core/node/metadata_calculator/src/recovery/tests.rs
@@ -1,6 +1,6 @@
 //! Tests for metadata calculator snapshot recovery.
-use std::{path::Path, sync::Mutex};
+use std::{collections::HashMap, path::Path, sync::Mutex};
 
 use assert_matches::assert_matches;
 use tempfile::TempDir;
@@ -15,7 +15,8 @@ use zksync_health_check::{CheckHealth, HealthStatus, ReactiveHealthCheck};
 use zksync_merkle_tree::{domain::ZkSyncTree, recovery::PersistenceThreadHandle, TreeInstruction};
 use zksync_node_genesis::{insert_genesis_batch, GenesisParams};
 use zksync_node_test_utils::prepare_recovery_snapshot;
-use zksync_types::{L1BatchNumber, ProtocolVersionId, StorageLog};
+use zksync_storage::RocksDB;
+use zksync_types::{L1BatchNumber, U256};
 
 use super::*;
 use crate::{
@@ -29,10 +30,11 @@ use crate::{
 
 #[test]
 fn calculating_chunk_count() {
-    let mut snapshot = SnapshotParameters {
+    let mut snapshot = InitParameters {
+        l1_batch: L1BatchNumber(1),
         l2_block: L2BlockNumber(1),
         log_count: 160_000_000,
-        expected_root_hash: H256::zero(),
+        expected_root_hash: Some(H256::zero()),
         desired_chunk_size: 200_000,
     };
     assert_eq!(snapshot.chunk_count(), 800);
@@ -57,13 +59,15 @@ async fn create_tree_recovery(
 async fn basic_recovery_workflow() {
     let pool = ConnectionPool::<Core>::test_pool().await;
     let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB");
-    let snapshot_recovery = prepare_recovery_snapshot_with_genesis(pool.clone(), &temp_dir).await;
+    let root_hash = prepare_storage_logs(pool.clone(), &temp_dir).await;
+    prune_storage(&pool, L1BatchNumber(1)).await;
+
     let config = MetadataCalculatorRecoveryConfig::default();
-    let snapshot = SnapshotParameters::new(&pool, &snapshot_recovery, &config)
+    let init_params = InitParameters::new(&pool, &config)
         .await
-        .unwrap();
-
-    assert!(snapshot.log_count > 200);
+        .unwrap()
+        .expect("no init params");
+    assert!(init_params.log_count > 200, "{init_params:?}");
 
     let (_stop_sender, stop_receiver) = watch::channel(false);
     for chunk_count in [1, 4, 9, 16, 60, 256] {
@@ -78,54 +82,94 @@ async fn basic_recovery_workflow() {
             events: Box::new(RecoveryHealthUpdater::new(&health_updater)),
         };
         let tree = tree
-            .recover(snapshot, recovery_options, &pool, &stop_receiver)
+            .recover(init_params, recovery_options, &pool, &stop_receiver)
             .await
             .unwrap()
             .expect("Tree recovery unexpectedly aborted");
-        assert_eq!(tree.root_hash(), snapshot_recovery.l1_batch_root_hash);
+        assert_eq!(tree.root_hash(), root_hash);
         let health = health_check.check_health().await;
         assert_matches!(health.status(), HealthStatus::Affected);
     }
 }
 
-async fn prepare_recovery_snapshot_with_genesis(
-    pool: ConnectionPool<Core>,
-    temp_dir: &TempDir,
-) -> SnapshotRecoveryStatus {
+async fn prepare_storage_logs(pool: ConnectionPool<Core>, temp_dir: &TempDir) -> H256 {
     let mut storage = pool.connection().await.unwrap();
     insert_genesis_batch(&mut storage, &GenesisParams::mock())
         .await
         .unwrap();
-    let mut logs = gen_storage_logs(100..300, 1).pop().unwrap();
-
-    // Add all logs from the genesis L1 batch to `logs` so that they cover all state keys.
-    let genesis_logs = storage
-        .storage_logs_dal()
-        .get_touched_slots_for_executed_l1_batch(L1BatchNumber(0))
-        .await
-        .unwrap();
-    let genesis_logs = genesis_logs
-        .into_iter()
-        .map(|(key, value)| StorageLog::new_write_log(key, value));
-    logs.extend(genesis_logs);
+    let logs = gen_storage_logs(100..300, 1).pop().unwrap();
     extend_db_state(&mut storage, vec![logs]).await;
     drop(storage);
 
     // Ensure that metadata for L1 batch #1 is present in the DB.
let (calculator, _) = setup_calculator(&temp_dir.path().join("init"), pool, true).await; - let l1_batch_root_hash = run_calculator(calculator).await; - - SnapshotRecoveryStatus { - l1_batch_number: L1BatchNumber(1), - l1_batch_timestamp: 1, - l1_batch_root_hash, - l2_block_number: L2BlockNumber(1), - l2_block_timestamp: 1, - l2_block_hash: H256::zero(), // not used - protocol_version: ProtocolVersionId::latest(), - storage_logs_chunks_processed: vec![], - } + run_calculator(calculator).await +} + +async fn prune_storage(pool: &ConnectionPool, pruned_l1_batch: L1BatchNumber) { + // Emulate pruning batches in the storage. + let mut storage = pool.connection().await.unwrap(); + let (_, pruned_l2_block) = storage + .blocks_dal() + .get_l2_block_range_of_l1_batch(pruned_l1_batch) + .await + .unwrap() + .expect("L1 batch not present in Postgres"); + storage + .pruning_dal() + .soft_prune_batches_range(pruned_l1_batch, pruned_l2_block) + .await + .unwrap(); + let pruning_stats = storage + .pruning_dal() + .hard_prune_batches_range(pruned_l1_batch, pruned_l2_block) + .await + .unwrap(); + assert!( + pruning_stats.deleted_l1_batches > 0 && pruning_stats.deleted_l2_blocks > 0, + "{pruning_stats:?}" + ); +} + +#[tokio::test] +async fn recovery_workflow_for_partial_pruning() { + let pool = ConnectionPool::::test_pool().await; + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let recovery_root_hash = prepare_storage_logs(pool.clone(), &temp_dir).await; + + // Add more storage logs and prune initial logs. + let logs = gen_storage_logs(200..400, 5); + extend_db_state(&mut pool.connection().await.unwrap(), logs).await; + let (calculator, _) = setup_calculator(&temp_dir.path().join("init"), pool.clone(), true).await; + let final_root_hash = run_calculator(calculator).await; + prune_storage(&pool, L1BatchNumber(1)).await; + + let tree_path = temp_dir.path().join("recovery"); + let db = create_db(mock_config(&tree_path)).await.unwrap(); + let tree = GenericAsyncTree::Empty { + db, + mode: MerkleTreeMode::Lightweight, + }; + let (_stop_sender, stop_receiver) = watch::channel(false); + let tree = tree + .ensure_ready( + &MetadataCalculatorRecoveryConfig::default(), + &pool, + pool.clone(), + &ReactiveHealthCheck::new("tree").1, + &stop_receiver, + ) + .await + .unwrap() + .expect("Tree recovery unexpectedly aborted"); + + assert_eq!(tree.root_hash(), recovery_root_hash); + drop(tree); // Release exclusive lock on RocksDB + + // Check that tree operates as intended after recovery + let (calculator, _) = setup_calculator(&tree_path, pool, true).await; + assert_eq!(run_calculator(calculator).await, final_root_hash); } #[derive(Debug)] @@ -164,12 +208,13 @@ impl TestEventListener { } } +#[async_trait] impl HandleRecoveryEvent for TestEventListener { fn recovery_started(&mut self, _chunk_count: u64, recovered_chunk_count: u64) { assert_eq!(recovered_chunk_count, self.expected_recovered_chunks); } - fn chunk_recovered(&self) { + async fn chunk_recovered(&self) { let processed_chunk_count = self.processed_chunk_count.fetch_add(1, Ordering::SeqCst) + 1; if processed_chunk_count >= self.stop_threshold { self.stop_sender.send_replace(true); @@ -201,7 +246,8 @@ impl FaultToleranceCase { async fn recovery_fault_tolerance(chunk_count: u64, case: FaultToleranceCase) { let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let snapshot_recovery = prepare_recovery_snapshot_with_genesis(pool.clone(), 
&temp_dir).await; + let root_hash = prepare_storage_logs(pool.clone(), &temp_dir).await; + prune_storage(&pool, L1BatchNumber(1)).await; let tree_path = temp_dir.path().join("recovery"); let mut config = MetadataCalculatorRecoveryConfig::default(); @@ -217,18 +263,19 @@ async fn recovery_fault_tolerance(chunk_count: u64, case: FaultToleranceCase) { concurrency_limit: 1, events: Box::new(TestEventListener::new(1, stop_sender)), }; - let snapshot = SnapshotParameters::new(&pool, &snapshot_recovery, &config) + let init_params = InitParameters::new(&pool, &config) .await - .unwrap(); + .unwrap() + .expect("no init params"); assert!(tree - .recover(snapshot, recovery_options, &pool, &stop_receiver) + .recover(init_params, recovery_options, &pool, &stop_receiver) .await .unwrap() .is_none()); // Emulate a restart and recover 2 more chunks (or 1 + emulated persistence crash). let (mut tree, handle) = create_tree_recovery(&tree_path, L1BatchNumber(1), &config).await; - assert_ne!(tree.root_hash().await, snapshot_recovery.l1_batch_root_hash); + assert_ne!(tree.root_hash().await, root_hash); let (stop_sender, stop_receiver) = watch::channel(false); let mut event_listener = TestEventListener::new(2, stop_sender).expect_recovered_chunks(1); let expected_recovered_chunks = if matches!(case, FaultToleranceCase::ParallelWithCrash) { @@ -244,7 +291,7 @@ async fn recovery_fault_tolerance(chunk_count: u64, case: FaultToleranceCase) { events: Box::new(event_listener), }; let recovery_result = tree - .recover(snapshot, recovery_options, &pool, &stop_receiver) + .recover(init_params, recovery_options, &pool, &stop_receiver) .await; if matches!(case, FaultToleranceCase::ParallelWithCrash) { let err = format!("{:#}", recovery_result.unwrap_err()); @@ -255,7 +302,7 @@ async fn recovery_fault_tolerance(chunk_count: u64, case: FaultToleranceCase) { // Emulate another restart and recover remaining chunks. let (mut tree, _) = create_tree_recovery(&tree_path, L1BatchNumber(1), &config).await; - assert_ne!(tree.root_hash().await, snapshot_recovery.l1_batch_root_hash); + assert_ne!(tree.root_hash().await, root_hash); let (stop_sender, stop_receiver) = watch::channel(false); let recovery_options = RecoveryOptions { chunk_count, @@ -266,11 +313,11 @@ async fn recovery_fault_tolerance(chunk_count: u64, case: FaultToleranceCase) { ), }; let tree = tree - .recover(snapshot, recovery_options, &pool, &stop_receiver) + .recover(init_params, recovery_options, &pool, &stop_receiver) .await .unwrap() .expect("Tree recovery unexpectedly aborted"); - assert_eq!(tree.root_hash(), snapshot_recovery.l1_batch_root_hash); + assert_eq!(tree.root_hash(), root_hash); } #[derive(Debug)] @@ -345,6 +392,7 @@ async fn entire_recovery_workflow(case: RecoveryWorkflowCase) { extend_db_state_from_l1_batch( &mut storage, snapshot_recovery.l1_batch_number + 1, + snapshot_recovery.l2_block_number + 1, [new_logs.clone()], ) .await; @@ -376,3 +424,129 @@ async fn entire_recovery_workflow(case: RecoveryWorkflowCase) { stop_sender.send_replace(true); calculator_task.await.expect("calculator panicked").unwrap(); } + +/// `pruned_batches == 0` is a sanity check. 
+#[test_casing(4, [0, 1, 2, 4])]
+#[tokio::test]
+async fn recovery_with_further_pruning(pruned_batches: u32) {
+    const NEW_BATCH_COUNT: usize = 5;
+
+    assert!(
+        (pruned_batches as usize) < NEW_BATCH_COUNT,
+        "at least 1 batch should remain in DB"
+    );
+
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let snapshot_logs = gen_storage_logs(100..300, 1).pop().unwrap();
+    let mut storage = pool.connection().await.unwrap();
+    let mut db_transaction = storage.start_transaction().await.unwrap();
+    let snapshot_recovery = prepare_recovery_snapshot(
+        &mut db_transaction,
+        L1BatchNumber(23),
+        L2BlockNumber(42),
+        &snapshot_logs,
+    )
+    .await;
+
+    // Add some batches after recovery.
+    let logs = gen_storage_logs(200..400, NEW_BATCH_COUNT);
+    extend_db_state_from_l1_batch(
+        &mut db_transaction,
+        snapshot_recovery.l1_batch_number + 1,
+        snapshot_recovery.l2_block_number + 1,
+        logs,
+    )
+    .await;
+    db_transaction.commit().await.unwrap();
+
+    let all_logs = storage
+        .storage_logs_dal()
+        .dump_all_storage_logs_for_tests()
+        .await;
+    assert_eq!(all_logs.len(), 400);
+    let initial_writes = storage
+        .storage_logs_dedup_dal()
+        .dump_all_initial_writes_for_tests()
+        .await;
+    let initial_writes: HashMap<_, _> = initial_writes
+        .into_iter()
+        .map(|write| (write.hashed_key, write.index))
+        .collect();
+    drop(storage);
+
+    let instructions: Vec<_> = all_logs
+        .iter()
+        .map(|log| {
+            let leaf_index = initial_writes[&log.hashed_key];
+            let key = U256::from_little_endian(log.hashed_key.as_bytes());
+            TreeInstruction::write(key, leaf_index, log.value)
+        })
+        .collect();
+    let expected_root_hash = ZkSyncTree::process_genesis_batch(&instructions).root_hash;
+
+    if pruned_batches > 0 {
+        prune_storage(&pool, snapshot_recovery.l1_batch_number + pruned_batches).await;
+    }
+
+    // Create a new tree instance. It should recover and process the remaining batches.
+    let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB");
+    let (calculator, _) = setup_calculator(temp_dir.path(), pool, true).await;
+    assert_eq!(run_calculator(calculator).await, expected_root_hash);
+}
+
+#[derive(Debug)]
+struct PruningEventListener {
+    pool: ConnectionPool<Core>,
+    pruned_l1_batch: L1BatchNumber,
+}
+
+#[async_trait]
+impl HandleRecoveryEvent for PruningEventListener {
+    async fn chunk_recovered(&self) {
+        prune_storage(&self.pool, self.pruned_l1_batch).await;
+    }
+}
+
+#[tokio::test]
+async fn pruning_during_recovery_is_detected() {
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB");
+
+    let mut storage = pool.connection().await.unwrap();
+    insert_genesis_batch(&mut storage, &GenesisParams::mock())
+        .await
+        .unwrap();
+    let logs = gen_storage_logs(200..400, 5);
+    extend_db_state(&mut storage, logs).await;
+    drop(storage);
+    prune_storage(&pool, L1BatchNumber(1)).await;
+
+    let tree_path = temp_dir.path().join("recovery");
+    let config = MetadataCalculatorRecoveryConfig::default();
+    let (tree, _) = create_tree_recovery(&tree_path, L1BatchNumber(1), &config).await;
+    let (_stop_sender, stop_receiver) = watch::channel(false);
+    let recovery_options = RecoveryOptions {
+        chunk_count: 5,
+        concurrency_limit: 1,
+        events: Box::new(PruningEventListener {
+            pool: pool.clone(),
+            pruned_l1_batch: L1BatchNumber(3),
+        }),
+    };
+    let init_params = InitParameters::new(&pool, &config)
+        .await
+        .unwrap()
+        .expect("no init params");
+
+    let err = tree
+        .recover(init_params, recovery_options, &pool, &stop_receiver)
+        .await
+        .unwrap_err();
+    let err = format!("{err:#}").to_lowercase();
+    assert!(err.contains("continuing recovery is impossible"), "{err}");
+
+    // Because of an abrupt error, terminating a RocksDB instance needs to be handled explicitly.
+    tokio::task::spawn_blocking(RocksDB::await_rocksdb_termination)
+        .await
+        .unwrap();
+}
diff --git a/core/node/metadata_calculator/src/repair.rs b/core/node/metadata_calculator/src/repair.rs
new file mode 100644
index 000000000000..9dfec4348ed6
--- /dev/null
+++ b/core/node/metadata_calculator/src/repair.rs
@@ -0,0 +1,258 @@
+//! High-level wrapper for the stale keys repair task.
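// A sketch of how this task gets wired in, based on the API added in lib.rs above (the
// surrounding task-spawning scaffolding is illustrative, not the node's actual wiring):
//
//     let calculator = MetadataCalculator::new(config, None, pool).await?;
//     let repair_task = calculator.stale_keys_repair_task();
//     let health = repair_task.health_check(); // register with the node's health checks
//     tokio::spawn(repair_task.run(stop_receiver.clone()));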
+
+use std::{
+    sync::{Arc, Weak},
+    time::Duration,
+};
+
+use anyhow::Context as _;
+use async_trait::async_trait;
+use once_cell::sync::OnceCell;
+use serde::Serialize;
+use tokio::sync::watch;
+use zksync_health_check::{CheckHealth, Health, HealthStatus};
+use zksync_merkle_tree::repair;
+
+use crate::LazyAsyncTreeReader;
+
+#[derive(Debug, Serialize)]
+struct RepairHealthDetails {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    earliest_checked_version: Option<u64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    latest_checked_version: Option<u64>,
+    repaired_key_count: usize,
+}
+
+impl From<repair::StaleKeysRepairStats> for RepairHealthDetails {
+    fn from(stats: repair::StaleKeysRepairStats) -> Self {
+        let versions = stats.checked_versions.as_ref();
+        Self {
+            earliest_checked_version: versions.map(|versions| *versions.start()),
+            latest_checked_version: versions.map(|versions| *versions.end()),
+            repaired_key_count: stats.repaired_key_count,
+        }
+    }
+}
+
+#[derive(Debug, Default)]
+struct RepairHealthCheck {
+    handle: OnceCell<Weak<repair::StaleKeysRepairHandle>>,
+}
+
+#[async_trait]
+impl CheckHealth for RepairHealthCheck {
+    fn name(&self) -> &'static str {
+        "tree_stale_keys_repair"
+    }
+
+    async fn check_health(&self) -> Health {
+        let Some(weak_handle) = self.handle.get() else {
+            return HealthStatus::Affected.into();
+        };
+        let Some(handle) = weak_handle.upgrade() else {
+            return HealthStatus::ShutDown.into();
+        };
+        Health::from(HealthStatus::Ready).with_details(RepairHealthDetails::from(handle.stats()))
+    }
+}
+
+/// Stale keys repair task.
+#[derive(Debug)]
+#[must_use = "Task should `run()` in a managed Tokio task"]
+pub struct StaleKeysRepairTask {
+    tree_reader: LazyAsyncTreeReader,
+    health_check: Arc<RepairHealthCheck>,
+    poll_interval: Duration,
+}
+
+impl StaleKeysRepairTask {
+    pub(super) fn new(tree_reader: LazyAsyncTreeReader) -> Self {
+        Self {
+            tree_reader,
+            health_check: Arc::default(),
+            poll_interval: Duration::from_secs(60),
+        }
+    }
+
+    pub fn health_check(&self) -> Arc<dyn CheckHealth> {
+        self.health_check.clone()
+    }
+
+    /// Runs this task indefinitely.
+    #[tracing::instrument(skip_all)]
+    pub async fn run(self, mut stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
+        let db = tokio::select! {
+            res = self.tree_reader.wait() => {
+                match res {
+                    Some(reader) => reader.into_db(),
+                    None => {
+                        tracing::info!("Merkle tree dropped; shutting down stale keys repair");
+                        return Ok(());
+                    }
+                }
+            }
+            _ = stop_receiver.changed() => {
+                tracing::info!("Stop signal received before Merkle tree is initialized; shutting down stale keys repair");
+                return Ok(());
+            }
+        };
+
+        let (mut task, handle) = repair::StaleKeysRepairTask::new(db);
+        task.set_poll_interval(self.poll_interval);
+        let handle = Arc::new(handle);
+        self.health_check
+            .handle
+            .set(Arc::downgrade(&handle))
+            .map_err(|_| anyhow::anyhow!("failed setting health check handle"))?;
+
+        let mut task = tokio::task::spawn_blocking(|| task.run());
+        tokio::select! {
+            res = &mut task => {
+                tracing::error!("Stale keys repair spontaneously stopped");
+                res.context("repair task panicked")?
+            },
+            _ = stop_receiver.changed() => {
+                tracing::info!("Stop signal received, stale keys repair is shutting down");
+                // This is the only strong reference to the handle, so dropping it should signal the task to stop.
+                drop(handle);
+                task.await.context("stale keys repair task panicked")?
+ } + } + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use tempfile::TempDir; + use zksync_dal::{ConnectionPool, Core}; + use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; + use zksync_types::L1BatchNumber; + + use super::*; + use crate::{ + tests::{extend_db_state, gen_storage_logs, mock_config, reset_db_state}, + MetadataCalculator, + }; + + const POLL_INTERVAL: Duration = Duration::from_millis(50); + + async fn wait_for_health( + check: &dyn CheckHealth, + mut condition: impl FnMut(&Health) -> bool, + ) -> Health { + loop { + let health = check.check_health().await; + if condition(&health) { + return health; + } else if matches!( + health.status(), + HealthStatus::ShutDown | HealthStatus::Panicked + ) { + panic!("reached terminal health: {health:?}"); + } + tokio::time::sleep(POLL_INTERVAL).await; + } + } + + #[tokio::test] + async fn repair_task_basics() { + let pool = ConnectionPool::::test_pool().await; + let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); + let config = mock_config(temp_dir.path()); + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + reset_db_state(&pool, 5).await; + + let calculator = MetadataCalculator::new(config, None, pool.clone()) + .await + .unwrap(); + let reader = calculator.tree_reader(); + let mut repair_task = calculator.stale_keys_repair_task(); + repair_task.poll_interval = POLL_INTERVAL; + let health_check = repair_task.health_check(); + + let (stop_sender, stop_receiver) = watch::channel(false); + let calculator_handle = tokio::spawn(calculator.run(stop_receiver.clone())); + let repair_task_handle = tokio::spawn(repair_task.run(stop_receiver)); + wait_for_health(&health_check, |health| { + matches!(health.status(), HealthStatus::Ready) + }) + .await; + + // Wait until the calculator is initialized and then drop the reader so that it doesn't lock RocksDB. + { + let reader = reader.wait().await.unwrap(); + while reader.clone().info().await.next_l1_batch_number < L1BatchNumber(6) { + tokio::time::sleep(POLL_INTERVAL).await; + } + } + + // Wait until all tree versions have been checked. 
+ let health = wait_for_health(&health_check, |health| { + if !matches!(health.status(), HealthStatus::Ready) { + return false; + } + let details = health.details().unwrap(); + details.get("latest_checked_version") == Some(&5.into()) + }) + .await; + let details = health.details().unwrap(); + assert_eq!(details["earliest_checked_version"], 1); + assert_eq!(details["repaired_key_count"], 0); + + stop_sender.send_replace(true); + calculator_handle.await.unwrap().unwrap(); + repair_task_handle.await.unwrap().unwrap(); + wait_for_health(&health_check, |health| { + matches!(health.status(), HealthStatus::ShutDown) + }) + .await; + + test_repair_persistence(temp_dir, pool).await; + } + + async fn test_repair_persistence(temp_dir: TempDir, pool: ConnectionPool) { + let config = mock_config(temp_dir.path()); + let calculator = MetadataCalculator::new(config, None, pool.clone()) + .await + .unwrap(); + let mut repair_task = calculator.stale_keys_repair_task(); + repair_task.poll_interval = POLL_INTERVAL; + let health_check = repair_task.health_check(); + + let (stop_sender, stop_receiver) = watch::channel(false); + let calculator_handle = tokio::spawn(calculator.run(stop_receiver.clone())); + let repair_task_handle = tokio::spawn(repair_task.run(stop_receiver)); + wait_for_health(&health_check, |health| { + matches!(health.status(), HealthStatus::Ready) + }) + .await; + + // Add more batches to the storage. + let mut storage = pool.connection().await.unwrap(); + let logs = gen_storage_logs(200..300, 5); + extend_db_state(&mut storage, logs).await; + + // Wait until new tree versions have been checked. + let health = wait_for_health(&health_check, |health| { + if !matches!(health.status(), HealthStatus::Ready) { + return false; + } + let details = health.details().unwrap(); + details.get("latest_checked_version") == Some(&10.into()) + }) + .await; + let details = health.details().unwrap(); + assert_eq!(details["earliest_checked_version"], 6); + assert_eq!(details["repaired_key_count"], 0); + + stop_sender.send_replace(true); + calculator_handle.await.unwrap().unwrap(); + repair_task_handle.await.unwrap().unwrap(); + } +} diff --git a/core/node/metadata_calculator/src/tests.rs b/core/node/metadata_calculator/src/tests.rs index b878b0c4a533..9717ce5682ce 100644 --- a/core/node/metadata_calculator/src/tests.rs +++ b/core/node/metadata_calculator/src/tests.rs @@ -23,7 +23,6 @@ use zksync_types::{ block::{L1BatchHeader, L1BatchTreeData}, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, StorageKey, StorageLog, H256, }; -use zksync_utils::u32_to_h256; use super::{ helpers::L1BatchWithLogs, GenericAsyncTree, MetadataCalculator, MetadataCalculatorConfig, @@ -696,7 +695,9 @@ async fn setup_calculator_with_options( object_store: Option>, ) -> MetadataCalculator { let mut storage = pool.connection().await.unwrap(); - if storage.blocks_dal().is_genesis_needed().await.unwrap() { + let pruning_info = storage.pruning_dal().get_pruning_info().await.unwrap(); + let has_pruning_logs = pruning_info.last_hard_pruned_l1_batch.is_some(); + if !has_pruning_logs && storage.blocks_dal().is_genesis_needed().await.unwrap() { insert_genesis_batch(&mut storage, &GenesisParams::mock()) .await .unwrap(); @@ -782,13 +783,26 @@ pub(super) async fn extend_db_state( .await .unwrap() .expect("no L1 batches in Postgres"); - extend_db_state_from_l1_batch(&mut storage, sealed_l1_batch + 1, new_logs).await; + let sealed_l2_block = storage + .blocks_dal() + .get_sealed_l2_block_number() + .await + .unwrap() + .expect("no L2 blocks in 
Postgres"); + extend_db_state_from_l1_batch( + &mut storage, + sealed_l1_batch + 1, + sealed_l2_block + 1, + new_logs, + ) + .await; storage.commit().await.unwrap(); } pub(super) async fn extend_db_state_from_l1_batch( storage: &mut Connection<'_, Core>, next_l1_batch: L1BatchNumber, + mut next_l2_block: L2BlockNumber, new_logs: impl IntoIterator>, ) { assert!(storage.in_transaction(), "must be called in DB transaction"); @@ -797,8 +811,7 @@ pub(super) async fn extend_db_state_from_l1_batch( let header = create_l1_batch(idx); let batch_number = header.number; // Assumes that L1 batch consists of only one L2 block. - let l2_block_header = create_l2_block(idx); - let l2_block_number = l2_block_header.number; + let l2_block_header = create_l2_block(next_l2_block.0); storage .blocks_dal() @@ -812,7 +825,7 @@ pub(super) async fn extend_db_state_from_l1_batch( .unwrap(); storage .storage_logs_dal() - .insert_storage_logs(l2_block_number, &batch_logs) + .insert_storage_logs(next_l2_block, &batch_logs) .await .unwrap(); storage @@ -831,6 +844,8 @@ pub(super) async fn extend_db_state_from_l1_batch( .await .unwrap(); insert_initial_writes_for_batch(storage, batch_number).await; + + next_l2_block += 1; } } @@ -888,9 +903,9 @@ pub(crate) fn gen_storage_logs( let proof_keys = accounts.iter().flat_map(|&account| { account_keys .clone() - .map(move |i| StorageKey::new(account, u32_to_h256(i))) + .map(move |i| StorageKey::new(account, H256::from_low_u64_be(i.into()))) }); - let proof_values = indices.map(u32_to_h256); + let proof_values = indices.map(|i| H256::from_low_u64_be(i.into())); let logs: Vec<_> = proof_keys .zip(proof_values) diff --git a/core/node/metadata_calculator/src/updater.rs b/core/node/metadata_calculator/src/updater.rs index e2acf62dea8a..17fd5d900eab 100644 --- a/core/node/metadata_calculator/src/updater.rs +++ b/core/node/metadata_calculator/src/updater.rs @@ -152,10 +152,6 @@ impl TreeUpdater { // right away without having to implement dedicated code. 
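The test-helper hunk above replaces `zksync_utils::u32_to_h256` with the stock `H256::from_low_u64_be` constructor, which zero-pads the value into the low (rightmost) bytes of the 32-byte key. A quick check of the equivalence being relied on, assuming the `H256` re-export from `zksync_types`:

```rust
use zksync_types::H256;

fn main() {
    let key = H256::from_low_u64_be(42);
    // Big-endian: the value lands in the lowest-order byte, the rest stays zero.
    assert_eq!(key.as_bytes()[31], 42);
    assert!(key.as_bytes()[..31].iter().all(|&byte| byte == 0));
}
```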
if let Some(object_key) = &object_key { - storage - .tee_verifier_input_producer_dal() - .create_tee_verifier_input_producer_job(l1_batch_number) - .await?; // Save the proof generation details to Postgres storage .proof_generation_dal() diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 1df47e775539..d85f3dc7c8e9 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -45,7 +45,6 @@ zksync_node_sync.workspace = true zksync_node_api_server.workspace = true zksync_node_consensus.workspace = true zksync_contract_verification_server.workspace = true -zksync_tee_verifier_input_producer.workspace = true zksync_queued_job_processor.workspace = true zksync_reorg_detector.workspace = true zksync_vm_runner.workspace = true diff --git a/core/node/node_framework/src/implementations/layers/contract_verification_api.rs b/core/node/node_framework/src/implementations/layers/contract_verification_api.rs index 3f1f76cc1c12..2ca7cc25a1fd 100644 --- a/core/node/node_framework/src/implementations/layers/contract_verification_api.rs +++ b/core/node/node_framework/src/implementations/layers/contract_verification_api.rs @@ -69,7 +69,7 @@ impl Task for ContractVerificationApiTask { zksync_contract_verification_server::start_server( self.master_pool, self.replica_pool, - self.config, + self.config.bind_addr(), stop_receiver.0, ) .await diff --git a/core/node/node_framework/src/implementations/layers/da_clients/celestia.rs b/core/node/node_framework/src/implementations/layers/da_clients/celestia.rs new file mode 100644 index 000000000000..69f5553d4da8 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/da_clients/celestia.rs @@ -0,0 +1,46 @@ +use zksync_config::{configs::da_client::celestia::CelestiaSecrets, CelestiaConfig}; +use zksync_da_client::DataAvailabilityClient; +use zksync_da_clients::celestia::CelestiaClient; + +use crate::{ + implementations::resources::da_client::DAClientResource, + wiring_layer::{WiringError, WiringLayer}, + IntoContext, +}; + +#[derive(Debug)] +pub struct CelestiaWiringLayer { + config: CelestiaConfig, + secrets: CelestiaSecrets, +} + +impl CelestiaWiringLayer { + pub fn new(config: CelestiaConfig, secrets: CelestiaSecrets) -> Self { + Self { config, secrets } + } +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub client: DAClientResource, +} + +#[async_trait::async_trait] +impl WiringLayer for CelestiaWiringLayer { + type Input = (); + type Output = Output; + + fn layer_name(&self) -> &'static str { + "celestia_client_layer" + } + + async fn wire(self, _input: Self::Input) -> Result { + let client: Box = + Box::new(CelestiaClient::new(self.config, self.secrets).await?); + + Ok(Self::Output { + client: DAClientResource(client), + }) + } +} diff --git a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs new file mode 100644 index 000000000000..d5391ee433f9 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs @@ -0,0 +1,46 @@ +use zksync_config::{configs::da_client::eigen::EigenSecrets, EigenConfig}; +use zksync_da_client::DataAvailabilityClient; +use zksync_da_clients::eigen::EigenClient; + +use crate::{ + implementations::resources::da_client::DAClientResource, + wiring_layer::{WiringError, WiringLayer}, + IntoContext, +}; + +#[derive(Debug)] +pub struct EigenWiringLayer { + config: EigenConfig, + secrets: 
EigenSecrets, +} + +impl EigenWiringLayer { + pub fn new(config: EigenConfig, secrets: EigenSecrets) -> Self { + Self { config, secrets } + } +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub client: DAClientResource, +} + +#[async_trait::async_trait] +impl WiringLayer for EigenWiringLayer { + type Input = (); + type Output = Output; + + fn layer_name(&self) -> &'static str { + "eigen_client_layer" + } + + async fn wire(self, _input: Self::Input) -> Result { + let client: Box = + Box::new(EigenClient::new(self.config, self.secrets).await?); + + Ok(Self::Output { + client: DAClientResource(client), + }) + } +} diff --git a/core/node/node_framework/src/implementations/layers/da_clients/mod.rs b/core/node/node_framework/src/implementations/layers/da_clients/mod.rs index 48311ce4c3f2..c7865c74f3b1 100644 --- a/core/node/node_framework/src/implementations/layers/da_clients/mod.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/mod.rs @@ -1,3 +1,5 @@ pub mod avail; +pub mod celestia; +pub mod eigen; pub mod no_da; pub mod object_store; diff --git a/core/node/node_framework/src/implementations/layers/gas_adjuster.rs b/core/node/node_framework/src/implementations/layers/gas_adjuster.rs index 229700289a71..241c4d829beb 100644 --- a/core/node/node_framework/src/implementations/layers/gas_adjuster.rs +++ b/core/node/node_framework/src/implementations/layers/gas_adjuster.rs @@ -1,8 +1,9 @@ use std::sync::Arc; use anyhow::Context; -use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig, GenesisConfig}; +use zksync_config::{GasAdjusterConfig, GenesisConfig}; use zksync_node_fee_model::l1_gas_price::GasAdjuster; +use zksync_types::pubdata_da::PubdataSendingMode; use crate::{ implementations::resources::{ diff --git a/core/node/node_framework/src/implementations/layers/l1_gas.rs b/core/node/node_framework/src/implementations/layers/l1_gas.rs index 35c4bc3fc205..28f81bb45438 100644 --- a/core/node/node_framework/src/implementations/layers/l1_gas.rs +++ b/core/node/node_framework/src/implementations/layers/l1_gas.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use zksync_config::configs::chain::StateKeeperConfig; +use zksync_config::configs::chain::{FeeModelVersion, StateKeeperConfig}; use zksync_node_fee_model::{ApiFeeInputProvider, MainNodeFeeInputProvider}; -use zksync_types::fee_model::FeeModelConfig; +use zksync_types::fee_model::{FeeModelConfig, FeeModelConfigV1, FeeModelConfigV2}; use crate::{ implementations::resources::{ @@ -20,7 +20,7 @@ use crate::{ /// Adds several resources that depend on L1 gas price. 
#[derive(Debug)] pub struct L1GasLayer { - state_keeper_config: StateKeeperConfig, + fee_model_config: FeeModelConfig, } #[derive(Debug, FromContext)] @@ -42,9 +42,25 @@ pub struct Output { } impl L1GasLayer { - pub fn new(state_keeper_config: StateKeeperConfig) -> Self { + pub fn new(state_keeper_config: &StateKeeperConfig) -> Self { Self { - state_keeper_config, + fee_model_config: Self::map_config(state_keeper_config), + } + } + + fn map_config(state_keeper_config: &StateKeeperConfig) -> FeeModelConfig { + match state_keeper_config.fee_model_version { + FeeModelVersion::V1 => FeeModelConfig::V1(FeeModelConfigV1 { + minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price, + }), + FeeModelVersion::V2 => FeeModelConfig::V2(FeeModelConfigV2 { + minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price, + compute_overhead_part: state_keeper_config.compute_overhead_part, + pubdata_overhead_part: state_keeper_config.pubdata_overhead_part, + batch_overhead_l1_gas: state_keeper_config.batch_overhead_l1_gas, + max_gas_per_batch: state_keeper_config.max_gas_per_batch, + max_pubdata_per_batch: state_keeper_config.max_pubdata_per_batch, + }), } } } @@ -64,7 +80,7 @@ impl WiringLayer for L1GasLayer { let main_fee_input_provider = Arc::new(MainNodeFeeInputProvider::new( input.gas_adjuster.0.clone(), ratio_provider.0, - FeeModelConfig::from_state_keeper_config(&self.state_keeper_config), + self.fee_model_config, )); let replica_pool = input.replica_pool.get().await?; diff --git a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs index 827ec69d9427..45aa320786ef 100644 --- a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs +++ b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs @@ -7,7 +7,8 @@ use std::{ use anyhow::Context as _; use zksync_config::configs::{api::MerkleTreeApiConfig, database::MerkleTreeMode}; use zksync_metadata_calculator::{ - LazyAsyncTreeReader, MerkleTreePruningTask, MetadataCalculator, MetadataCalculatorConfig, + LazyAsyncTreeReader, MerkleTreePruningTask, MerkleTreeReaderConfig, MetadataCalculator, + MetadataCalculatorConfig, StaleKeysRepairTask, TreeReaderTask, }; use zksync_storage::RocksDB; @@ -19,7 +20,7 @@ use crate::{ web3_api::TreeApiClientResource, }, service::{ShutdownHook, StopReceiver}, - task::{Task, TaskId}, + task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, FromContext, IntoContext, }; @@ -30,6 +31,7 @@ pub struct MetadataCalculatorLayer { config: MetadataCalculatorConfig, tree_api_config: Option, pruning_config: Option, + stale_keys_repair_enabled: bool, } #[derive(Debug, FromContext)] @@ -55,6 +57,9 @@ pub struct Output { /// Only provided if configuration is provided. #[context(task)] pub pruning_task: Option, + /// Only provided if enabled in the config. 
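`L1GasLayer::map_config` above now performs the fee-model version dispatch inside the layer instead of relying on `FeeModelConfig::from_state_keeper_config`. A self-contained toy of the same V1/V2 mapping; the field set is trimmed, and the real configs carry more knobs:

```rust
#[derive(Debug, PartialEq)]
enum FeeModelConfig {
    V1 { minimal_l2_gas_price: u64 },
    V2 { minimal_l2_gas_price: u64, max_gas_per_batch: u64 },
}

enum FeeModelVersion {
    V1,
    V2,
}

// Mirrors the shape of map_config: pick the variant from the configured version.
fn map_config(
    version: FeeModelVersion,
    minimal_l2_gas_price: u64,
    max_gas_per_batch: u64,
) -> FeeModelConfig {
    match version {
        FeeModelVersion::V1 => FeeModelConfig::V1 { minimal_l2_gas_price },
        FeeModelVersion::V2 => FeeModelConfig::V2 {
            minimal_l2_gas_price,
            max_gas_per_batch,
        },
    }
}

fn main() {
    let config = map_config(FeeModelVersion::V2, 100_000_000, 80_000_000);
    assert_eq!(
        config,
        FeeModelConfig::V2 {
            minimal_l2_gas_price: 100_000_000,
            max_gas_per_batch: 80_000_000,
        }
    );
}
```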
+ #[context(task)] + pub stale_keys_repair_task: Option, pub rocksdb_shutdown_hook: ShutdownHook, } @@ -64,6 +69,7 @@ impl MetadataCalculatorLayer { config, tree_api_config: None, pruning_config: None, + stale_keys_repair_enabled: false, } } @@ -76,6 +82,11 @@ impl MetadataCalculatorLayer { self.pruning_config = Some(pruning_config); self } + + pub fn with_stale_keys_repair(mut self) -> Self { + self.stale_keys_repair_enabled = true; + self + } } #[async_trait::async_trait] @@ -140,6 +151,12 @@ impl WiringLayer for MetadataCalculatorLayer { ) .transpose()?; + let stale_keys_repair_task = if self.stale_keys_repair_enabled { + Some(metadata_calculator.stale_keys_repair_task()) + } else { + None + }; + let tree_api_client = TreeApiClientResource(Arc::new(metadata_calculator.tree_reader())); let rocksdb_shutdown_hook = ShutdownHook::new("rocksdb_terminaton", async { @@ -154,6 +171,7 @@ impl WiringLayer for MetadataCalculatorLayer { tree_api_client, tree_api_task, pruning_task, + stale_keys_repair_task, rocksdb_shutdown_hook, }) } @@ -195,6 +213,17 @@ impl Task for TreeApiTask { } } +#[async_trait::async_trait] +impl Task for StaleKeysRepairTask { + fn id(&self) -> TaskId { + "merkle_tree_stale_keys_repair_task".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} + #[async_trait::async_trait] impl Task for MerkleTreePruningTask { fn id(&self) -> TaskId { @@ -205,3 +234,65 @@ impl Task for MerkleTreePruningTask { (*self).run(stop_receiver.0).await } } + +/// Mutually exclusive with [`MetadataCalculatorLayer`]. +#[derive(Debug)] +pub struct TreeApiServerLayer { + config: MerkleTreeReaderConfig, + api_config: MerkleTreeApiConfig, +} + +impl TreeApiServerLayer { + pub fn new(config: MerkleTreeReaderConfig, api_config: MerkleTreeApiConfig) -> Self { + Self { config, api_config } + } +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct TreeApiServerOutput { + tree_api_client: TreeApiClientResource, + #[context(task)] + tree_reader_task: TreeReaderTask, + #[context(task)] + tree_api_task: TreeApiTask, +} + +#[async_trait::async_trait] +impl WiringLayer for TreeApiServerLayer { + type Input = (); + type Output = TreeApiServerOutput; + + fn layer_name(&self) -> &'static str { + "tree_api_server" + } + + async fn wire(self, (): Self::Input) -> Result { + let tree_reader_task = TreeReaderTask::new(self.config); + let bind_addr = (Ipv4Addr::UNSPECIFIED, self.api_config.port).into(); + let tree_api_task = TreeApiTask { + bind_addr, + tree_reader: tree_reader_task.tree_reader(), + }; + Ok(TreeApiServerOutput { + tree_api_client: TreeApiClientResource(Arc::new(tree_reader_task.tree_reader())), + tree_api_task, + tree_reader_task, + }) + } +} + +#[async_trait::async_trait] +impl Task for TreeReaderTask { + fn kind(&self) -> TaskKind { + TaskKind::OneshotTask + } + + fn id(&self) -> TaskId { + "merkle_tree_reader_task".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 75828da19023..11a62c9333b2 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -33,7 +33,6 @@ pub mod reorg_detector; pub mod sigint; pub mod state_keeper; pub mod sync_state_updater; -pub mod tee_verifier_input_producer; pub mod 
tree_data_fetcher; pub mod validate_chain_ids; pub mod vm_runner; diff --git a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs index b53ff73c1a04..3e1269caa4e4 100644 --- a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs +++ b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core}; use zksync_object_store::ObjectStore; -use zksync_types::commitment::L1BatchCommitmentMode; +use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; use crate::{ implementations::resources::{ @@ -21,6 +21,7 @@ use crate::{ pub struct ProofDataHandlerLayer { proof_data_handler_config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, } #[derive(Debug, FromContext)] @@ -41,10 +42,12 @@ impl ProofDataHandlerLayer { pub fn new( proof_data_handler_config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, ) -> Self { Self { proof_data_handler_config, commitment_mode, + l2_chain_id, } } } @@ -67,6 +70,7 @@ impl WiringLayer for ProofDataHandlerLayer { blob_store, main_pool, commitment_mode: self.commitment_mode, + l2_chain_id: self.l2_chain_id, }; Ok(Output { task }) @@ -79,6 +83,7 @@ pub struct ProofDataHandlerTask { blob_store: Arc, main_pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, } #[async_trait::async_trait] @@ -93,6 +98,7 @@ impl Task for ProofDataHandlerTask { self.blob_store, self.main_pool, self.commitment_mode, + self.l2_chain_id, stop_receiver.0, ) .await diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs index 31b76550767c..2c23f5aa9a17 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs @@ -8,6 +8,7 @@ use zksync_types::L2ChainId; use crate::{ implementations::resources::{ action_queue::ActionQueueSenderResource, + healthcheck::AppHealthCheckResource, main_node_client::MainNodeClientResource, pools::{MasterPool, PoolResource}, state_keeper::{ConditionalSealerResource, StateKeeperIOResource}, @@ -26,6 +27,7 @@ pub struct ExternalIOLayer { #[derive(Debug, FromContext)] #[context(crate = crate)] pub struct Input { + pub app_health: AppHealthCheckResource, pub pool: PoolResource, pub main_node_client: MainNodeClientResource, } @@ -57,6 +59,10 @@ impl WiringLayer for ExternalIOLayer { async fn wire(self, input: Self::Input) -> Result { // Create `SyncState` resource. let sync_state = SyncState::default(); + let app_health = &input.app_health.0; + app_health + .insert_custom_component(Arc::new(sync_state.clone())) + .map_err(WiringError::internal)?; // Create `ActionQueueSender` resource. 
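`ExternalIOLayer` above (and `SyncStateUpdaterLayer` later in this diff) now registers the shared `SyncState` as a custom app-health component, so the node's health endpoint reflects sync status. A toy model of that registration with stand-in types, not the real `zksync_health_check` API:

```rust
use std::sync::Arc;

trait CheckHealth: Send + Sync {
    fn name(&self) -> &'static str;
}

#[derive(Clone)]
struct SyncState;

impl CheckHealth for SyncState {
    fn name(&self) -> &'static str {
        "sync_state"
    }
}

struct AppHealthCheck(Vec<Arc<dyn CheckHealth>>);

impl AppHealthCheck {
    fn insert_custom_component(&mut self, component: Arc<dyn CheckHealth>) {
        self.0.push(component);
    }
}

fn main() {
    let sync_state = SyncState;
    let mut app_health = AppHealthCheck(Vec::new());
    // A clone is registered; the layer keeps using the original for its IO.
    app_health.insert_custom_component(Arc::new(sync_state.clone()));
    assert_eq!(app_health.0[0].name(), "sync_state");
}
```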
        let (action_queue_sender, action_queue) = ActionQueue::new();
diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs
index ec2c415b9bbd..77992f34c7f5 100644
--- a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs
+++ b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs
@@ -4,7 +4,7 @@ use zksync_config::configs::{
     wallets,
 };
 use zksync_state_keeper::{MempoolFetcher, MempoolGuard, MempoolIO, SequencerSealer};
-use zksync_types::L2ChainId;
+use zksync_types::{commitment::L1BatchCommitmentMode, Address, L2ChainId};
 
 use crate::{
     implementations::resources::{
@@ -39,6 +39,8 @@ pub struct MempoolIOLayer {
     state_keeper_config: StateKeeperConfig,
     mempool_config: MempoolConfig,
     wallets: wallets::StateKeeper,
+    l2_da_validator_addr: Option<Address>,
+    l1_batch_commit_data_generator_mode: L1BatchCommitmentMode,
 }
 
 #[derive(Debug, FromContext)]
@@ -63,12 +65,16 @@ impl MempoolIOLayer {
         state_keeper_config: StateKeeperConfig,
         mempool_config: MempoolConfig,
         wallets: wallets::StateKeeper,
+        l2_da_validator_addr: Option<Address>,
+        l1_batch_commit_data_generator_mode: L1BatchCommitmentMode,
     ) -> Self {
         Self {
             zksync_network_id,
             state_keeper_config,
             mempool_config,
             wallets,
+            l2_da_validator_addr,
+            l1_batch_commit_data_generator_mode,
         }
     }
 
@@ -129,6 +135,8 @@ impl WiringLayer for MempoolIOLayer {
             self.wallets.fee_account.address(),
             self.mempool_config.delay_interval(),
             self.zksync_network_id,
+            self.l2_da_validator_addr,
+            self.l1_batch_commit_data_generator_mode,
         )?;
 
         // Create sealer.
diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs
index 5f63e4e19475..1a07591c1cd9 100644
--- a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs
+++ b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs
@@ -35,7 +35,7 @@ use crate::{
 /// - `L2BlockSealerTask`
 #[derive(Debug)]
 pub struct OutputHandlerLayer {
-    l2_shared_bridge_addr: Address,
+    l2_legacy_shared_bridge_addr: Option<Address>,
     l2_block_seal_queue_capacity: usize,
     /// Whether transactions should be pre-inserted to DB.
     /// Should be set to `true` for EN's IO as EN doesn't store transactions in DB
@@ -63,9 +63,12 @@ pub struct Output {
 }
 
 impl OutputHandlerLayer {
-    pub fn new(l2_shared_bridge_addr: Address, l2_block_seal_queue_capacity: usize) -> Self {
+    pub fn new(
+        l2_legacy_shared_bridge_addr: Option<Address>,
+        l2_block_seal_queue_capacity: usize,
+    ) -> Self {
         Self {
-            l2_shared_bridge_addr,
+            l2_legacy_shared_bridge_addr,
             l2_block_seal_queue_capacity,
             pre_insert_txs: false,
             protective_reads_persistence_enabled: false,
@@ -103,11 +106,13 @@ impl WiringLayer for OutputHandlerLayer {
             .get_custom(L2BlockSealProcess::subtasks_len())
             .await
             .context("Get master pool")?;
+
         let (mut persistence, l2_block_sealer) = StateKeeperPersistence::new(
             persistence_pool.clone(),
-            self.l2_shared_bridge_addr,
+            self.l2_legacy_shared_bridge_addr,
             self.l2_block_seal_queue_capacity,
-        );
+        )
+        .await?;
         if self.pre_insert_txs {
             persistence = persistence.with_tx_insertion();
         }
diff --git a/core/node/node_framework/src/implementations/layers/sync_state_updater.rs b/core/node/node_framework/src/implementations/layers/sync_state_updater.rs
index 1f86b43f7a5b..dd2652dfddb0 100644
--- a/core/node/node_framework/src/implementations/layers/sync_state_updater.rs
+++ b/core/node/node_framework/src/implementations/layers/sync_state_updater.rs
@@ -1,9 +1,12 @@
+use std::sync::Arc;
+
 use zksync_dal::{ConnectionPool, Core};
 use zksync_node_sync::SyncState;
 use zksync_web3_decl::client::{DynClient, L2};
 
 use crate::{
     implementations::resources::{
+        healthcheck::AppHealthCheckResource,
         main_node_client::MainNodeClientResource,
         pools::{MasterPool, PoolResource},
         sync_state::SyncStateResource,
@@ -24,6 +27,7 @@ pub struct SyncStateUpdaterLayer;
 pub struct Input {
     /// Fetched to check whether the `SyncState` was already provided by another layer.
     pub sync_state: Option<SyncStateResource>,
+    pub app_health: AppHealthCheckResource,
     pub master_pool: PoolResource<MasterPool>,
     pub main_node_client: MainNodeClientResource,
 }
@@ -62,6 +66,10 @@ impl WiringLayer for SyncStateUpdaterLayer {
         let MainNodeClientResource(main_node_client) = input.main_node_client;
 
         let sync_state = SyncState::default();
+        let app_health = &input.app_health.0;
+        app_health
+            .insert_custom_component(Arc::new(sync_state.clone()))
+            .map_err(WiringError::internal)?;
 
         Ok(Output {
             sync_state: Some(sync_state.clone().into()),
diff --git a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs b/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs
deleted file mode 100644
index 68789082a226..000000000000
--- a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs
+++ /dev/null
@@ -1,69 +0,0 @@
-use zksync_queued_job_processor::JobProcessor;
-use zksync_tee_verifier_input_producer::TeeVerifierInputProducer;
-use zksync_types::L2ChainId;
-
-use crate::{
-    implementations::resources::{
-        object_store::ObjectStoreResource,
-        pools::{MasterPool, PoolResource},
-    },
-    service::StopReceiver,
-    task::{Task, TaskId},
-    wiring_layer::{WiringError, WiringLayer},
-    FromContext, IntoContext,
-};
-
-/// Wiring layer for [`TeeVerifierInputProducer`].
-#[derive(Debug)] -pub struct TeeVerifierInputProducerLayer { - l2_chain_id: L2ChainId, -} - -impl TeeVerifierInputProducerLayer { - pub fn new(l2_chain_id: L2ChainId) -> Self { - Self { l2_chain_id } - } -} - -#[derive(Debug, FromContext)] -#[context(crate = crate)] -pub struct Input { - pub master_pool: PoolResource, - pub object_store: ObjectStoreResource, -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - #[context(task)] - pub task: TeeVerifierInputProducer, -} - -#[async_trait::async_trait] -impl WiringLayer for TeeVerifierInputProducerLayer { - type Input = Input; - type Output = Output; - - fn layer_name(&self) -> &'static str { - "tee_verifier_input_producer_layer" - } - - async fn wire(self, input: Self::Input) -> Result { - let pool = input.master_pool.get().await?; - let ObjectStoreResource(object_store) = input.object_store; - let task = TeeVerifierInputProducer::new(pool, object_store, self.l2_chain_id).await?; - - Ok(Output { task }) - } -} - -#[async_trait::async_trait] -impl Task for TeeVerifierInputProducer { - fn id(&self) -> TaskId { - "tee_verifier_input_producer".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0, None).await - } -} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs index ba1a69e23bb6..023ef1059c79 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs @@ -6,7 +6,7 @@ use zksync_node_api_server::{ tx_sender::{SandboxExecutorOptions, TxSenderBuilder, TxSenderConfig}, }; use zksync_state::{PostgresStorageCaches, PostgresStorageCachesTask}; -use zksync_types::{AccountTreeId, Address}; +use zksync_types::{vm::FastVmMode, AccountTreeId, Address}; use zksync_web3_decl::{ client::{DynClient, L2}, jsonrpsee, @@ -60,6 +60,7 @@ pub struct TxSenderLayer { postgres_storage_caches_config: PostgresStorageCachesConfig, max_vm_concurrency: usize, whitelisted_tokens_for_aa_cache: bool, + vm_mode: FastVmMode, } #[derive(Debug, FromContext)] @@ -95,6 +96,7 @@ impl TxSenderLayer { postgres_storage_caches_config, max_vm_concurrency, whitelisted_tokens_for_aa_cache: false, + vm_mode: FastVmMode::Old, } } @@ -106,6 +108,12 @@ impl TxSenderLayer { self.whitelisted_tokens_for_aa_cache = value; self } + + /// Sets the fast VM modes used for all supported operations. + pub fn with_vm_mode(mut self, mode: FastVmMode) -> Self { + self.vm_mode = mode; + self + } } #[async_trait::async_trait] @@ -151,12 +159,13 @@ impl WiringLayer for TxSenderLayer { // TODO (BFT-138): Allow to dynamically reload API contracts let config = self.tx_sender_config; - let executor_options = SandboxExecutorOptions::new( + let mut executor_options = SandboxExecutorOptions::new( config.chain_id, AccountTreeId::new(config.fee_account_addr), config.validation_computational_gas_limit, ) .await?; + executor_options.set_fast_vm_mode(self.vm_mode); // Build `TxSender`. 
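The `service/error.rs` hunk just below changes several `thiserror` format strings from `{1}` to `{1:#}` and renders collected task errors with `{err:#}`. With `anyhow`, the alternate flag prints the whole context chain on one line instead of only the outermost message; a runnable demonstration, assuming the `anyhow` crate:

```rust
fn main() {
    let err = anyhow::anyhow!("connection refused").context("task `db_pruner` failed");
    // Plain Display shows only the outermost message...
    assert_eq!(format!("{err}"), "task `db_pruner` failed");
    // ...while the alternate flag walks the whole cause chain.
    assert_eq!(format!("{err:#}"), "task `db_pruner` failed: connection refused");
}
```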
let mut tx_sender = TxSenderBuilder::new(config, replica_pool, tx_sink); diff --git a/core/node/node_framework/src/service/error.rs b/core/node/node_framework/src/service/error.rs index 890cc6b7d4b6..66a1c13e8730 100644 --- a/core/node/node_framework/src/service/error.rs +++ b/core/node/node_framework/src/service/error.rs @@ -1,20 +1,41 @@ +use std::fmt; + use crate::{task::TaskId, wiring_layer::WiringError}; /// An error that can occur during the task lifecycle. #[derive(Debug, thiserror::Error)] pub enum TaskError { - #[error("Task {0} failed: {1}")] + #[error("Task {0} failed: {1:#}")] TaskFailed(TaskId, anyhow::Error), #[error("Task {0} panicked: {1}")] TaskPanicked(TaskId, String), #[error("Shutdown for task {0} timed out")] TaskShutdownTimedOut(TaskId), - #[error("Shutdown hook {0} failed: {1}")] + #[error("Shutdown hook {0} failed: {1:#}")] ShutdownHookFailed(TaskId, anyhow::Error), #[error("Shutdown hook {0} timed out")] ShutdownHookTimedOut(TaskId), } +/// Wrapper of a list of errors with a reasonable formatting. +pub struct TaskErrors(pub Vec); + +impl From> for TaskErrors { + fn from(errs: Vec) -> Self { + Self(errs) + } +} + +impl fmt::Debug for TaskErrors { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0 + .iter() + .map(|err| format!("{err:#}")) + .collect::>() + .fmt(f) + } +} + /// An error that can occur during the service lifecycle. #[derive(Debug, thiserror::Error)] pub enum ZkStackServiceError { @@ -25,5 +46,5 @@ pub enum ZkStackServiceError { #[error("One or more wiring layers failed to initialize: {0:?}")] Wiring(Vec<(String, WiringError)>), #[error("One or more tasks failed: {0:?}")] - Task(Vec), + Task(TaskErrors), } diff --git a/core/node/node_framework/src/service/mod.rs b/core/node/node_framework/src/service/mod.rs index b6d420093541..00e50f7dc3b6 100644 --- a/core/node/node_framework/src/service/mod.rs +++ b/core/node/node_framework/src/service/mod.rs @@ -171,7 +171,7 @@ impl ZkStackService { if self.errors.is_empty() { Ok(()) } else { - Err(ZkStackServiceError::Task(self.errors)) + Err(ZkStackServiceError::Task(self.errors.into())) } } diff --git a/core/node/node_sync/Cargo.toml b/core/node/node_sync/Cargo.toml index ccfc8dd8a4e9..28dbf6b3150e 100644 --- a/core/node/node_sync/Cargo.toml +++ b/core/node/node_sync/Cargo.toml @@ -21,9 +21,9 @@ zksync_state_keeper.workspace = true zksync_shared_metrics.workspace = true zksync_web3_decl.workspace = true zksync_health_check.workspace = true -zksync_utils.workspace = true zksync_eth_client.workspace = true zksync_concurrency.workspace = true +zksync_consensus_roles.workspace = true vise.workspace = true zksync_vm_executor.workspace = true @@ -43,3 +43,4 @@ zksync_node_test_utils.workspace = true assert_matches.workspace = true once_cell.workspace = true test-casing.workspace = true +backon.workspace = true diff --git a/core/node/node_sync/src/client.rs b/core/node/node_sync/src/client.rs index ee89db10ddd1..ce6e08e29227 100644 --- a/core/node/node_sync/src/client.rs +++ b/core/node/node_sync/src/client.rs @@ -8,7 +8,8 @@ use zksync_health_check::{CheckHealth, Health, HealthStatus}; use zksync_system_constants::ACCOUNT_CODE_STORAGE_ADDRESS; use zksync_types::{ api::{self, en}, - get_code_key, Address, L2BlockNumber, ProtocolVersionId, H256, U64, + bytecode::BytecodeHash, + get_code_key, h256_to_u256, Address, L2BlockNumber, ProtocolVersionId, H256, U64, }; use zksync_web3_decl::{ client::{DynClient, L2}, @@ -57,7 +58,7 @@ impl MainNodeClient for Box> { .with_arg("hash", &hash) .await?; if let 
Some(bytecode) = &bytecode { - let actual_bytecode_hash = zksync_utils::bytecode::hash_bytecode(bytecode); + let actual_bytecode_hash = BytecodeHash::for_bytecode(bytecode).value(); if actual_bytecode_hash != hash { return Err(EnrichedClientError::custom( "Got invalid base system contract bytecode from main node", @@ -81,7 +82,7 @@ impl MainNodeClient for Box> { let code_hash = self .get_storage_at( ACCOUNT_CODE_STORAGE_ADDRESS, - zksync_utils::h256_to_u256(*code_key.key()), + h256_to_u256(*code_key.key()), Some(GENESIS_BLOCK), ) .rpc_context("get_storage_at") diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index 10fb2925015f..d3d908cfc169 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -20,7 +20,6 @@ use zksync_types::{ protocol_version::{ProtocolSemanticVersion, VersionPatch}, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, }; -use zksync_utils::bytes_to_be_words; use zksync_vm_executor::storage::L1BatchParamsProvider; use super::{ @@ -75,7 +74,7 @@ impl ExternalIO { Ok(match bytecode { Some(bytecode) => SystemContractCode { - code: bytes_to_be_words(bytecode), + code: bytecode, hash, }, None => { @@ -98,12 +97,69 @@ impl ExternalIO { ) .await?; SystemContractCode { - code: bytes_to_be_words(contract_bytecode), + code: contract_bytecode, hash, } } }) } + + async fn ensure_protocol_version_is_saved( + &self, + protocol_version: ProtocolVersionId, + ) -> anyhow::Result<()> { + let base_system_contract_hashes = self + .pool + .connection_tagged("sync_layer") + .await? + .protocol_versions_dal() + .get_base_system_contract_hashes_by_version_id(protocol_version as u16) + .await?; + if base_system_contract_hashes.is_some() { + return Ok(()); + } + tracing::info!("Fetching protocol version {protocol_version:?} from the main node"); + + let protocol_version = self + .main_node_client + .fetch_protocol_version(protocol_version) + .await + .context("failed to fetch protocol version from the main node")? + .context("protocol version is missing on the main node")?; + let minor = protocol_version + .minor_version() + .context("Missing minor protocol version")?; + let bootloader_code_hash = protocol_version + .bootloader_code_hash() + .context("Missing bootloader code hash")?; + let default_account_code_hash = protocol_version + .default_account_code_hash() + .context("Missing default account code hash")?; + let evm_emulator_code_hash = protocol_version.evm_emulator_code_hash(); + let l2_system_upgrade_tx_hash = protocol_version.l2_system_upgrade_tx_hash(); + self.pool + .connection_tagged("sync_layer") + .await? 
+ .protocol_versions_dal() + .save_protocol_version( + ProtocolSemanticVersion { + minor: minor + .try_into() + .context("cannot convert protocol version")?, + patch: VersionPatch(0), + }, + protocol_version.timestamp, + Default::default(), // verification keys are unused for EN + BaseSystemContractsHashes { + bootloader: bootloader_code_hash, + default_aa: default_account_code_hash, + evm_emulator: evm_emulator_code_hash, + }, + l2_system_upgrade_tx_hash, + ) + .await?; + Ok(()) + } } impl IoSealCriteria for ExternalIO { @@ -155,6 +211,14 @@ impl StateKeeperIO for ExternalIO { ) })?; let Some(mut pending_l2_block_header) = pending_l2_block_header else { + tracing::info!( + l1_batch_number = %cursor.l1_batch, + "No pending L2 blocks found; pruning unsealed batch if exists as we need at least one L2 block to initialize" + ); + storage + .blocks_dal() + .delete_unsealed_l1_batch(cursor.l1_batch - 1) + .await?; return Ok((cursor, None)); }; @@ -186,7 +250,7 @@ impl StateKeeperIO for ExternalIO { pending_l2_block_header.set_protocol_version(protocol_version); } - let (system_env, l1_batch_env) = self + let (system_env, l1_batch_env, pubdata_params) = self .l1_batch_params_provider .load_l1_batch_params( &mut storage, @@ -209,7 +273,7 @@ impl StateKeeperIO for ExternalIO { .into_unsealed_header(Some(system_env.version)), ) .await?; - let data = load_pending_batch(&mut storage, system_env, l1_batch_env) + let data = load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) .await .with_context(|| { format!( @@ -246,6 +310,8 @@ impl StateKeeperIO for ExternalIO { cursor.next_l2_block ); + self.ensure_protocol_version_is_saved(params.protocol_version) + .await?; self.pool .connection_tagged("sync_layer") .await? @@ -253,7 +319,7 @@ impl StateKeeperIO for ExternalIO { .insert_l1_batch(UnsealedL1BatchHeader { number: cursor.l1_batch, timestamp: params.first_l2_block.timestamp, - protocol_version: None, + protocol_version: Some(params.protocol_version), fee_address: params.operator_address, fee_input: params.fee_input, }) @@ -295,6 +361,7 @@ impl StateKeeperIO for ExternalIO { async fn wait_for_next_tx( &mut self, max_wait: Duration, + _l2_block_timestamp: u64, ) -> anyhow::Result> { tracing::debug!( "Waiting for the new tx, next action is {:?}", @@ -343,63 +410,21 @@ impl StateKeeperIO for ExternalIO { .connection_tagged("sync_layer") .await? .protocol_versions_dal() - .load_base_system_contracts_by_version_id(protocol_version as u16) - .await - .context("failed loading base system contracts")?; - - if let Some(contracts) = base_system_contracts { - return Ok(contracts); - } - tracing::info!("Fetching protocol version {protocol_version:?} from the main node"); - - let protocol_version = self - .main_node_client - .fetch_protocol_version(protocol_version) - .await - .context("failed to fetch protocol version from the main node")? 
- .context("protocol version is missing on the main node")?; - let minor = protocol_version - .minor_version() - .context("Missing minor protocol version")?; - let bootloader_code_hash = protocol_version - .bootloader_code_hash() - .context("Missing bootloader code hash")?; - let default_account_code_hash = protocol_version - .default_account_code_hash() - .context("Missing default account code hash")?; - let evm_emulator_code_hash = protocol_version.evm_emulator_code_hash(); - let l2_system_upgrade_tx_hash = protocol_version.l2_system_upgrade_tx_hash(); - self.pool - .connection_tagged("sync_layer") + .get_base_system_contract_hashes_by_version_id(protocol_version as u16) .await? - .protocol_versions_dal() - .save_protocol_version( - ProtocolSemanticVersion { - minor: minor - .try_into() - .context("cannot convert protocol version")?, - patch: VersionPatch(0), - }, - protocol_version.timestamp, - Default::default(), // verification keys are unused for EN - BaseSystemContractsHashes { - bootloader: bootloader_code_hash, - default_aa: default_account_code_hash, - evm_emulator: evm_emulator_code_hash, - }, - l2_system_upgrade_tx_hash, - ) - .await?; + .with_context(|| { + format!("Cannot load base system contracts' hashes for {protocol_version:?}. They should already be present") + })?; let bootloader = self - .get_base_system_contract(bootloader_code_hash, cursor.next_l2_block) + .get_base_system_contract(base_system_contracts.bootloader, cursor.next_l2_block) .await .with_context(|| format!("cannot fetch bootloader code for {protocol_version:?}"))?; let default_aa = self - .get_base_system_contract(default_account_code_hash, cursor.next_l2_block) + .get_base_system_contract(base_system_contracts.default_aa, cursor.next_l2_block) .await .with_context(|| format!("cannot fetch default AA code for {protocol_version:?}"))?; - let evm_emulator = if let Some(hash) = evm_emulator_code_hash { + let evm_emulator = if let Some(hash) = base_system_contracts.evm_emulator { Some( self.get_base_system_contract(hash, cursor.next_l2_block) .await @@ -451,3 +476,98 @@ impl StateKeeperIO for ExternalIO { Ok(hash) } } + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use zksync_dal::{ConnectionPool, CoreDal}; + use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; + use zksync_state_keeper::{io::L1BatchParams, L2BlockParams, StateKeeperIO}; + use zksync_types::{ + api, fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, + H256, + }; + + use crate::{sync_action::SyncAction, testonly::MockMainNodeClient, ActionQueue, ExternalIO}; + + #[tokio::test] + async fn insert_batch_with_protocol_version() { + // Whenever ExternalIO inserts an unsealed batch into DB it should populate it with protocol + // version and make sure that it is present in the DB (i.e. fetch it from main node if not). 
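`ensure_protocol_version_is_saved` above is deliberately idempotent: it consults Postgres first and only round-trips to the main node when the version is unknown, so repeated calls are cheap. A minimal model of that check-then-fetch flow, with an in-memory map standing in for the DAL and a stub fetcher:

```rust
use std::collections::HashMap;

fn ensure_saved(
    local: &mut HashMap<u16, &'static str>,
    version: u16,
    fetch_from_main_node: impl Fn(u16) -> &'static str,
) {
    if local.contains_key(&version) {
        return; // already persisted; no main-node round trip
    }
    let fetched = fetch_from_main_node(version);
    local.insert(version, fetched);
}

fn main() {
    let mut local = HashMap::new();
    ensure_saved(&mut local, 25, |_| "protocol v25 metadata");
    // Second call is a no-op: the stub fetcher must never be hit again.
    ensure_saved(&mut local, 25, |_| unreachable!("must not refetch"));
    assert_eq!(local[&25], "protocol v25 metadata");
}
```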
+ let pool = ConnectionPool::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + insert_genesis_batch(&mut conn, &GenesisParams::mock()) + .await + .unwrap(); + let (actions_sender, action_queue) = ActionQueue::new(); + let mut client = MockMainNodeClient::default(); + let next_protocol_version = api::ProtocolVersion { + minor_version: Some(ProtocolVersionId::next() as u16), + timestamp: 1, + bootloader_code_hash: Some(H256::repeat_byte(1)), + default_account_code_hash: Some(H256::repeat_byte(1)), + evm_emulator_code_hash: Some(H256::repeat_byte(1)), + ..api::ProtocolVersion::default() + }; + client.insert_protocol_version(next_protocol_version.clone()); + let mut external_io = ExternalIO::new( + pool.clone(), + action_queue, + Box::new(client), + L2ChainId::default(), + ) + .unwrap(); + + let (cursor, _) = external_io.initialize().await.unwrap(); + let params = L1BatchParams { + protocol_version: ProtocolVersionId::next(), + validation_computational_gas_limit: u32::MAX, + operator_address: Default::default(), + fee_input: BatchFeeInput::pubdata_independent(2, 3, 4), + first_l2_block: L2BlockParams { + timestamp: 1, + virtual_blocks: 1, + }, + pubdata_params: Default::default(), + }; + actions_sender + .push_action_unchecked(SyncAction::OpenBatch { + params: params.clone(), + number: L1BatchNumber(1), + first_l2_block_number: L2BlockNumber(1), + }) + .await + .unwrap(); + let fetched_params = external_io + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .unwrap(); + assert_eq!(fetched_params, params); + + // Verify that the next protocol version is in DB + let fetched_protocol_version = conn + .protocol_versions_dal() + .get_protocol_version_with_latest_patch(ProtocolVersionId::next()) + .await + .unwrap() + .unwrap(); + assert_eq!( + fetched_protocol_version.version.minor as u16, + next_protocol_version.minor_version.unwrap() + ); + + // Verify that the unsealed batch has protocol version + let unsealed_batch = conn + .blocks_dal() + .get_unsealed_l1_batch() + .await + .unwrap() + .unwrap(); + assert_eq!( + unsealed_batch.protocol_version, + Some(fetched_protocol_version.version.minor) + ); + } +} diff --git a/core/node/node_sync/src/fetcher.rs b/core/node/node_sync/src/fetcher.rs index 3f8558ed0ac5..9c76d1d93ca3 100644 --- a/core/node/node_sync/src/fetcher.rs +++ b/core/node/node_sync/src/fetcher.rs @@ -1,9 +1,10 @@ +use anyhow::Context; use zksync_dal::{Connection, Core, CoreDal}; use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_state_keeper::io::{common::IoCursor, L1BatchParams, L2BlockParams}; use zksync_types::{ - api::en::SyncBlock, block::L2BlockHasher, fee_model::BatchFeeInput, helpers::unix_timestamp_ms, - Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, + api::en::SyncBlock, block::L2BlockHasher, commitment::PubdataParams, fee_model::BatchFeeInput, + helpers::unix_timestamp_ms, Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, }; use super::{ @@ -51,6 +52,7 @@ pub struct FetchedBlock { pub virtual_blocks: u32, pub operator_address: Address, pub transactions: Vec, + pub pubdata_params: PubdataParams, } impl FetchedBlock { @@ -77,6 +79,14 @@ impl TryFrom for FetchedBlock { )); } + let pubdata_params = if block.protocol_version.is_pre_gateway() { + block.pubdata_params.unwrap_or_default() + } else { + block + .pubdata_params + .context("Missing `pubdata_params` for post-gateway payload")? 
+ }; + Ok(Self { number: block.number, l1_batch_number: block.l1_batch_number, @@ -93,6 +103,7 @@ impl TryFrom for FetchedBlock { .into_iter() .map(FetchedTransaction::new) .collect(), + pubdata_params, }) } } @@ -114,8 +125,8 @@ impl IoCursorExt for IoCursor { let mut this = Self::new(storage).await?; // It's important to know whether we have opened a new batch already or just sealed the previous one. // Depending on it, we must either insert `OpenBatch` item into the queue, or not. - let unsealed_batch = storage.blocks_dal().get_unsealed_l1_batch().await?; - if unsealed_batch.is_none() { + let was_new_batch_open = storage.blocks_dal().pending_batch_exists().await?; + if !was_new_batch_open { this.l1_batch -= 1; // Should continue from the last L1 batch present in the storage } Ok(this) @@ -165,6 +176,7 @@ impl IoCursorExt for IoCursor { timestamp: block.timestamp, virtual_blocks: block.virtual_blocks, }, + pubdata_params: block.pubdata_params, }, number: block.l1_batch_number, first_l2_block_number: block.number, @@ -201,35 +213,3 @@ impl IoCursorExt for IoCursor { new_actions } } - -#[cfg(test)] -mod tests { - use zksync_dal::{ConnectionPool, Core, CoreDal}; - use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; - use zksync_state_keeper::io::IoCursor; - use zksync_types::{block::UnsealedL1BatchHeader, L1BatchNumber}; - - use crate::fetcher::IoCursorExt; - - #[tokio::test] - async fn io_cursor_recognizes_empty_unsealed_batch() -> anyhow::Result<()> { - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.connection().await.unwrap(); - insert_genesis_batch(&mut conn, &GenesisParams::mock()) - .await - .unwrap(); - conn.blocks_dal() - .insert_l1_batch(UnsealedL1BatchHeader { - number: L1BatchNumber(1), - timestamp: 1, - protocol_version: None, - fee_address: Default::default(), - fee_input: Default::default(), - }) - .await?; - - let io_cursor = IoCursor::for_fetcher(&mut conn).await?; - assert_eq!(io_cursor.l1_batch, L1BatchNumber(1)); - Ok(()) - } -} diff --git a/core/node/node_sync/src/genesis.rs b/core/node/node_sync/src/genesis.rs index 0ff8d0d448c0..7401bdd9c9d4 100644 --- a/core/node/node_sync/src/genesis.rs +++ b/core/node/node_sync/src/genesis.rs @@ -109,21 +109,18 @@ async fn fetch_base_system_contracts( let bytes = client .fetch_system_contract_by_hash(hash) .await? - .context("EVM Simulator bytecode is missing on main node")?; - Some(SystemContractCode { - code: zksync_utils::bytes_to_be_words(bytes), - hash, - }) + .context("EVM emulator bytecode is missing on main node")?; + Some(SystemContractCode { code: bytes, hash }) } else { None }; Ok(BaseSystemContracts { bootloader: SystemContractCode { - code: zksync_utils::bytes_to_be_words(bootloader_bytecode), + code: bootloader_bytecode, hash: contract_hashes.bootloader, }, default_aa: SystemContractCode { - code: zksync_utils::bytes_to_be_words(default_aa_bytecode), + code: default_aa_bytecode, hash: contract_hashes.default_aa, }, evm_emulator, diff --git a/core/node/node_sync/src/sync_action.rs b/core/node/node_sync/src/sync_action.rs index 8cb90d24fe84..897abfafb2a6 100644 --- a/core/node/node_sync/src/sync_action.rs +++ b/core/node/node_sync/src/sync_action.rs @@ -33,6 +33,18 @@ impl ActionQueueSender { Ok(()) } + /// Pushes a single action into the queue without checking validity of the sequence. + /// + /// Useful to simulate situations where only a part of the sequence was executed on the node. 
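The `TryFrom<SyncBlock>` hunk above defaults `pubdata_params` for pre-gateway protocol versions and treats a missing value as a hard error for post-gateway ones. The same decision logic in isolation, with simplified stand-in types and `anyhow` for the error:

```rust
use anyhow::Context;

#[derive(Debug, Default, PartialEq)]
struct PubdataParams;

fn resolve(pre_gateway: bool, params: Option<PubdataParams>) -> anyhow::Result<PubdataParams> {
    if pre_gateway {
        // Legacy blocks may legitimately omit the field.
        Ok(params.unwrap_or_default())
    } else {
        params.context("Missing `pubdata_params` for post-gateway payload")
    }
}

fn main() -> anyhow::Result<()> {
    assert_eq!(resolve(true, None)?, PubdataParams); // pre-gateway: default
    assert!(resolve(false, None).is_err()); // post-gateway: hard error
    Ok(())
}
```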
+ #[cfg(test)] + pub async fn push_action_unchecked(&self, action: SyncAction) -> anyhow::Result<()> { + self.0 + .send(action) + .await + .map_err(|_| anyhow::anyhow!("node action processor stopped"))?; + Ok(()) + } + /// Checks whether the action sequence is valid. /// Returned error is meant to be used as a panic message, since an invalid sequence represents an unrecoverable /// error. This function itself does not panic for the ease of testing. @@ -186,6 +198,7 @@ mod tests { timestamp: 1, virtual_blocks: 1, }, + pubdata_params: Default::default(), }, number: L1BatchNumber(1), first_l2_block_number: L2BlockNumber(1), diff --git a/core/node/node_sync/src/sync_state.rs b/core/node/node_sync/src/sync_state.rs index e061ff7da012..1ffec757c9b1 100644 --- a/core/node/node_sync/src/sync_state.rs +++ b/core/node/node_sync/src/sync_state.rs @@ -4,6 +4,7 @@ use async_trait::async_trait; use serde::Serialize; use tokio::sync::watch; use zksync_concurrency::{ctx, sync}; +use zksync_consensus_roles::validator; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_health_check::{CheckHealth, Health, HealthStatus}; use zksync_shared_metrics::EN_METRICS; @@ -50,18 +51,20 @@ impl SyncState { .unwrap(); } + /// Waits until the main node block is greater or equal to the given block number. + /// Returns the current main node block number. pub async fn wait_for_main_node_block( &self, ctx: &ctx::Ctx, - want: L2BlockNumber, - ) -> ctx::OrCanceled<()> { - sync::wait_for( - ctx, - &mut self.0.subscribe(), - |inner| matches!(inner.main_node_block, Some(got) if got >= want), - ) - .await?; - Ok(()) + pred: impl Fn(validator::BlockNumber) -> bool, + ) -> ctx::OrCanceled { + sync::wait_for_some(ctx, &mut self.0.subscribe(), |inner| { + inner + .main_node_block + .map(|n| validator::BlockNumber(n.0.into())) + .filter(|n| pred(*n)) + }) + .await } pub fn set_main_node_block(&self, block: L2BlockNumber) { @@ -173,6 +176,7 @@ impl CheckHealth for SyncState { Health::from(&*self.0.borrow()) } } + impl SyncStateInner { fn is_synced(&self) -> (bool, Option) { if let (Some(main_node_block), Some(local_block)) = (self.main_node_block, self.local_block) diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index 3f5791cdf24c..172a00e8c14c 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -2,6 +2,7 @@ use std::{iter, sync::Arc, time::Duration}; +use backon::{ConstantBuilder, Retryable}; use test_casing::test_casing; use tokio::{sync::watch, task::JoinHandle}; use zksync_contracts::BaseSystemContractsHashes; @@ -18,7 +19,7 @@ use zksync_state_keeper::{ }; use zksync_types::{ api, - block::L2BlockHasher, + block::{L2BlockHasher, UnsealedL1BatchHeader}, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, @@ -43,6 +44,7 @@ fn open_l1_batch(number: u32, timestamp: u64, first_l2_block_number: u32) -> Syn timestamp, virtual_blocks: 1, }, + pubdata_params: Default::default(), }, number: L1BatchNumber(number), first_l2_block_number: L2BlockNumber(first_l2_block_number), @@ -66,6 +68,7 @@ impl MockMainNodeClient { virtual_blocks: Some(0), hash: Some(snapshot.l2_block_hash), protocol_version: ProtocolVersionId::latest(), + pubdata_params: Default::default(), }; Self { @@ -105,7 +108,9 @@ impl StateKeeperHandles { let sync_state = SyncState::default(); let (persistence, l2_block_sealer) = - 
StateKeeperPersistence::new(pool.clone(), Address::repeat_byte(1), 5); + StateKeeperPersistence::new(pool.clone(), Some(Address::repeat_byte(1)), 5) + .await + .unwrap(); let tree_writes_persistence = TreeWritesPersistence::new(pool.clone()); let output_handler = OutputHandler::new(Box::new(persistence.with_tx_insertion())) .with_handler(Box::new(tree_writes_persistence)) @@ -652,3 +657,101 @@ async fn external_io_with_multiple_l1_batches() { assert_eq!(fictive_l2_block.timestamp, 2); assert_eq!(fictive_l2_block.l2_tx_count, 0); } + +async fn wait_for_batch_to_be_open( + pool: &ConnectionPool, + number: L1BatchNumber, +) -> anyhow::Result { + (|| async { + let mut storage = pool.connection().await.unwrap(); + let unsealed_batch = storage.blocks_dal().get_unsealed_l1_batch().await?; + + if let Some(unsealed_batch) = unsealed_batch { + if unsealed_batch.number == number { + Ok(unsealed_batch) + } else { + Err(anyhow::anyhow!("L1 batch #{number} is not open yet")) + } + } else { + Err(anyhow::anyhow!("No unsealed L1 batch found yet")) + } + }) + .retry( + &ConstantBuilder::default() + .with_delay(Duration::from_millis(200)) + .with_max_times(20), + ) + .await +} + +#[tokio::test] +async fn external_io_empty_unsealed_batch() { + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + ensure_genesis(&mut storage).await; + drop(storage); + + let open_batch_one = open_l1_batch(1, 1, 1); + let tx = create_l2_transaction(10, 100); + let tx_hash = tx.hash(); + let tx = FetchedTransaction::new(tx.into()); + let open_batch_two = open_l1_batch(2, 2, 3); + let fictive_l2_block = SyncAction::L2Block { + params: L2BlockParams { + timestamp: 2, + virtual_blocks: 0, + }, + number: L2BlockNumber(2), + }; + let actions1 = vec![open_batch_one, tx.into(), SyncAction::SealL2Block]; + let actions2 = vec![fictive_l2_block, SyncAction::SealBatch]; + + let (actions_sender, action_queue) = ActionQueue::new(); + let client = MockMainNodeClient::default(); + let state_keeper = + StateKeeperHandles::new(pool.clone(), client, action_queue, &[&[tx_hash]]).await; + actions_sender.push_actions(actions1).await.unwrap(); + actions_sender.push_actions(actions2).await.unwrap(); + // Unchecked insert of batch #2 to simulate restart in the middle of processing an action sequence + // In other words batch #2 is inserted completely empty with no blocks/txs present in it + actions_sender + .push_action_unchecked(open_batch_two.clone()) + .await + .unwrap(); + // Wait until the L2 block is sealed. + state_keeper.wait_for_local_block(L2BlockNumber(2)).await; + + // Wait until L1 batch #2 is opened and persisted. 
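`wait_for_batch_to_be_open` above polls Postgres through `backon`: the closure is retried on `Err` with a constant delay until the unsealed batch appears or the attempt budget runs out. The same combinators exercised on a toy condition, assuming the `backon`, `tokio` (with the `macros` feature), and `anyhow` crates:

```rust
use std::{
    sync::atomic::{AtomicU32, Ordering},
    time::Duration,
};

use backon::{ConstantBuilder, Retryable};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let attempts = AtomicU32::new(0);
    let value = (|| async {
        // Fail twice, then succeed; backon re-invokes the closure on each `Err`.
        if attempts.fetch_add(1, Ordering::SeqCst) < 2 {
            Err(anyhow::anyhow!("not ready yet"))
        } else {
            Ok(42)
        }
    })
    .retry(
        &ConstantBuilder::default()
            .with_delay(Duration::from_millis(10))
            .with_max_times(5),
    )
    .await?;
    assert_eq!(value, 42);
    Ok(())
}
```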
+ let unsealed_batch = wait_for_batch_to_be_open(&pool, L1BatchNumber(2)) + .await + .unwrap(); + assert_eq!(unsealed_batch.number, L1BatchNumber(2)); + assert_eq!(unsealed_batch.timestamp, 2); + + // Prepare the rest of batch #2 + let tx = create_l2_transaction(20, 200); + let tx_hash = tx.hash(); + let tx = FetchedTransaction::new(tx.into()); + let fictive_l2_block = SyncAction::L2Block { + params: L2BlockParams { + timestamp: 4, + virtual_blocks: 0, + }, + number: L2BlockNumber(4), + }; + let actions1 = vec![open_batch_two, tx.into(), SyncAction::SealL2Block]; + let actions2 = vec![fictive_l2_block, SyncAction::SealBatch]; + + // Restart state keeper + let (actions_sender, action_queue) = ActionQueue::new(); + let client = MockMainNodeClient::default(); + let state_keeper = + StateKeeperHandles::new(pool.clone(), client, action_queue, &[&[tx_hash]]).await; + actions_sender.push_actions(actions1).await.unwrap(); + actions_sender.push_actions(actions2).await.unwrap(); + + let hash_task = tokio::spawn(mock_l1_batch_hash_computation(pool.clone(), 1)); + // Wait until the block #4 is sealed. + state_keeper.wait_for_local_block(L2BlockNumber(4)).await; + hash_task.await.unwrap(); +} diff --git a/core/node/proof_data_handler/Cargo.toml b/core/node/proof_data_handler/Cargo.toml index 82063b23fdb5..1bcda394a674 100644 --- a/core/node/proof_data_handler/Cargo.toml +++ b/core/node/proof_data_handler/Cargo.toml @@ -17,9 +17,11 @@ zksync_dal.workspace = true zksync_object_store.workspace = true zksync_prover_interface.workspace = true zksync_types.workspace = true +zksync_vm_executor.workspace = true anyhow.workspace = true axum.workspace = true tokio.workspace = true +tower-http = { workspace = true, features = ["compression-zstd", "decompression-zstd"] } tracing.workspace = true [dev-dependencies] diff --git a/core/node/proof_data_handler/src/errors.rs b/core/node/proof_data_handler/src/errors.rs index 15ef393294aa..7d0e33ea0a3a 100644 --- a/core/node/proof_data_handler/src/errors.rs +++ b/core/node/proof_data_handler/src/errors.rs @@ -6,6 +6,7 @@ use zksync_dal::DalError; use zksync_object_store::ObjectStoreError; pub(crate) enum RequestProcessorError { + GeneralError(String), ObjectStore(ObjectStoreError), Dal(DalError), } @@ -19,24 +20,26 @@ impl From for RequestProcessorError { impl IntoResponse for RequestProcessorError { fn into_response(self) -> Response { let (status_code, message) = match self { - RequestProcessorError::ObjectStore(err) => { + Self::GeneralError(err) => { + tracing::error!("Error: {:?}", err); + ( + StatusCode::INTERNAL_SERVER_ERROR, + "An internal error occurred".to_owned(), + ) + } + Self::ObjectStore(err) => { tracing::error!("GCS error: {:?}", err); ( StatusCode::BAD_GATEWAY, "Failed fetching/saving from GCS".to_owned(), ) } - RequestProcessorError::Dal(err) => { + Self::Dal(err) => { tracing::error!("Sqlx error: {:?}", err); - match err.inner() { - zksync_dal::SqlxError::RowNotFound => { - (StatusCode::NOT_FOUND, "Non existing L1 batch".to_owned()) - } - _ => ( - StatusCode::BAD_GATEWAY, - "Failed fetching/saving from db".to_owned(), - ), - } + ( + StatusCode::BAD_GATEWAY, + "Failed fetching/saving from db".to_owned(), + ) } }; (status_code, message).into_response() diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs index 51780f03230d..e014fca15d77 100644 --- a/core/node/proof_data_handler/src/lib.rs +++ b/core/node/proof_data_handler/src/lib.rs @@ -1,7 +1,7 @@ use std::{net::SocketAddr, sync::Arc}; use anyhow::Context as 
_; -use axum::{extract::Path, routing::post, Json, Router}; +use axum::{extract::Path, http::StatusCode, response::IntoResponse, routing::post, Json, Router}; use request_processor::RequestProcessor; use tee_request_processor::TeeRequestProcessor; use tokio::sync::watch; @@ -12,7 +12,7 @@ use zksync_prover_interface::api::{ ProofGenerationDataRequest, RegisterTeeAttestationRequest, SubmitProofRequest, SubmitTeeProofRequest, TeeProofGenerationDataRequest, }; -use zksync_types::commitment::L1BatchCommitmentMode; +use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; #[cfg(test)] mod tests; @@ -27,11 +27,18 @@ pub async fn run_server( blob_store: Arc, connection_pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, mut stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port)); tracing::info!("Starting proof data handler server on {bind_address}"); - let app = create_proof_processing_router(blob_store, connection_pool, config, commitment_mode); + let app = create_proof_processing_router( + blob_store, + connection_pool, + config, + commitment_mode, + l2_chain_id, + ); let listener = tokio::net::TcpListener::bind(bind_address) .await @@ -54,6 +61,7 @@ fn create_proof_processing_router( connection_pool: ConnectionPool, config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, ) -> Router { let get_proof_gen_processor = RequestProcessor::new( blob_store.clone(), @@ -86,9 +94,9 @@ fn create_proof_processing_router( ), ); - if config.tee_support { + if config.tee_config.tee_support { let get_tee_proof_gen_processor = - TeeRequestProcessor::new(blob_store, connection_pool, config.clone()); + TeeRequestProcessor::new(blob_store, connection_pool, config.clone(), l2_chain_id); let submit_tee_proof_processor = get_tee_proof_gen_processor.clone(); let register_tee_attestation_processor = get_tee_proof_gen_processor.clone(); @@ -96,9 +104,15 @@ fn create_proof_processing_router( "/tee/proof_inputs", post( move |payload: Json| async move { - get_tee_proof_gen_processor + let result = get_tee_proof_gen_processor .get_proof_generation_data(payload) - .await + .await; + + match result { + Ok(Some(data)) => (StatusCode::OK, data).into_response(), + Ok(None) => { StatusCode::NO_CONTENT.into_response()}, + Err(e) => e.into_response(), + } }, ), ) @@ -125,4 +139,6 @@ fn create_proof_processing_router( } router + .layer(tower_http::compression::CompressionLayer::new()) + .layer(tower_http::decompression::RequestDecompressionLayer::new().zstd(true)) } diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index ee266a88971e..89304724a7c2 100644 --- a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -17,7 +17,7 @@ use zksync_types::{ basic_fri_types::Eip4844Blobs, commitment::{serialize_commitments, L1BatchCommitmentMode}, web3::keccak256, - L1BatchNumber, H256, + L1BatchNumber, ProtocolVersionId, H256, STATE_DIFF_HASH_KEY_PRE_GATEWAY, }; use crate::{errors::RequestProcessorError, metrics::METRICS}; @@ -226,58 +226,63 @@ impl RequestProcessor { .unwrap() .expect("Proved block without metadata"); - let is_pre_boojum = l1_batch + let protocol_version = l1_batch .header .protocol_version - .map(|v| v.is_pre_boojum()) - .unwrap_or(true); - if !is_pre_boojum { - let events_queue_state = l1_batch - .metadata - .events_queue_commitment 
- .expect("No events_queue_commitment"); - let bootloader_heap_initial_content = l1_batch - .metadata - .bootloader_initial_content_commitment - .expect("No bootloader_initial_content_commitment"); - - if events_queue_state != events_queue_state_from_prover - || bootloader_heap_initial_content - != bootloader_heap_initial_content_from_prover - { - let server_values = format!("events_queue_state = {events_queue_state}, bootloader_heap_initial_content = {bootloader_heap_initial_content}"); - let prover_values = format!("events_queue_state = {events_queue_state_from_prover}, bootloader_heap_initial_content = {bootloader_heap_initial_content_from_prover}"); - panic!( - "Auxilary output doesn't match, server values: {} prover values: {}", - server_values, prover_values - ); - } + .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); + + let events_queue_state = l1_batch + .metadata + .events_queue_commitment + .expect("No events_queue_commitment"); + let bootloader_heap_initial_content = l1_batch + .metadata + .bootloader_initial_content_commitment + .expect("No bootloader_initial_content_commitment"); + + if events_queue_state != events_queue_state_from_prover + || bootloader_heap_initial_content + != bootloader_heap_initial_content_from_prover + { + panic!( + "Auxilary output doesn't match\n\ + server values: events_queue_state = {events_queue_state}, bootloader_heap_initial_content = {bootloader_heap_initial_content}\n\ + prover values: events_queue_state = {events_queue_state_from_prover}, bootloader_heap_initial_content = {bootloader_heap_initial_content_from_prover}", + ); } let system_logs = serialize_commitments(&l1_batch.header.system_logs); let system_logs_hash = H256(keccak256(&system_logs)); - if !is_pre_boojum { - let state_diff_hash = l1_batch + let state_diff_hash = if protocol_version.is_pre_gateway() { + l1_batch .header .system_logs - .into_iter() - .find(|elem| elem.0.key == H256::from_low_u64_be(2)) - .expect("No state diff hash key") - .0 - .value; - - if state_diff_hash != state_diff_hash_from_prover - || system_logs_hash != system_logs_hash_from_prover - { - let server_values = format!("system_logs_hash = {system_logs_hash}, state_diff_hash = {state_diff_hash}"); - let prover_values = format!("system_logs_hash = {system_logs_hash_from_prover}, state_diff_hash = {state_diff_hash_from_prover}"); - panic!( - "Auxilary output doesn't match, server values: {} prover values: {}", - server_values, prover_values - ); - } + .iter() + .find_map(|log| { + (log.0.key + == H256::from_low_u64_be(STATE_DIFF_HASH_KEY_PRE_GATEWAY as u64)) + .then_some(log.0.value) + }) + .expect("Failed to get state_diff_hash from system logs") + } else { + l1_batch + .metadata + .state_diff_hash + .expect("Failed to get state_diff_hash from metadata") + }; + + if state_diff_hash != state_diff_hash_from_prover + || system_logs_hash != system_logs_hash_from_prover + { + let server_values = format!("system_logs_hash = {system_logs_hash}, state_diff_hash = {state_diff_hash}"); + let prover_values = format!("system_logs_hash = {system_logs_hash_from_prover}, state_diff_hash = {state_diff_hash_from_prover}"); + panic!( + "Auxilary output doesn't match, server values: {} prover values: {}", + server_values, prover_values + ); } + storage .proof_generation_dal() .save_proof_artifacts_metadata(l1_batch_number, &blob_url) diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 4ae1a5026f14..b265b94d4d74 100644 --- 
a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -4,11 +4,17 @@ use axum::{extract::Path, Json}; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_object_store::{ObjectStore, ObjectStoreError}; -use zksync_prover_interface::api::{ - RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, - SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, +use zksync_prover_interface::{ + api::{ + RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, + SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, + }, + inputs::{ + TeeVerifierInput, V1TeeVerifierInput, VMRunWitnessInputData, WitnessInputMerklePaths, + }, }; -use zksync_types::{tee_types::TeeType, L1BatchNumber}; +use zksync_types::{tee_types::TeeType, L1BatchNumber, L2ChainId}; +use zksync_vm_executor::storage::L1BatchParamsProvider; use crate::errors::RequestProcessorError; @@ -17,6 +23,7 @@ pub(crate) struct TeeRequestProcessor { blob_store: Arc, pool: ConnectionPool, config: ProofDataHandlerConfig, + l2_chain_id: L2ChainId, } impl TeeRequestProcessor { @@ -24,45 +31,52 @@ impl TeeRequestProcessor { blob_store: Arc, pool: ConnectionPool, config: ProofDataHandlerConfig, + l2_chain_id: L2ChainId, ) -> Self { Self { blob_store, pool, config, + l2_chain_id, } } pub(crate) async fn get_proof_generation_data( &self, request: Json, - ) -> Result, RequestProcessorError> { + ) -> Result>, RequestProcessorError> { tracing::info!("Received request for proof generation data: {:?}", request); - let mut min_batch_number: Option = None; + let mut min_batch_number = self.config.tee_config.first_tee_processed_batch; let mut missing_range: Option<(L1BatchNumber, L1BatchNumber)> = None; let result = loop { - let l1_batch_number = match self + let Some(l1_batch_number) = self .lock_batch_for_proving(request.tee_type, min_batch_number) .await? 
- { - Some(number) => number, - None => break Ok(Json(TeeProofGenerationDataResponse(None))), + else { + // No job available + return Ok(None); }; - match self.blob_store.get(l1_batch_number).await { - Ok(input) => break Ok(Json(TeeProofGenerationDataResponse(Some(Box::new(input))))), - Err(ObjectStoreError::KeyNotFound(_)) => { + match self + .tee_verifier_input_for_existing_batch(l1_batch_number) + .await + { + Ok(input) => { + break Ok(Some(Json(TeeProofGenerationDataResponse(Box::new(input))))); + } + Err(RequestProcessorError::ObjectStore(ObjectStoreError::KeyNotFound(_))) => { missing_range = match missing_range { Some((start, _)) => Some((start, l1_batch_number)), None => Some((l1_batch_number, l1_batch_number)), }; self.unlock_batch(l1_batch_number, request.tee_type).await?; - min_batch_number = Some(min_batch_number.unwrap_or(l1_batch_number) + 1); + min_batch_number = l1_batch_number + 1; } Err(err) => { self.unlock_batch(l1_batch_number, request.tee_type).await?; - break Err(RequestProcessorError::ObjectStore(err)); + break Err(err); } } }; @@ -78,23 +92,83 @@ impl TeeRequestProcessor { result } + #[tracing::instrument(skip(self))] + async fn tee_verifier_input_for_existing_batch( + &self, + l1_batch_number: L1BatchNumber, + ) -> Result { + let vm_run_data: VMRunWitnessInputData = self + .blob_store + .get(l1_batch_number) + .await + .map_err(RequestProcessorError::ObjectStore)?; + + let merkle_paths: WitnessInputMerklePaths = self + .blob_store + .get(l1_batch_number) + .await + .map_err(RequestProcessorError::ObjectStore)?; + + let mut connection = self + .pool + .connection_tagged("tee_request_processor") + .await + .map_err(RequestProcessorError::Dal)?; + + let l2_blocks_execution_data = connection + .transactions_dal() + .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number) + .await + .map_err(RequestProcessorError::Dal)?; + + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection) + .await + .map_err(|err| RequestProcessorError::GeneralError(err.to_string()))?; + + // In the state keeper, this value is used to reject execution. + // All batches have already been executed by State Keeper. + // This means we don't want to reject any execution, therefore we're using MAX as an allow all. + let validation_computational_gas_limit = u32::MAX; + + let (system_env, l1_batch_env, pubdata_params) = l1_batch_params_provider + .load_l1_batch_env( + &mut connection, + l1_batch_number, + validation_computational_gas_limit, + self.l2_chain_id, + ) + .await + .map_err(|err| RequestProcessorError::GeneralError(err.to_string()))? + .ok_or(RequestProcessorError::GeneralError( + "system_env, l1_batch_env missing".into(), + ))?; + + Ok(TeeVerifierInput::new(V1TeeVerifierInput { + vm_run_data, + merkle_paths, + l2_blocks_execution_data, + l1_batch_env, + system_env, + pubdata_params, + })) + } + async fn lock_batch_for_proving( &self, tee_type: TeeType, - min_batch_number: Option, + min_batch_number: L1BatchNumber, ) -> Result, RequestProcessorError> { - let result = self - .pool - .connection() + self.pool + .connection_tagged("tee_request_processor") .await? 
.tee_proof_generation_dal() .lock_batch_for_proving( tee_type, - self.config.proof_generation_timeout(), + self.config.tee_config.tee_proof_generation_timeout(), min_batch_number, ) - .await?; - Ok(result) + .await + .map_err(RequestProcessorError::Dal) } async fn unlock_batch( @@ -103,7 +177,7 @@ impl TeeRequestProcessor { tee_type: TeeType, ) -> Result<(), RequestProcessorError> { self.pool - .connection() + .connection_tagged("tee_request_processor") .await? .tee_proof_generation_dal() .unlock_batch(l1_batch_number, tee_type) @@ -117,7 +191,7 @@ impl TeeRequestProcessor { Json(proof): Json, ) -> Result, RequestProcessorError> { let l1_batch_number = L1BatchNumber(l1_batch_number); - let mut connection = self.pool.connection().await?; + let mut connection = self.pool.connection_tagged("tee_request_processor").await?; let mut dal = connection.tee_proof_generation_dal(); tracing::info!( @@ -143,7 +217,7 @@ impl TeeRequestProcessor { ) -> Result, RequestProcessorError> { tracing::info!("Received attestation: {:?}", payload); - let mut connection = self.pool.connection().await?; + let mut connection = self.pool.connection_tagged("tee_request_processor").await?; let mut dal = connection.tee_proof_generation_dal(); dal.save_attestation(&payload.pubkey, &payload.attestation) diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 8220aef5da0b..87c6bff8a1f4 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -1,5 +1,3 @@ -use std::time::Instant; - use axum::{ body::Body, http::{self, Method, Request, StatusCode}, @@ -8,128 +6,68 @@ use axum::{ }; use serde_json::json; use tower::ServiceExt; -use zksync_basic_types::U256; -use zksync_config::configs::ProofDataHandlerConfig; -use zksync_contracts::{BaseSystemContracts, SystemContractCode}; +use zksync_basic_types::L2ChainId; +use zksync_config::configs::{ProofDataHandlerConfig, TeeConfig}; use zksync_dal::{ConnectionPool, CoreDal}; -use zksync_multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; use zksync_object_store::MockObjectStore; -use zksync_prover_interface::{ - api::SubmitTeeProofRequest, - inputs::{TeeVerifierInput, V1TeeVerifierInput, WitnessInputMerklePaths}, -}; -use zksync_types::{commitment::L1BatchCommitmentMode, tee_types::TeeType, L1BatchNumber, H256}; +use zksync_prover_interface::api::SubmitTeeProofRequest; +use zksync_types::{commitment::L1BatchCommitmentMode, tee_types::TeeType, L1BatchNumber}; use crate::create_proof_processing_router; -// Test the /tee/proof_inputs endpoint by: -// 1. Mocking an object store with a single batch blob containing TEE verifier input -// 2. Populating the SQL db with relevant information about the status of the TEE verifier input and -// TEE proof generation -// 3. 
Sending a request to the /tee/proof_inputs endpoint and asserting that the response -// matches the file from the object store #[tokio::test] async fn request_tee_proof_inputs() { - // prepare a sample mocked TEE verifier input - - let batch_number = L1BatchNumber::from(1); - let tvi = V1TeeVerifierInput::new( - WitnessInputMerklePaths::new(0), - vec![], - L1BatchEnv { - previous_batch_hash: Some(H256([1; 32])), - number: batch_number, - timestamp: 0, - fee_input: Default::default(), - fee_account: Default::default(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 0, - timestamp: 0, - prev_block_hash: H256([1; 32]), - max_virtual_blocks_to_create: 0, - }, - }, - SystemEnv { - zk_porter_available: false, - version: Default::default(), - base_system_smart_contracts: BaseSystemContracts { - bootloader: SystemContractCode { - code: vec![U256([1; 4])], - hash: H256([1; 32]), - }, - default_aa: SystemContractCode { - code: vec![U256([1; 4])], - hash: H256([1; 32]), - }, - evm_emulator: None, - }, - bootloader_gas_limit: 0, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: 0, - chain_id: Default::default(), - }, - vec![(H256([1; 32]), vec![0, 1, 2, 3, 4])], - ); - let tvi = TeeVerifierInput::V1(tvi); - - // populate mocked object store with a single batch blob - - let blob_store = MockObjectStore::arc(); - let object_path = blob_store.put(batch_number, &tvi).await.unwrap(); - - // get connection to the SQL db and mock the status of the TEE proof generation - let db_conn_pool = ConnectionPool::test_pool().await; - mock_tee_batch_status(db_conn_pool.clone(), batch_number, &object_path).await; - - // test the /tee/proof_inputs endpoint; it should return the batch from the object store let app = create_proof_processing_router( - blob_store, - db_conn_pool, + MockObjectStore::arc(), + db_conn_pool.clone(), ProofDataHandlerConfig { http_port: 1337, proof_generation_timeout_in_secs: 10, - tee_support: true, + tee_config: TeeConfig { + tee_support: true, + first_tee_processed_batch: L1BatchNumber(0), + tee_proof_generation_timeout_in_secs: 600, + }, }, L1BatchCommitmentMode::Rollup, + L2ChainId::default(), ); - let req_body = Body::from(serde_json::to_vec(&json!({ "tee_type": "sgx" })).unwrap()); - let response = app - .oneshot( - Request::builder() - .method(Method::POST) - .uri("/tee/proof_inputs") - .header(http::header::CONTENT_TYPE, "application/json") - .body(req_body) - .unwrap(), - ) - .await - .unwrap(); - - assert_eq!(response.status(), StatusCode::OK); - - let body = axum::body::to_bytes(response.into_body(), usize::MAX) - .await - .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); - let deserialized: TeeVerifierInput = serde_json::from_value(json).unwrap(); - - assert_eq!(tvi, deserialized); + let test_cases = vec![ + (json!({ "tee_type": "sgx" }), StatusCode::NO_CONTENT), + ( + json!({ "tee_type": "Sgx" }), + StatusCode::UNPROCESSABLE_ENTITY, + ), + ]; + + for (body, expected_status) in test_cases { + let req_body = Body::from(serde_json::to_vec(&body).unwrap()); + let response = app + .clone() + .oneshot( + Request::builder() + .method(Method::POST) + .uri("/tee/proof_inputs") + .header(http::header::CONTENT_TYPE, "application/json") + .body(req_body) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), expected_status); + } } // Test /tee/submit_proofs endpoint using a mocked TEE proof and verify response and db state #[tokio::test] async fn submit_tee_proof() { - let 
blob_store = MockObjectStore::arc(); - let db_conn_pool = ConnectionPool::test_pool().await; - let object_path = "mocked_object_path"; let batch_number = L1BatchNumber::from(1); + let db_conn_pool = ConnectionPool::test_pool().await; - mock_tee_batch_status(db_conn_pool.clone(), batch_number, object_path).await; - - // send a request to the /tee/submit_proofs endpoint, using a mocked TEE proof + mock_tee_batch_status(db_conn_pool.clone(), batch_number).await; let tee_proof_request_str = r#"{ "signature": "0001020304", @@ -141,14 +79,19 @@ async fn submit_tee_proof() { serde_json::from_str::(tee_proof_request_str).unwrap(); let uri = format!("/tee/submit_proofs/{}", batch_number.0); let app = create_proof_processing_router( - blob_store, + MockObjectStore::arc(), db_conn_pool.clone(), ProofDataHandlerConfig { http_port: 1337, proof_generation_timeout_in_secs: 10, - tee_support: true, + tee_config: TeeConfig { + tee_support: true, + first_tee_processed_batch: L1BatchNumber(0), + tee_proof_generation_timeout_in_secs: 600, + }, }, L1BatchCommitmentMode::Rollup, + L2ChainId::default(), ); // this should fail because we haven't saved the attestation for the pubkey yet @@ -207,32 +150,15 @@ async fn submit_tee_proof() { async fn mock_tee_batch_status( db_conn_pool: ConnectionPool, batch_number: L1BatchNumber, - object_path: &str, ) { let mut proof_db_conn = db_conn_pool.connection().await.unwrap(); let mut proof_dal = proof_db_conn.tee_proof_generation_dal(); - let mut input_db_conn = db_conn_pool.connection().await.unwrap(); - let mut input_producer_dal = input_db_conn.tee_verifier_input_producer_dal(); // there should not be any batches awaiting proof in the db yet let oldest_batch_number = proof_dal.get_oldest_unpicked_batch().await.unwrap(); assert!(oldest_batch_number.is_none()); - // mock SQL table with relevant information about the status of the TEE verifier input - - input_producer_dal - .create_tee_verifier_input_producer_job(batch_number) - .await - .expect("Failed to create tee_verifier_input_producer_job"); - - // pretend that the TEE verifier input blob file was fetched successfully - - input_producer_dal - .mark_job_as_successful(batch_number, Instant::now(), object_path) - .await - .expect("Failed to mark tee_verifier_input_producer_job job as successful"); - // mock SQL table with relevant information about the status of TEE proof generation proof_dal diff --git a/core/node/shared_metrics/src/lib.rs b/core/node/shared_metrics/src/lib.rs index e0a7fa74ef42..2c41ec9293a0 100644 --- a/core/node/shared_metrics/src/lib.rs +++ b/core/node/shared_metrics/src/lib.rs @@ -29,7 +29,6 @@ pub enum InitStage { EthTxAggregator, EthTxManager, Tree, - TeeVerifierInputProducer, Consensus, DADispatcher, } @@ -45,7 +44,6 @@ impl fmt::Display for InitStage { Self::EthTxAggregator => formatter.write_str("eth_tx_aggregator"), Self::EthTxManager => formatter.write_str("eth_tx_manager"), Self::Tree => formatter.write_str("tree"), - Self::TeeVerifierInputProducer => formatter.write_str("tee_verifier_input_producer"), Self::Consensus => formatter.write_str("consensus"), Self::DADispatcher => formatter.write_str("da_dispatcher"), } diff --git a/core/node/state_keeper/Cargo.toml b/core/node/state_keeper/Cargo.toml index 0e924b9f066d..49d4209a4c4f 100644 --- a/core/node/state_keeper/Cargo.toml +++ b/core/node/state_keeper/Cargo.toml @@ -22,10 +22,8 @@ zksync_mempool.workspace = true zksync_shared_metrics.workspace = true zksync_config.workspace = true zksync_node_fee_model.workspace = true 
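The proof data handler tests earlier in this diff exercise the router without binding a socket: `tower::ServiceExt::oneshot` feeds a single request through the `axum` service and hands back the response. The pattern reduced to a self-contained sketch (the `/ping` route is illustrative only, not part of the handler):

use axum::{
    body::Body,
    http::{Method, Request, StatusCode},
    routing::get,
    Router,
};
use tower::ServiceExt; // provides `oneshot`

#[tokio::test]
async fn oneshot_router_smoke_test() {
    let app = Router::new().route("/ping", get(|| async { "pong" }));
    let response = app
        .clone() // `oneshot` consumes the service; clone to reuse `app` across cases
        .oneshot(
            Request::builder()
                .method(Method::GET)
                .uri("/ping")
                .body(Body::empty())
                .unwrap(),
        )
        .await
        .unwrap();
    assert_eq!(response.status(), StatusCode::OK);
}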
-zksync_utils.workspace = true zksync_contracts.workspace = true zksync_protobuf.workspace = true -zksync_test_account.workspace = true zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true zksync_vm_executor.workspace = true @@ -47,7 +45,6 @@ assert_matches.workspace = true rand.workspace = true tempfile.workspace = true test-casing.workspace = true -futures.workspace = true zksync_eth_client.workspace = true -zksync_system_constants.workspace = true +zksync_test_contracts.workspace = true diff --git a/core/node/state_keeper/src/executor/tests/mod.rs b/core/node/state_keeper/src/executor/tests/mod.rs index 04fb016ab639..eade0233d0e0 100644 --- a/core/node/state_keeper/src/executor/tests/mod.rs +++ b/core/node/state_keeper/src/executor/tests/mod.rs @@ -5,14 +5,12 @@ use rand::{thread_rng, Rng}; use test_casing::{test_casing, Product}; use zksync_dal::{ConnectionPool, Core}; use zksync_multivm::interface::{BatchTransactionExecutionResult, ExecutionResult, Halt}; -use zksync_test_account::Account; +use zksync_test_contracts::{Account, TestContract}; use zksync_types::{ - get_nonce_key, utils::storage_key_for_eth_balance, vm::FastVmMode, PriorityOpId, + get_nonce_key, utils::storage_key_for_eth_balance, vm::FastVmMode, web3, PriorityOpId, H256, }; -use self::tester::{ - AccountFailedCall, AccountLoadNextExecutable, StorageSnapshot, TestConfig, Tester, -}; +use self::tester::{AccountExt, StorageSnapshot, TestConfig, Tester}; mod read_storage_factory; mod tester; @@ -26,6 +24,11 @@ fn assert_executed(execution_result: &BatchTransactionExecutionResult) { ); } +fn assert_succeeded(execution_result: &BatchTransactionExecutionResult) { + let result = &execution_result.tx_result.result; + assert_matches!(result, ExecutionResult::Success { .. }) +} + /// Ensures that the transaction was rejected by the VM. 
fn assert_rejected(execution_result: &BatchTransactionExecutionResult) { let result = &execution_result.tx_result.result; @@ -173,6 +176,62 @@ async fn execute_l2_and_l1_txs(vm_mode: FastVmMode) { executor.finish_batch().await.unwrap(); } +#[tokio::test] +async fn working_with_transient_storage() { + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; + let mut alice = Account::random(); + + let mut tester = Tester::new(connection_pool, FastVmMode::Shadow); + tester.genesis().await; + tester.fund(&[alice.address()]).await; + let mut executor = tester + .create_batch_executor(StorageType::AsyncRocksdbCache) + .await; + + let deploy_tx = alice.deploy_storage_tester(); + let res = executor.execute_tx(deploy_tx.tx).await.unwrap(); + assert_succeeded(&res); + + let storage_test_address = deploy_tx.address; + let test_tx = alice.test_transient_store(storage_test_address); + let res = executor.execute_tx(test_tx).await.unwrap(); + assert_succeeded(&res); + + let test_tx = alice.assert_transient_value(storage_test_address, 0.into()); + let res = executor.execute_tx(test_tx).await.unwrap(); + assert_succeeded(&res); + + executor.finish_batch().await.unwrap(); +} + +#[tokio::test] +async fn decommitting_contract() { + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; + let mut alice = Account::random(); + + let mut tester = Tester::new(connection_pool, FastVmMode::Shadow); + tester.genesis().await; + tester.fund(&[alice.address()]).await; + let mut executor = tester + .create_batch_executor(StorageType::AsyncRocksdbCache) + .await; + + let deploy_tx = alice.deploy_precompiles_test(); + let res = executor.execute_tx(deploy_tx.tx).await.unwrap(); + assert_succeeded(&res); + + let keccak_bytecode_hash = web3::keccak256(TestContract::precompiles_test().bytecode); + let test_tx = alice.test_decommit( + deploy_tx.address, + deploy_tx.bytecode_hash, + H256(keccak_bytecode_hash), + ); + let res = executor.execute_tx(test_tx).await.unwrap(); + assert_succeeded(&res); + + executor.finish_batch().await.unwrap(); +} + /// Checks that we can successfully rollback the transaction and execute it once again. #[test_casing(3, FAST_VM_MODES)] #[tokio::test] @@ -296,7 +355,7 @@ async fn deploy_and_call_loadtest(vm_mode: FastVmMode) { ); assert_executed( &executor - .execute_tx(alice.loadnext_custom_writes_call(tx.address, 1, 500_000_000)) + .execute_tx(alice.loadnext_custom_initial_writes_call(tx.address, 1, 500_000_000)) .await .unwrap(), ); @@ -316,7 +375,7 @@ async fn deploy_failedcall(vm_mode: FastVmMode) { .create_batch_executor(StorageType::AsyncRocksdbCache) .await; - let tx = alice.deploy_failedcall_tx(); + let tx = alice.deploy_failed_call_tx(); let execute_tx = executor.execute_tx(tx.tx).await.unwrap(); assert_executed(&execute_tx); @@ -344,7 +403,7 @@ async fn execute_reverted_tx(vm_mode: FastVmMode) { assert_reverted( &executor - .execute_tx(alice.loadnext_custom_writes_call( + .execute_tx(alice.loadnext_custom_initial_writes_call( tx.address, 1, 1_000_000, // We provide enough gas for tx to be executed, but not enough for the call to be successful. 
)) diff --git a/core/node/state_keeper/src/executor/tests/tester.rs b/core/node/state_keeper/src/executor/tests/tester.rs index 79072f23aed9..3727d9c16bfb 100644 --- a/core/node/state_keeper/src/executor/tests/tester.rs +++ b/core/node/state_keeper/src/executor/tests/tester.rs @@ -6,10 +6,6 @@ use std::{collections::HashMap, fmt::Debug, sync::Arc}; use tempfile::TempDir; use tokio::{sync::watch, task::JoinHandle}; use zksync_config::configs::chain::StateKeeperConfig; -use zksync_contracts::{ - get_loadnext_contract, load_contract, read_bytecode, - test_contracts::LoadnextContractExecutionParams, TestContract, -}; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_multivm::{ interface::{ @@ -19,22 +15,25 @@ use zksync_multivm::{ utils::StorageWritesDeduplicator, vm_latest::constants::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; -use zksync_node_genesis::{create_genesis_l1_batch, GenesisParams}; +use zksync_node_genesis::create_genesis_l1_batch; use zksync_node_test_utils::{recover, Snapshot}; use zksync_state::{OwnedStorage, ReadStorageFactory, RocksdbStorageOptions}; -use zksync_test_account::{Account, DeployContractsTx, TxType}; +use zksync_test_contracts::{ + Account, DeployContractsTx, LoadnextContractExecutionParams, TestContract, TxType, +}; use zksync_types::{ block::L2BlockHasher, + commitment::PubdataParams, ethabi::Token, protocol_version::ProtocolSemanticVersion, snapshots::{SnapshotRecoveryStatus, SnapshotStorageLog}, system_contracts::get_system_smart_contracts, + u256_to_h256, utils::storage_key_for_standard_token_balance, vm::FastVmMode, AccountTreeId, Address, Execute, L1BatchNumber, L2BlockNumber, PriorityOpId, ProtocolVersionId, StorageLog, Transaction, H256, L2_BASE_TOKEN_ADDRESS, U256, }; -use zksync_utils::u256_to_h256; use zksync_vm_executor::batch::{MainBatchExecutorFactory, TraceCalls}; use super::{read_storage_factory::RocksdbStorageFactory, StorageType}; @@ -104,10 +103,9 @@ impl Tester { &mut self, storage_type: StorageType, ) -> Box> { - let (l1_batch_env, system_env) = self.default_batch_params(); + let (l1_batch_env, system_env, pubdata_params) = self.default_batch_params(); match storage_type { StorageType::AsyncRocksdbCache => { - let (l1_batch_env, system_env) = self.default_batch_params(); let (state_keeper_storage, task) = AsyncRocksdbCache::new( self.pool(), self.state_keeper_db_path(), @@ -122,6 +120,7 @@ impl Tester { Arc::new(state_keeper_storage), l1_batch_env, system_env, + pubdata_params, ) .await } @@ -133,12 +132,18 @@ impl Tester { )), l1_batch_env, system_env, + pubdata_params, ) .await } StorageType::Postgres => { - self.create_batch_executor_inner(Arc::new(self.pool()), l1_batch_env, system_env) - .await + self.create_batch_executor_inner( + Arc::new(self.pool()), + l1_batch_env, + system_env, + pubdata_params, + ) + .await } } } @@ -148,6 +153,7 @@ impl Tester { storage_factory: Arc, l1_batch_env: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Box> { let (_stop_sender, stop_receiver) = watch::channel(false); let storage = storage_factory @@ -158,11 +164,11 @@ impl Tester { if self.config.trace_calls { let mut executor = MainBatchExecutorFactory::::new(false); executor.set_fast_vm_mode(self.config.fast_vm_mode); - executor.init_batch(storage, l1_batch_env, system_env) + executor.init_batch(storage, l1_batch_env, system_env, pubdata_params) } else { let mut executor = MainBatchExecutorFactory::<()>::new(false); executor.set_fast_vm_mode(self.config.fast_vm_mode); - executor.init_batch(storage, l1_batch_env, 
system_env) + executor.init_batch(storage, l1_batch_env, system_env, pubdata_params) } } @@ -212,7 +218,7 @@ impl Tester { snapshot: &SnapshotRecoveryStatus, ) -> Box> { let current_timestamp = snapshot.l2_block_timestamp + 1; - let (mut l1_batch_env, system_env) = + let (mut l1_batch_env, system_env, pubdata_params) = self.batch_params(snapshot.l1_batch_number + 1, current_timestamp); l1_batch_env.previous_batch_hash = Some(snapshot.l1_batch_root_hash); l1_batch_env.first_l2_block = L2BlockEnv { @@ -222,11 +228,11 @@ impl Tester { max_virtual_blocks_to_create: 1, }; - self.create_batch_executor_inner(storage_factory, l1_batch_env, system_env) + self.create_batch_executor_inner(storage_factory, l1_batch_env, system_env, pubdata_params) .await } - pub(super) fn default_batch_params(&self) -> (L1BatchEnv, SystemEnv) { + pub(super) fn default_batch_params(&self) -> (L1BatchEnv, SystemEnv, PubdataParams) { // Not really important for the batch executor - it operates over a single batch. self.batch_params(L1BatchNumber(1), 100) } @@ -236,7 +242,7 @@ impl Tester { &self, l1_batch_number: L1BatchNumber, timestamp: u64, - ) -> (L1BatchEnv, SystemEnv) { + ) -> (L1BatchEnv, SystemEnv, PubdataParams) { let mut system_params = default_system_env(); if let Some(vm_gas_limit) = self.config.vm_gas_limit { system_params.bootloader_gas_limit = vm_gas_limit; @@ -245,7 +251,7 @@ impl Tester { self.config.validation_computational_gas_limit; let mut batch_params = default_l1_batch_env(l1_batch_number.0, timestamp, self.fee_account); batch_params.previous_batch_hash = Some(H256::zero()); // Not important in this context. - (batch_params, system_params) + (batch_params, system_params, PubdataParams::default()) } /// Performs the genesis in the storage. @@ -317,7 +323,7 @@ impl Tester { } } -pub trait AccountLoadNextExecutable { +pub(super) trait AccountExt { fn deploy_loadnext_tx(&mut self) -> DeployContractsTx; fn l1_execute(&mut self, serial_id: PriorityOpId) -> Transaction; @@ -327,7 +333,7 @@ pub trait AccountLoadNextExecutable { /// Returns an `execute` transaction with custom factory deps (which aren't used in a transaction, /// so they are mostly useful to test bytecode compression). 
fn execute_with_factory_deps(&mut self, factory_deps: Vec>) -> Transaction; - fn loadnext_custom_writes_call( + fn loadnext_custom_initial_writes_call( &mut self, address: Address, writes: u32, @@ -344,39 +350,38 @@ pub trait AccountLoadNextExecutable { gas_to_burn: u32, gas_limit: u32, ) -> Transaction; -} -pub trait AccountFailedCall { - fn deploy_failedcall_tx(&mut self) -> DeployContractsTx; -} + fn deploy_failed_call_tx(&mut self) -> DeployContractsTx; -impl AccountFailedCall for Account { - fn deploy_failedcall_tx(&mut self) -> DeployContractsTx { - let bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/failed-call/failed_call.sol/FailedCall.json"); - let failedcall_contract = TestContract { - bytecode, - contract: load_contract("etc/contracts-test-data/artifacts-zk/contracts/failed-call/failed_call.sol/FailedCall.json"), - factory_deps: vec![], - }; + fn deploy_storage_tester(&mut self) -> DeployContractsTx; - self.get_deploy_tx(&failedcall_contract.bytecode, None, TxType::L2) - } + fn test_transient_store(&mut self, address: Address) -> Transaction; + + fn assert_transient_value(&mut self, address: Address, expected: U256) -> Transaction; + + fn deploy_precompiles_test(&mut self) -> DeployContractsTx; + + fn test_decommit( + &mut self, + address: Address, + bytecode_hash: H256, + expected_keccak_hash: H256, + ) -> Transaction; } -impl AccountLoadNextExecutable for Account { +impl AccountExt for Account { fn deploy_loadnext_tx(&mut self) -> DeployContractsTx { - let loadnext_contract = get_loadnext_contract(); + let loadnext_contract = TestContract::load_test(); let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; self.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, + loadnext_contract.bytecode, Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), + loadnext_contract.factory_deps(), TxType::L2, ) } fn l1_execute(&mut self, serial_id: PriorityOpId) -> Transaction { - testonly::l1_transaction(self, serial_id) + self.get_l1_tx(Execute::transfer(Address::random(), 0.into()), serial_id.0) } /// Returns a valid `execute` transaction. @@ -399,17 +404,17 @@ impl AccountLoadNextExecutable for Account { /// Returns a transaction to the loadnext contract with custom amount of write requests. /// Increments the account nonce. - fn loadnext_custom_writes_call( + fn loadnext_custom_initial_writes_call( &mut self, address: Address, - writes: u32, + initial_writes: u32, gas_limit: u32, ) -> Transaction { // For each iteration of the expensive contract, there are two slots that are updated: // the length of the vector and the new slot with the element itself. let minimal_fee = 2 * testonly::DEFAULT_GAS_PER_PUBDATA - * writes + * initial_writes * INITIAL_STORAGE_WRITE_PUBDATA_BYTES as u32; let fee = testonly::fee(minimal_fee + gas_limit); @@ -419,7 +424,8 @@ impl AccountLoadNextExecutable for Account { contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, - writes: writes as usize, + initial_writes: initial_writes as usize, + repeated_writes: 100, events: 100, hashes: 100, recursive_calls: 0, @@ -436,7 +442,10 @@ impl AccountLoadNextExecutable for Account { /// Returns a valid `execute` transaction. /// Automatically increments nonce of the account. 
fn execute_with_gas_limit(&mut self, gas_limit: u32) -> Transaction { - testonly::l2_transaction(self, gas_limit) + self.get_l2_tx_for_execute( + Execute::transfer(Address::random(), 0.into()), + Some(testonly::fee(gas_limit)), + ) } /// Returns a transaction to the loadnext contract with custom gas limit and expected burned gas amount. @@ -454,17 +463,78 @@ impl AccountLoadNextExecutable for Account { Execute { contract_address: Some(address), calldata, - value: Default::default(), + value: 0.into(), factory_deps: vec![], }, Some(fee), ) } + + fn deploy_failed_call_tx(&mut self) -> DeployContractsTx { + self.get_deploy_tx(TestContract::failed_call().bytecode, None, TxType::L2) + } + + fn deploy_storage_tester(&mut self) -> DeployContractsTx { + self.get_deploy_tx(TestContract::storage_test().bytecode, None, TxType::L2) + } + + fn test_transient_store(&mut self, address: Address) -> Transaction { + let test_fn = TestContract::storage_test().function("testTransientStore"); + let calldata = test_fn.encode_input(&[]).unwrap(); + self.get_l2_tx_for_execute( + Execute { + contract_address: Some(address), + calldata, + value: 0.into(), + factory_deps: vec![], + }, + None, + ) + } + + fn assert_transient_value(&mut self, address: Address, expected: U256) -> Transaction { + let assert_fn = TestContract::storage_test().function("assertTValue"); + let calldata = assert_fn.encode_input(&[Token::Uint(expected)]).unwrap(); + self.get_l2_tx_for_execute( + Execute { + contract_address: Some(address), + calldata, + value: 0.into(), + factory_deps: vec![], + }, + None, + ) + } + + fn deploy_precompiles_test(&mut self) -> DeployContractsTx { + self.get_deploy_tx(TestContract::precompiles_test().bytecode, None, TxType::L2) + } + + fn test_decommit( + &mut self, + address: Address, + bytecode_hash: H256, + expected_keccak_hash: H256, + ) -> Transaction { + let assert_fn = TestContract::precompiles_test().function("callCodeOracle"); + let calldata = assert_fn.encode_input(&[ + Token::FixedBytes(bytecode_hash.0.to_vec()), + Token::FixedBytes(expected_keccak_hash.0.to_vec()), + ]); + self.get_l2_tx_for_execute( + Execute { + contract_address: Some(address), + calldata: calldata.unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ) + } } pub fn mock_loadnext_gas_burn_calldata(gas: u32) -> Vec { - let loadnext_contract = get_loadnext_contract(); - let contract_function = loadnext_contract.contract.function("burnGas").unwrap(); + let contract_function = TestContract::load_test().function("burnGas"); let params = vec![Token::Uint(U256::from(gas))]; contract_function .encode_input(¶ms) @@ -594,7 +664,8 @@ impl StorageSnapshot { L1BatchNumber(1), self.l2_block_number, snapshot_logs, - GenesisParams::mock(), + &BASE_SYSTEM_CONTRACTS, + ProtocolVersionId::latest(), ); let mut snapshot = recover(&mut storage, snapshot).await; snapshot.l2_block_hash = self.l2_block_hash; diff --git a/core/node/state_keeper/src/io/common/mod.rs b/core/node/state_keeper/src/io/common/mod.rs index 6bd881414a20..867ffa7fb371 100644 --- a/core/node/state_keeper/src/io/common/mod.rs +++ b/core/node/state_keeper/src/io/common/mod.rs @@ -3,7 +3,7 @@ use std::time::Duration; use anyhow::Context; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; -use zksync_types::{L1BatchNumber, L2BlockNumber, H256}; +use zksync_types::{commitment::PubdataParams, L1BatchNumber, L2BlockNumber, H256}; use super::PendingBatchData; @@ -85,6 +85,7 @@ pub async fn load_pending_batch( storage: &mut 
Connection<'_, Core>, system_env: SystemEnv, l1_batch_env: L1BatchEnv, + pubdata_params: PubdataParams, ) -> anyhow::Result { let pending_l2_blocks = storage .transactions_dal() @@ -104,6 +105,7 @@ pub async fn load_pending_batch( Ok(PendingBatchData { l1_batch_env, system_env, + pubdata_params, pending_l2_blocks, }) } diff --git a/core/node/state_keeper/src/io/common/tests.rs b/core/node/state_keeper/src/io/common/tests.rs index b2a24acb4956..2298d4c2ee74 100644 --- a/core/node/state_keeper/src/io/common/tests.rs +++ b/core/node/state_keeper/src/io/common/tests.rs @@ -9,7 +9,7 @@ use futures::FutureExt; use zksync_config::GenesisConfig; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, Core}; -use zksync_multivm::interface::TransactionExecutionMetrics; +use zksync_multivm::interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{ create_l1_batch, create_l2_block, create_l2_transaction, execute_l2_transaction, @@ -318,7 +318,7 @@ async fn loading_pending_batch_with_genesis() { .await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - let (system_env, l1_batch_env) = provider + let (system_env, l1_batch_env, pubdata_params) = provider .load_l1_batch_env( &mut storage, L1BatchNumber(1), @@ -331,7 +331,7 @@ async fn loading_pending_batch_with_genesis() { assert_eq!(l1_batch_env.first_l2_block.number, 1); - let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) + let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) .await .unwrap(); @@ -355,7 +355,11 @@ async fn store_pending_l2_blocks( let tx = create_l2_transaction(10, 100); storage .transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); let mut new_l2_block = create_l2_block(l2_block_number); @@ -396,7 +400,7 @@ async fn loading_pending_batch_after_snapshot_recovery() { .await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - let (system_env, l1_batch_env) = provider + let (system_env, l1_batch_env, pubdata_params) = provider .load_l1_batch_env( &mut storage, snapshot_recovery.l1_batch_number + 1, @@ -406,7 +410,7 @@ async fn loading_pending_batch_after_snapshot_recovery() { .await .unwrap() .expect("no L1 batch"); - let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) + let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) .await .unwrap(); diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index 229f54132f76..991ecee699c3 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -14,11 +14,12 @@ use zksync_mempool::L2TxFilter; use zksync_multivm::{interface::Halt, utils::derive_base_fee_and_gas_per_pubdata}; use zksync_node_fee_model::BatchFeeModelInputProvider; use zksync_types::{ - block::UnsealedL1BatchHeader, protocol_upgrade::ProtocolUpgradeTx, utils::display_timestamp, + block::UnsealedL1BatchHeader, + commitment::{L1BatchCommitmentMode, PubdataParams}, + protocol_upgrade::ProtocolUpgradeTx, + utils::display_timestamp, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, U256, }; -// TODO (SMA-1206): use seconds 
instead of milliseconds. -use zksync_utils::time::millis_since_epoch; use zksync_vm_executor::storage::L1BatchParamsProvider; use crate::{ @@ -33,6 +34,7 @@ use crate::{ IoSealCriteria, L2BlockMaxPayloadSizeSealer, TimeoutSealer, UnexecutableReason, }, updates::UpdatesManager, + utils::millis_since_epoch, MempoolGuard, }; @@ -55,6 +57,8 @@ pub struct MempoolIO { // Used to keep track of gas prices to set accepted price per pubdata byte in blocks. batch_fee_input_provider: Arc, chain_id: L2ChainId, + l2_da_validator_address: Option
, + pubdata_type: L1BatchCommitmentMode, } impl IoSealCriteria for MempoolIO { @@ -97,7 +101,7 @@ impl StateKeeperIO for MempoolIO { L2BlockSealProcess::clear_pending_l2_block(&mut storage, cursor.next_l2_block - 1).await?; - let Some((system_env, l1_batch_env)) = self + let Some((system_env, l1_batch_env, pubdata_params)) = self .l1_batch_params_provider .load_l1_batch_env( &mut storage, @@ -109,26 +113,24 @@ impl StateKeeperIO for MempoolIO { else { return Ok((cursor, None)); }; - let pending_batch_data = load_pending_batch(&mut storage, system_env, l1_batch_env) - .await - .with_context(|| { - format!( - "failed loading data for re-execution for pending L1 batch #{}", - cursor.l1_batch - ) - })?; + let pending_batch_data = + load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) + .await + .with_context(|| { + format!( + "failed loading data for re-execution for pending L1 batch #{}", + cursor.l1_batch + ) + })?; - let PendingBatchData { - l1_batch_env, - system_env, - pending_l2_blocks, - } = pending_batch_data; // Initialize the filter for the transactions that come after the pending batch. // We use values from the pending block to match the filter with one used before the restart. - let (base_fee, gas_per_pubdata) = - derive_base_fee_and_gas_per_pubdata(l1_batch_env.fee_input, system_env.version.into()); + let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( + pending_batch_data.l1_batch_env.fee_input, + pending_batch_data.system_env.version.into(), + ); self.filter = L2TxFilter { - fee_input: l1_batch_env.fee_input, + fee_input: pending_batch_data.l1_batch_env.fee_input, fee_per_gas: base_fee, gas_per_pubdata: gas_per_pubdata as u32, }; @@ -136,20 +138,14 @@ impl StateKeeperIO for MempoolIO { storage .blocks_dal() .ensure_unsealed_l1_batch_exists( - l1_batch_env + pending_batch_data + .l1_batch_env .clone() - .into_unsealed_header(Some(system_env.version)), + .into_unsealed_header(Some(pending_batch_data.system_env.version)), ) .await?; - Ok(( - cursor, - Some(PendingBatchData { - l1_batch_env, - system_env, - pending_l2_blocks, - }), - )) + Ok((cursor, Some(pending_batch_data))) } async fn wait_for_new_batch_params( @@ -166,10 +162,11 @@ impl StateKeeperIO for MempoolIO { .get_unsealed_l1_batch() .await? { + let protocol_version = unsealed_storage_batch + .protocol_version + .context("unsealed batch is missing protocol version")?; return Ok(Some(L1BatchParams { - protocol_version: unsealed_storage_batch - .protocol_version - .expect("unsealed batch is missing protocol version"), + protocol_version, validation_computational_gas_limit: self.validation_computational_gas_limit, operator_address: unsealed_storage_batch.fee_address, fee_input: unsealed_storage_batch.fee_input, @@ -178,6 +175,7 @@ impl StateKeeperIO for MempoolIO { // This value is effectively ignored by the protocol. virtual_blocks: 1, }, + pubdata_params: self.pubdata_params(protocol_version)?, })); } @@ -247,6 +245,7 @@ impl StateKeeperIO for MempoolIO { // This value is effectively ignored by the protocol. 
virtual_blocks: 1, }, + pubdata_params: self.pubdata_params(protocol_version)?, })); } Ok(None) @@ -278,6 +277,7 @@ impl StateKeeperIO for MempoolIO { async fn wait_for_next_tx( &mut self, max_wait: Duration, + l2_block_timestamp: u64, ) -> anyhow::Result> { let started_at = Instant::now(); while started_at.elapsed() <= max_wait { @@ -285,7 +285,7 @@ impl StateKeeperIO for MempoolIO { let maybe_tx = self.mempool.next_transaction(&self.filter); get_latency.observe(); - if let Some(tx) = maybe_tx { + if let Some((tx, constraint)) = maybe_tx { // Reject transactions with too big gas limit. They are also rejected on the API level, but // we need to secure ourselves in case some tx will somehow get into mempool. if tx.gas_limit() > self.max_allowed_tx_gas_limit { @@ -298,6 +298,23 @@ impl StateKeeperIO for MempoolIO { .await?; continue; } + + // Reject transactions that violate block.timestamp constraints. Such transactions should be + // rejected at the API level, but we need to protect ourselves in case if a transaction + // goes outside of the allowed range while being in the mempool + let matches_range = constraint + .timestamp_asserter_range + .map_or(true, |x| x.contains(&l2_block_timestamp)); + + if !matches_range { + self.reject( + &tx, + UnexecutableReason::Halt(Halt::FailedBlockTimestampAssertion), + ) + .await?; + continue; + } + return Ok(Some(tx)); } else { tokio::time::sleep(self.delay_interval).await; @@ -309,9 +326,9 @@ impl StateKeeperIO for MempoolIO { async fn rollback(&mut self, tx: Transaction) -> anyhow::Result<()> { // Reset nonces in the mempool. - self.mempool.rollback(&tx); + let constraint = self.mempool.rollback(&tx); // Insert the transaction back. - self.mempool.insert(vec![tx], HashMap::new()); + self.mempool.insert(vec![(tx, constraint)], HashMap::new()); Ok(()) } @@ -454,6 +471,7 @@ async fn sleep_past(timestamp: u64, l2_block: L2BlockNumber) -> u64 { } impl MempoolIO { + #[allow(clippy::too_many_arguments)] pub fn new( mempool: MempoolGuard, batch_fee_input_provider: Arc, @@ -462,6 +480,8 @@ impl MempoolIO { fee_account: Address, delay_interval: Duration, chain_id: L2ChainId, + l2_da_validator_address: Option
, + pubdata_type: L1BatchCommitmentMode, ) -> anyhow::Result { Ok(Self { mempool, @@ -477,8 +497,26 @@ impl MempoolIO { delay_interval, batch_fee_input_provider, chain_id, + l2_da_validator_address, + pubdata_type, }) } + + fn pubdata_params(&self, protocol_version: ProtocolVersionId) -> anyhow::Result { + let pubdata_params = match ( + protocol_version.is_pre_gateway(), + self.l2_da_validator_address, + ) { + (true, _) => PubdataParams::default(), + (false, Some(l2_da_validator_address)) => PubdataParams { + l2_da_validator_address, + pubdata_type: self.pubdata_type, + }, + (false, None) => anyhow::bail!("L2 DA validator address not found"), + }; + + Ok(pubdata_params) + } } /// Getters required for testing the MempoolIO. @@ -492,9 +530,9 @@ impl MempoolIO { #[cfg(test)] mod tests { use tokio::time::timeout_at; - use zksync_utils::time::seconds_since_epoch; use super::*; + use crate::tests::seconds_since_epoch; // This test defensively uses large deadlines in order to account for tests running in parallel etc. #[tokio::test] diff --git a/core/node/state_keeper/src/io/mod.rs b/core/node/state_keeper/src/io/mod.rs index 0fc5ebb6c082..fbc481fb678d 100644 --- a/core/node/state_keeper/src/io/mod.rs +++ b/core/node/state_keeper/src/io/mod.rs @@ -4,8 +4,9 @@ use async_trait::async_trait; use zksync_contracts::BaseSystemContracts; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; use zksync_types::{ - block::L2BlockExecutionData, fee_model::BatchFeeInput, protocol_upgrade::ProtocolUpgradeTx, - Address, L1BatchNumber, L2ChainId, ProtocolVersionId, Transaction, H256, + block::L2BlockExecutionData, commitment::PubdataParams, fee_model::BatchFeeInput, + protocol_upgrade::ProtocolUpgradeTx, Address, L1BatchNumber, L2ChainId, ProtocolVersionId, + Transaction, H256, }; use zksync_vm_executor::storage::l1_batch_params; @@ -38,6 +39,7 @@ pub struct PendingBatchData { /// (e.g. timestamp) are the same, so transaction would have the same result after re-execution. pub(crate) l1_batch_env: L1BatchEnv, pub(crate) system_env: SystemEnv, + pub(crate) pubdata_params: PubdataParams, /// List of L2 blocks and corresponding transactions that were executed within batch. pub(crate) pending_l2_blocks: Vec, } @@ -70,6 +72,8 @@ pub struct L1BatchParams { pub fee_input: BatchFeeInput, /// Parameters of the first L2 block in the batch. pub first_l2_block: L2BlockParams, + /// Params related to how the pubdata should be processed by the bootloader in the batch. + pub pubdata_params: PubdataParams, } impl L1BatchParams { @@ -79,8 +83,8 @@ impl L1BatchParams { contracts: BaseSystemContracts, cursor: &IoCursor, previous_batch_hash: H256, - ) -> (SystemEnv, L1BatchEnv) { - l1_batch_params( + ) -> (SystemEnv, L1BatchEnv, PubdataParams) { + let (system_env, l1_batch_env) = l1_batch_params( cursor.l1_batch, self.operator_address, self.first_l2_block.timestamp, @@ -93,7 +97,9 @@ impl L1BatchParams { self.protocol_version, self.first_l2_block.virtual_blocks, chain_id, - ) + ); + + (system_env, l1_batch_env, self.pubdata_params) } } @@ -131,8 +137,11 @@ pub trait StateKeeperIO: 'static + Send + Sync + fmt::Debug + IoSealCriteria { /// Blocks for up to `max_wait` until the next transaction is available for execution. /// Returns `None` if no transaction became available until the timeout. 
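The `l2_block_timestamp` argument threaded into `wait_for_next_tx` below exists for the mempool-side check shown earlier: a transaction is handed out only if the block timestamp falls inside its optional timestamp-asserter range, where `None` means unconstrained. That predicate in isolation, with the mempool's constraint type reduced to the single field the check reads (a sketch, not the real type):

use std::ops::Range;

// Reduced stand-in for the mempool's per-transaction time-range constraint.
struct Constraint {
    timestamp_asserter_range: Option<Range<u64>>,
}

// Mirrors the filter in `MempoolIO::wait_for_next_tx`: no range means "always allowed".
fn matches_block_timestamp(c: &Constraint, l2_block_timestamp: u64) -> bool {
    c.timestamp_asserter_range
        .as_ref()
        .map_or(true, |range| range.contains(&l2_block_timestamp))
}

fn main() {
    // No constraint: always allowed.
    assert!(matches_block_timestamp(&Constraint { timestamp_asserter_range: None }, 42));
    let constrained = Constraint { timestamp_asserter_range: Some(10..100) };
    // Inside the range: allowed.
    assert!(matches_block_timestamp(&constrained, 42));
    // The range is end-exclusive, so 100 itself is rejected.
    assert!(!matches_block_timestamp(&constrained, 100));
}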
- async fn wait_for_next_tx(&mut self, max_wait: Duration) - -> anyhow::Result>; + async fn wait_for_next_tx( + &mut self, + max_wait: Duration, + l2_block_timestamp: u64, + ) -> anyhow::Result>; /// Marks the transaction as "not executed", so it can be retrieved from the IO again. async fn rollback(&mut self, tx: Transaction) -> anyhow::Result<()>; /// Marks the transaction as "rejected", e.g. one that is not correct and can't be executed. diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index 3e11285e11f1..d8fd99bfc95d 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -7,8 +7,7 @@ use async_trait::async_trait; use tokio::sync::{mpsc, oneshot}; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_shared_metrics::{BlockStage, APP_METRICS}; -use zksync_types::{writes::TreeWrite, Address}; -use zksync_utils::u256_to_h256; +use zksync_types::{u256_to_h256, writes::TreeWrite, Address, ProtocolVersionId}; use crate::{ io::{ @@ -29,7 +28,7 @@ struct Completable { #[derive(Debug)] pub struct StateKeeperPersistence { pool: ConnectionPool, - l2_shared_bridge_addr: Address, + l2_legacy_shared_bridge_addr: Option
, pre_insert_txs: bool, insert_protective_reads: bool, commands_sender: mpsc::Sender<Completable<L2BlockSealCommand>>, @@ -41,13 +40,45 @@ impl StateKeeperPersistence { const SHUTDOWN_MSG: &'static str = "L2 block sealer unexpectedly shut down"; + async fn validate_l2_legacy_shared_bridge_addr( + pool: &ConnectionPool<Core>, + l2_legacy_shared_bridge_addr: Option<Address>
, + ) -> anyhow::Result<()> { + let mut connection = pool.connection_tagged("state_keeper").await?; + + if let Some(l2_block) = connection + .blocks_dal() + .get_earliest_l2_block_number() + .await + .context("failed to load earliest l2 block number")? + { + let header = connection + .blocks_dal() + .get_l2_block_header(l2_block) + .await + .context("failed to load L2 block header")? + .context("missing L2 block header")?; + let protocol_version = header + .protocol_version + .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); + + if protocol_version.is_pre_gateway() && l2_legacy_shared_bridge_addr.is_none() { + anyhow::bail!("Missing `l2_legacy_shared_bridge_addr` for chain that was initialized before gateway upgrade"); + } + } + + Ok(()) + } + /// Creates a sealer that will use the provided Postgres connection and will have the specified /// `command_capacity` for unprocessed sealing commands. - pub fn new( + pub async fn new( pool: ConnectionPool<Core>, - l2_shared_bridge_addr: Address, + l2_legacy_shared_bridge_addr: Option<Address>
, mut command_capacity: usize, - ) -> (Self, L2BlockSealerTask) { + ) -> anyhow::Result<(Self, L2BlockSealerTask)> { + Self::validate_l2_legacy_shared_bridge_addr(&pool, l2_legacy_shared_bridge_addr).await?; + let is_sync = command_capacity == 0; command_capacity = command_capacity.max(1); @@ -60,14 +91,14 @@ impl StateKeeperPersistence { }; let this = Self { pool, - l2_shared_bridge_addr, + l2_legacy_shared_bridge_addr, pre_insert_txs: false, insert_protective_reads: true, commands_sender, latest_completion_receiver: None, is_sync, }; - (this, sealer) + Ok((this, sealer)) } pub fn with_tx_insertion(mut self) -> Self { @@ -157,8 +188,8 @@ impl StateKeeperOutputHandler for StateKeeperPersistence { } async fn handle_l2_block(&mut self, updates_manager: &UpdatesManager) -> anyhow::Result<()> { - let command = - updates_manager.seal_l2_block_command(self.l2_shared_bridge_addr, self.pre_insert_txs); + let command = updates_manager + .seal_l2_block_command(self.l2_legacy_shared_bridge_addr, self.pre_insert_txs); self.submit_l2_block(command).await; Ok(()) } @@ -174,7 +205,7 @@ impl StateKeeperOutputHandler for StateKeeperPersistence { updates_manager .seal_l1_batch( self.pool.clone(), - self.l2_shared_bridge_addr, + self.l2_legacy_shared_bridge_addr, self.insert_protective_reads, ) .await @@ -347,7 +378,7 @@ impl StateKeeperOutputHandler for TreeWritesPersistence { #[cfg(test)] mod tests { - use std::collections::{HashMap, HashSet}; + use std::collections::HashSet; use assert_matches::assert_matches; use futures::FutureExt; @@ -355,10 +386,9 @@ mod tests { use zksync_multivm::interface::{FinishedL1Batch, VmExecutionMetrics}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_types::{ - api::TransactionStatus, block::BlockGasCount, writes::StateDiffRecord, L1BatchNumber, - L2BlockNumber, StorageLogKind, H256, U256, + api::TransactionStatus, block::BlockGasCount, h256_to_u256, writes::StateDiffRecord, + L1BatchNumber, L2BlockNumber, StorageLogKind, H256, U256, }; - use zksync_utils::h256_to_u256; use super::*; use crate::{ @@ -392,8 +422,13 @@ mod tests { .unwrap(); drop(storage); - let (persistence, l2_block_sealer) = - StateKeeperPersistence::new(pool.clone(), Address::default(), l2_block_sealer_capacity); + let (persistence, l2_block_sealer) = StateKeeperPersistence::new( + pool.clone(), + Some(Address::default()), + l2_block_sealer_capacity, + ) + .await + .unwrap(); let mut output_handler = OutputHandler::new(Box::new(persistence)) .with_handler(Box::new(TreeWritesPersistence::new(pool.clone()))); tokio::spawn(l2_block_sealer.run()); @@ -451,7 +486,8 @@ mod tests { pool: &ConnectionPool, ) -> H256 { let l1_batch_env = default_l1_batch_env(1, 1, Address::random()); - let mut updates = UpdatesManager::new(&l1_batch_env, &default_system_env()); + let mut updates = + UpdatesManager::new(&l1_batch_env, &default_system_env(), Default::default()); pool.connection() .await .unwrap() @@ -472,7 +508,6 @@ mod tests { tx, tx_result, vec![], - HashMap::new(), BlockGasCount::default(), VmExecutionMetrics::default(), vec![], @@ -538,7 +573,9 @@ mod tests { drop(storage); let (mut persistence, l2_block_sealer) = - StateKeeperPersistence::new(pool.clone(), Address::default(), 1); + StateKeeperPersistence::new(pool.clone(), Some(Address::default()), 1) + .await + .unwrap(); persistence = persistence.with_tx_insertion().without_protective_reads(); let mut output_handler = OutputHandler::new(Box::new(persistence)); tokio::spawn(l2_block_sealer.run()); @@ -577,11 +614,13 @@ mod tests { 
async fn l2_block_sealer_handle_blocking() { let pool = ConnectionPool::constrained_test_pool(1).await; let (mut persistence, mut sealer) = - StateKeeperPersistence::new(pool, Address::default(), 1); + StateKeeperPersistence::new(pool, Some(Address::default()), 1) + .await + .unwrap(); // The first command should be successfully submitted immediately. let mut updates_manager = create_updates_manager(); - let seal_command = updates_manager.seal_l2_block_command(Address::default(), false); + let seal_command = updates_manager.seal_l2_block_command(Some(Address::default()), false); persistence.submit_l2_block(seal_command).await; // The second command should lead to blocking @@ -589,7 +628,7 @@ mod tests { timestamp: 2, virtual_blocks: 1, }); - let seal_command = updates_manager.seal_l2_block_command(Address::default(), false); + let seal_command = updates_manager.seal_l2_block_command(Some(Address::default()), false); { let submit_future = persistence.submit_l2_block(seal_command); futures::pin_mut!(submit_future); @@ -617,7 +656,7 @@ mod tests { timestamp: 3, virtual_blocks: 1, }); - let seal_command = updates_manager.seal_l2_block_command(Address::default(), false); + let seal_command = updates_manager.seal_l2_block_command(Some(Address::default()), false); persistence.submit_l2_block(seal_command).await; let command = sealer.commands_receiver.recv().await.unwrap(); command.completion_sender.send(()).unwrap(); @@ -628,12 +667,15 @@ mod tests { async fn l2_block_sealer_handle_parallel_processing() { let pool = ConnectionPool::constrained_test_pool(1).await; let (mut persistence, mut sealer) = - StateKeeperPersistence::new(pool, Address::default(), 5); + StateKeeperPersistence::new(pool, Some(Address::default()), 5) + .await + .unwrap(); // 5 L2 block sealing commands can be submitted without blocking. 
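Both sealer tests here rely on the bounded command channel that `StateKeeperPersistence::new` sets up (a `command_capacity` of 0 is coerced to 1 and flagged as fully synchronous via `is_sync`). A minimal sketch of that backpressure behavior, assuming only `tokio` — it is not the sealer's actual code:

```rust
use tokio::sync::mpsc;

// Capacity-1 channel, as in the blocking test above: the first send is
// accepted immediately; a second send suspends until the receiver drains
// a command, which is exactly what the blocking test asserts.
async fn backpressure_demo() {
    let (tx, mut rx) = mpsc::channel::<u32>(1);
    tx.send(1).await.unwrap(); // accepted immediately
    // `tx.send(2).await` would now block; draining the channel unblocks it:
    assert_eq!(rx.recv().await, Some(1));
}
```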
let mut updates_manager = create_updates_manager(); for i in 1..=5 { - let seal_command = updates_manager.seal_l2_block_command(Address::default(), false); + let seal_command = + updates_manager.seal_l2_block_command(Some(Address::default()), false); updates_manager.push_l2_block(L2BlockParams { timestamp: i, virtual_blocks: 1, diff --git a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs index 7ef466805e36..a6356a838602 100644 --- a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs +++ b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs @@ -3,13 +3,12 @@ use async_trait::async_trait; use once_cell::sync::Lazy; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::interface::VmEvent; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_system_constants::{CONTRACT_DEPLOYER_ADDRESS, L2_NATIVE_TOKEN_VAULT_ADDRESS}; use zksync_types::{ - ethabi, + ethabi, h256_to_address, tokens::{TokenInfo, TokenMetadata}, Address, L2BlockNumber, H256, }; -use zksync_utils::h256_to_account_address; use crate::{ io::seal_logic::SealStrategy, @@ -18,7 +17,7 @@ use crate::{ }; fn extract_added_tokens( - l2_shared_bridge_addr: Address, + l2_token_deployer_addr: Address, all_generated_events: &[VmEvent], ) -> Vec { let deployed_tokens = all_generated_events @@ -28,9 +27,9 @@ fn extract_added_tokens( event.address == CONTRACT_DEPLOYER_ADDRESS && event.indexed_topics.len() == 4 && event.indexed_topics[0] == VmEvent::DEPLOY_EVENT_SIGNATURE - && h256_to_account_address(&event.indexed_topics[1]) == l2_shared_bridge_addr + && h256_to_address(&event.indexed_topics[1]) == l2_token_deployer_addr }) - .map(|event| h256_to_account_address(&event.indexed_topics[3])); + .map(|event| h256_to_address(&event.indexed_topics[3])); extract_added_token_info_from_addresses(all_generated_events, deployed_tokens) } @@ -73,7 +72,7 @@ fn extract_added_token_info_from_addresses( || event.indexed_topics[0] == *BRIDGE_INITIALIZATION_SIGNATURE_OLD) }) .map(|event| { - let l1_token_address = h256_to_account_address(&event.indexed_topics[1]); + let l1_token_address = h256_to_address(&event.indexed_topics[1]); let mut dec_ev = ethabi::decode( &[ ethabi::ParamType::String, @@ -334,8 +333,10 @@ impl L2BlockSealSubtask for InsertTokensSubtask { ) -> anyhow::Result<()> { let is_fictive = command.is_l2_block_fictive(); let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::ExtractAddedTokens, is_fictive); - let added_tokens = - extract_added_tokens(command.l2_shared_bridge_addr, &command.l2_block.events); + let token_deployer_address = command + .l2_legacy_shared_bridge_addr + .unwrap_or(L2_NATIVE_TOKEN_VAULT_ADDRESS); + let added_tokens = extract_added_tokens(token_deployer_address, &command.l2_block.events); progress.observe(added_tokens.len()); let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::InsertTokens, is_fictive); @@ -456,7 +457,7 @@ impl L2BlockSealSubtask for InsertL2ToL1LogsSubtask { mod tests { use zksync_dal::{ConnectionPool, Core}; use zksync_multivm::{ - interface::{TransactionExecutionResult, TxExecutionStatus}, + interface::{tracer::ValidationTraces, TransactionExecutionResult, TxExecutionStatus}, utils::{get_max_batch_gas_limit, get_max_gas_per_pubdata_byte}, zk_evm_latest::ethereum_types::H256, VmVersion, @@ -464,11 +465,12 @@ mod tests { use zksync_node_test_utils::create_l2_transaction; use zksync_types::{ block::L2BlockHeader, + commitment::PubdataParams, + h256_to_u256, 
l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, AccountTreeId, Address, L1BatchNumber, ProtocolVersionId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue, }; - use zksync_utils::h256_to_u256; use super::*; use crate::updates::L2BlockUpdates; @@ -484,7 +486,7 @@ mod tests { .await .unwrap() .transactions_dal() - .insert_transaction_l2(&tx, Default::default()) + .insert_transaction_l2(&tx, Default::default(), ValidationTraces::default()) .await .unwrap(); let tx_hash = tx.hash(); @@ -552,8 +554,9 @@ mod tests { base_fee_per_gas: Default::default(), base_system_contracts_hashes: Default::default(), protocol_version: Some(ProtocolVersionId::latest()), - l2_shared_bridge_addr: Default::default(), + l2_legacy_shared_bridge_addr: Default::default(), pre_insert_txs: false, + pubdata_params: PubdataParams::default(), }; // Run. @@ -616,6 +619,7 @@ mod tests { virtual_blocks: l2_block_seal_command.l2_block.virtual_blocks, gas_limit: get_max_batch_gas_limit(VmVersion::latest()), logs_bloom: Default::default(), + pubdata_params: l2_block_seal_command.pubdata_params, }; connection .protocol_versions_dal() diff --git a/core/node/state_keeper/src/io/seal_logic/mod.rs b/core/node/state_keeper/src/io/seal_logic/mod.rs index 5859d27786d9..419413e127d3 100644 --- a/core/node/state_keeper/src/io/seal_logic/mod.rs +++ b/core/node/state_keeper/src/io/seal_logic/mod.rs @@ -22,11 +22,11 @@ use zksync_types::{ helpers::unix_timestamp_ms, l2_to_l1_log::UserL2ToL1Log, tx::IncludedTxLocation, + u256_to_h256, utils::display_timestamp, Address, BloomInput, ExecuteTransactionCommon, ProtocolVersionId, StorageKey, StorageLog, Transaction, H256, }; -use zksync_utils::u256_to_h256; use crate::{ io::seal_logic::l2_block_seal_subtasks::L2BlockSealProcess, @@ -46,7 +46,7 @@ impl UpdatesManager { pub(super) async fn seal_l1_batch( &self, pool: ConnectionPool, - l2_shared_bridge_addr: Address, + l2_legacy_shared_bridge_addr: Option
, insert_protective_reads: bool, ) -> anyhow::Result<()> { let started_at = Instant::now(); @@ -59,7 +59,7 @@ impl UpdatesManager { let progress = L1_BATCH_METRICS.start(L1BatchSealStage::FictiveL2Block); // Seal fictive L2 block with last events and storage logs. let l2_block_command = self.seal_l2_block_command( - l2_shared_bridge_addr, + l2_legacy_shared_bridge_addr, false, // fictive L2 blocks don't have txs, so it's fine to pass `false` here. ); @@ -335,8 +335,6 @@ impl L2BlockSealCommand { /// that are created after the last processed tx in the L1 batch: after the last transaction is processed, /// the bootloader enters the "tip" phase in which it can still generate events (e.g., /// one for sending fees to the operator). - /// - /// `l2_shared_bridge_addr` is required to extract the information on newly added tokens. async fn seal_inner( &self, strategy: &mut SealStrategy<'_>, @@ -393,6 +391,7 @@ impl L2BlockSealCommand { virtual_blocks: self.l2_block.virtual_blocks, gas_limit: get_max_batch_gas_limit(definite_vm_version), logs_bloom, + pubdata_params: self.pubdata_params, }; let mut connection = strategy.connection().await?; diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index 566eebf7ab72..5a44bf71ad39 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -1,29 +1,33 @@ -use std::{collections::HashMap, time::Duration}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; use test_casing::test_casing; use zksync_contracts::BaseSystemContractsHashes; -use zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_mempool::L2TxFilter; use zksync_multivm::{ - interface::{TransactionExecutionMetrics, VmEvent, VmExecutionMetrics}, + interface::{ + tracer::ValidationTraces, TransactionExecutionMetrics, VmEvent, VmExecutionMetrics, + }, utils::derive_base_fee_and_gas_per_pubdata, }; use zksync_node_test_utils::prepare_recovery_snapshot; +use zksync_system_constants::KNOWN_CODES_STORAGE_ADDRESS; use zksync_types::{ block::{BlockGasCount, L2BlockHasher}, - commitment::L1BatchCommitmentMode, + bytecode::BytecodeHash, + commitment::{L1BatchCommitmentMode, PubdataParams}, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, + l2::L2Tx, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersion, - ProtocolVersionId, StorageKey, H256, U256, + ProtocolVersionId, StorageKey, TransactionTimeRangeConstraint, H256, U256, }; -use zksync_utils::time::seconds_since_epoch; use self::tester::Tester; use crate::{ io::{seal_logic::l2_block_seal_subtasks::L2BlockSealProcess, StateKeeperIO}, mempool_actor::l2_tx_filter, testonly::BASE_SYSTEM_CONTRACTS, - tests::{create_execution_result, create_transaction, Query}, + tests::{create_execution_result, create_transaction, seconds_since_epoch, Query}, updates::{L2BlockSealCommand, L2BlockUpdates, UpdatesManager}, StateKeeperOutputHandler, StateKeeperPersistence, }; @@ -130,6 +134,7 @@ async fn test_filter_with_no_pending_batch(commitment_mode: L1BatchCommitmentMod &mut guard, want_filter.fee_per_gas, want_filter.gas_per_pubdata, + TransactionTimeRangeConstraint::default(), ); // Now, given that there is a transaction matching the expected filter, waiting for the new batch params @@ -169,7 +174,12 @@ async fn test_timestamps_are_distinct( ) .await .unwrap(); - tester.insert_tx(&mut guard, tx_filter.fee_per_gas, tx_filter.gas_per_pubdata); + tester.insert_tx( + &mut 
guard, + tx_filter.fee_per_gas, + tx_filter.gas_per_pubdata, + TransactionTimeRangeConstraint::default(), + ); let l1_batch_params = mempool .wait_for_new_batch_params(&io_cursor, Duration::from_secs(10)) @@ -217,6 +227,29 @@ async fn l1_batch_timestamp_respects_prev_l2_block_with_clock_skew( test_timestamps_are_distinct(connection_pool, current_timestamp + 2, true, tester).await; } +fn create_block_seal_command( + l1_batch_number: L1BatchNumber, + l2_block: L2BlockUpdates, +) -> L2BlockSealCommand { + L2BlockSealCommand { + l1_batch_number, + l2_block, + first_tx_index: 0, + fee_account_address: Address::repeat_byte(0x23), + fee_input: BatchFeeInput::PubdataIndependent(PubdataIndependentBatchFeeModelInput { + l1_gas_price: 100, + fair_l2_gas_price: 100, + fair_pubdata_price: 100, + }), + base_fee_per_gas: 10, + base_system_contracts_hashes: BaseSystemContractsHashes::default(), + protocol_version: Some(ProtocolVersionId::latest()), + l2_legacy_shared_bridge_addr: Some(Address::default()), + pre_insert_txs: false, + pubdata_params: PubdataParams::default(), + } +} + #[tokio::test] async fn processing_storage_logs_when_sealing_l2_block() { let connection_pool = @@ -249,7 +282,6 @@ async fn processing_storage_logs_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], - HashMap::new(), vec![], ); @@ -268,27 +300,11 @@ async fn processing_storage_logs_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], - HashMap::new(), vec![], ); let l1_batch_number = L1BatchNumber(2); - let seal_command = L2BlockSealCommand { - l1_batch_number, - l2_block, - first_tx_index: 0, - fee_account_address: Address::repeat_byte(0x23), - fee_input: BatchFeeInput::PubdataIndependent(PubdataIndependentBatchFeeModelInput { - l1_gas_price: 100, - fair_l2_gas_price: 100, - fair_pubdata_price: 100, - }), - base_fee_per_gas: 10, - base_system_contracts_hashes: BaseSystemContractsHashes::default(), - protocol_version: Some(ProtocolVersionId::latest()), - l2_shared_bridge_addr: Address::default(), - pre_insert_txs: false, - }; + let seal_command = create_block_seal_command(l1_batch_number, l2_block); connection_pool .connection() .await @@ -358,27 +374,11 @@ async fn processing_events_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], - HashMap::new(), vec![], ); } - let seal_command = L2BlockSealCommand { - l1_batch_number, - l2_block, - first_tx_index: 0, - fee_account_address: Address::repeat_byte(0x23), - fee_input: BatchFeeInput::PubdataIndependent(PubdataIndependentBatchFeeModelInput { - l1_gas_price: 100, - fair_l2_gas_price: 100, - fair_pubdata_price: 100, - }), - base_fee_per_gas: 10, - base_system_contracts_hashes: BaseSystemContractsHashes::default(), - protocol_version: Some(ProtocolVersionId::latest()), - l2_shared_bridge_addr: Address::default(), - pre_insert_txs: false, - }; + let seal_command = create_block_seal_command(l1_batch_number, l2_block); pool.connection() .await .unwrap() @@ -402,6 +402,120 @@ async fn processing_events_when_sealing_l2_block() { } } +fn bytecode_publishing_events( + l1_batch_number: L1BatchNumber, + tx_index: u32, + bytecode_hashes: impl Iterator, +) -> Vec { + bytecode_hashes + .map(|bytecode_hash| VmEvent { + location: (l1_batch_number, tx_index), + address: KNOWN_CODES_STORAGE_ADDRESS, + indexed_topics: vec![ + VmEvent::PUBLISHED_BYTECODE_SIGNATURE, + bytecode_hash, + H256::from_low_u64_be(1), // sentBytecodeToL1 + ], + value: vec![], + }) + .collect() +} + 
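The test below splits factory deps into static ones (attached to `tx.execute.factory_deps` and hashed with the zkEVM scheme) and dynamic ones (surfaced by the VM in `dynamic_factory_deps`, e.g. from EVM contract deployment, and hashed with the EVM scheme). A sketch of just that hashing split, reusing the `BytecodeHash` constructors that appear in this diff; the byte values are arbitrary:

```rust
use zksync_types::bytecode::BytecodeHash;

fn hashing_split() {
    let era_bytecode = vec![0xAA; 32]; // one 32-byte word, as in the test bytecodes
    let evm_bytecode = vec![0xBB; 96]; // raw EVM bytecode, separate hashing scheme
    // Static deps use the zkEVM hashing scheme...
    let static_hash = BytecodeHash::for_bytecode(&era_bytecode).value();
    // ...dynamic (EVM) deps use a distinct scheme, so the hashes never collide.
    let dynamic_hash = BytecodeHash::for_evm_bytecode(&evm_bytecode).value();
    assert_ne!(static_hash, dynamic_hash);
}
```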
+#[tokio::test] +async fn processing_dynamic_factory_deps_when_sealing_l2_block() { + let pool = + ConnectionPool::::constrained_test_pool(L2BlockSealProcess::subtasks_len()).await; + let l1_batch_number = L1BatchNumber(2); + let l2_block_number = L2BlockNumber(3); + let mut l2_block = L2BlockUpdates::new( + 0, + l2_block_number, + H256::zero(), + 1, + ProtocolVersionId::latest(), + ); + + let static_factory_deps: Vec<_> = (0_u8..10) + .map(|byte| { + let era_bytecode = vec![byte; 32]; + ( + BytecodeHash::for_bytecode(&era_bytecode).value(), + era_bytecode, + ) + }) + .collect(); + let dynamic_factory_deps: Vec<_> = (0_u8..10) + .map(|byte| { + let evm_bytecode = vec![byte; 96]; + ( + BytecodeHash::for_evm_bytecode(&evm_bytecode).value(), + evm_bytecode, + ) + }) + .collect(); + let mut all_factory_deps = static_factory_deps.clone(); + all_factory_deps.extend_from_slice(&dynamic_factory_deps); + + let events = bytecode_publishing_events( + l1_batch_number, + 0, + static_factory_deps + .iter() + .chain(&dynamic_factory_deps) + .map(|(hash, _)| *hash), + ); + + let mut tx = create_transaction(10, 100); + tx.execute.factory_deps = static_factory_deps + .into_iter() + .map(|(_, bytecode)| bytecode) + .collect(); + let mut execution_result = create_execution_result([]); + execution_result.dynamic_factory_deps = dynamic_factory_deps.into_iter().collect(); + execution_result.logs.events = events; + l2_block.extend_from_executed_transaction( + tx, + execution_result, + BlockGasCount::default(), + VmExecutionMetrics::default(), + vec![], + vec![], + ); + + assert_eq!( + l2_block.new_factory_deps.len(), + all_factory_deps.len(), + "{:?}", + l2_block.new_factory_deps + ); + for (hash, bytecode) in &all_factory_deps { + assert_eq!( + l2_block.new_factory_deps.get(hash), + Some(bytecode), + "{hash:?}" + ); + } + + let seal_command = create_block_seal_command(l1_batch_number, l2_block); + pool.connection() + .await + .unwrap() + .protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + seal_command.seal(pool.clone()).await.unwrap(); + + let mut conn = pool.connection().await.unwrap(); + let persisted_factory_deps = conn + .factory_deps_dal() + .dump_all_factory_deps_for_tests() + .await; + for (hash, bytecode) in &all_factory_deps { + assert_eq!(persisted_factory_deps.get(hash), Some(bytecode), "{hash:?}"); + } +} + #[test_casing(2, COMMITMENT_MODES)] #[tokio::test] async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCommitmentMode) { @@ -429,12 +543,9 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom &mut mempool_guard, tx_filter.fee_per_gas, tx_filter.gas_per_pubdata, + TransactionTimeRangeConstraint::default(), ); - storage - .transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) - .await - .unwrap(); + insert_l2_transaction(&mut storage, &tx).await; let previous_batch_hash = mempool .load_batch_state_hash(snapshot_recovery.l1_batch_number) @@ -447,27 +558,28 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom .await .unwrap() .expect("no batch params generated"); - let (system_env, l1_batch_env) = l1_batch_params.into_env( + let (system_env, l1_batch_env, pubdata_params) = l1_batch_params.into_env( L2ChainId::default(), BASE_SYSTEM_CONTRACTS.clone(), &cursor, previous_batch_hash, ); - let mut updates = UpdatesManager::new(&l1_batch_env, &system_env); + let mut updates = UpdatesManager::new(&l1_batch_env, &system_env, 
pubdata_params); let tx_hash = tx.hash(); updates.extend_from_executed_transaction( tx.into(), create_execution_result([]), vec![], - HashMap::new(), BlockGasCount::default(), VmExecutionMetrics::default(), vec![], ); let (mut persistence, l2_block_sealer) = - StateKeeperPersistence::new(connection_pool.clone(), Address::default(), 0); + StateKeeperPersistence::new(connection_pool.clone(), Some(Address::default()), 0) + .await + .unwrap(); tokio::spawn(l2_block_sealer.run()); persistence.handle_l2_block(&updates).await.unwrap(); @@ -580,12 +692,9 @@ async fn continue_unsealed_batch_on_restart(commitment_mode: L1BatchCommitmentMo &mut mempool_guard, tx_filter.fee_per_gas, tx_filter.gas_per_pubdata, + TransactionTimeRangeConstraint::default(), ); - storage - .transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) - .await - .unwrap(); + insert_l2_transaction(&mut storage, &tx).await; let old_l1_batch_params = mempool .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) @@ -640,3 +749,118 @@ async fn insert_unsealed_batch_on_init(commitment_mode: L1BatchCommitmentMode) { assert_eq!(l1_batch_params.fee_input, fee_input); assert_eq!(l1_batch_params.first_l2_block.timestamp, 2); } + +#[tokio::test] +async fn test_mempool_with_timestamp_assertion() { + let connection_pool = ConnectionPool::::constrained_test_pool(2).await; + // what commitment mode to use is irrelevant here + let tester = Tester::new(L1BatchCommitmentMode::Rollup); + let mut storage = connection_pool.connection().await.unwrap(); + + tester.genesis(&connection_pool).await; + + // Insert a sealed batch so there will be a `prev_l1_batch_state_root`. + // These gas values are random and don't matter for filter calculation. + let tx_result = tester + .insert_l2_block(&connection_pool, 1, 5, BatchFeeInput::l1_pegged(55, 555)) + .await; + tester + .insert_sealed_batch(&connection_pool, 1, &[tx_result]) + .await; + + // Create a copy of the tx filter that the mempool will use. + let want_filter = l2_tx_filter( + &tester.create_batch_fee_input_provider().await, + ProtocolVersionId::latest().into(), + ) + .await + .unwrap(); + + // Create a mempool without pending batch and ensure that filter is not initialized just yet. + let (mut mempool, mut guard) = tester.create_test_mempool_io(connection_pool).await; + mempool.initialize().await.unwrap(); + assert_eq!(mempool.filter(), &L2TxFilter::default()); + + let system_time = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_secs(); + + // inserting 3 transactions - a good one, sandwiched in between two bad ones. The good one should + // be returned by wait_for_next_tx, while two bad ones should be rejected. 
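(As a sketch of the rule these three constraints exercise — simplified, not the mempool's literal code: a transaction is eligible only if the current L2 block timestamp passed to `wait_for_next_tx` lies inside its optional asserted range.)

```rust
use std::ops::Range;

// `range` mirrors `TransactionTimeRangeConstraint::timestamp_asserter_range`;
// `None` means the tx asserts nothing about the block timestamp.
fn is_eligible(range: Option<&Range<u64>>, l2_block_timestamp: u64) -> bool {
    range.map_or(true, |r| r.contains(&l2_block_timestamp))
}
```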
+ let rejected_tx_1 = tester.insert_tx( + &mut guard, + want_filter.fee_per_gas, + want_filter.gas_per_pubdata, + TransactionTimeRangeConstraint { + timestamp_asserter_range: Some(system_time - 20000..system_time - 10000), + }, + ); + let expected_tx = tester.insert_tx( + &mut guard, + want_filter.fee_per_gas, + want_filter.gas_per_pubdata, + TransactionTimeRangeConstraint { + timestamp_asserter_range: Some(system_time - 1000..system_time + 1000), + }, + ); + let rejected_tx_2 = tester.insert_tx( + &mut guard, + want_filter.fee_per_gas, + want_filter.gas_per_pubdata, + TransactionTimeRangeConstraint { + timestamp_asserter_range: Some(system_time + 10000..system_time + 20000), + }, + ); + insert_l2_transaction(&mut storage, &rejected_tx_1).await; + insert_l2_transaction(&mut storage, &expected_tx).await; + insert_l2_transaction(&mut storage, &rejected_tx_2).await; + + let tx = mempool + .wait_for_next_tx(Duration::from_secs(2), system_time) + .await + .unwrap() + .expect("No expected transaction in the mempool"); + assert_eq!(expected_tx.hash(), tx.hash()); + + let next_tx = mempool + .wait_for_next_tx(Duration::from_secs(2), system_time) + .await + .expect("Should be no more transactions in the mempool"); + assert!(next_tx.is_none()); + + // verify that two transactions have been rejected + let rejected_storage_tx_1 = storage + .transactions_dal() + .get_storage_tx_by_hash(rejected_tx_1.hash()) + .await + .unwrap() + .expect("Failed to find transaction"); + assert_eq!( + "rejected: Transaction failed block.timestamp assertion", + rejected_storage_tx_1.error.unwrap() + ); + + let rejected_storage_tx_2 = storage + .transactions_dal() + .get_storage_tx_by_hash(rejected_tx_2.hash()) + .await + .unwrap() + .expect("Failed to find transaction"); + assert_eq!( + "rejected: Transaction failed block.timestamp assertion", + rejected_storage_tx_2.error.unwrap() + ); +} + +async fn insert_l2_transaction(storage: &mut Connection<'_, Core>, tx: &L2Tx) { + storage + .transactions_dal() + .insert_transaction_l2( + tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) + .await + .unwrap(); +} diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index 062fc426e8cc..32a746eecdfb 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -4,14 +4,16 @@ use std::{slice, sync::Arc, time::Duration}; use zksync_base_token_adjuster::NoOpRatioProvider; use zksync_config::{ - configs::{chain::StateKeeperConfig, eth_sender::PubdataSendingMode, wallets::Wallets}, + configs::{chain::StateKeeperConfig, wallets::Wallets}, GasAdjusterConfig, }; use zksync_contracts::BaseSystemContracts; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_eth_client::{clients::MockSettlementLayer, BaseFees}; use zksync_multivm::{ - interface::{TransactionExecutionMetrics, TransactionExecutionResult}, + interface::{ + tracer::ValidationTraces, TransactionExecutionMetrics, TransactionExecutionResult, + }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; use zksync_node_fee_model::{ @@ -28,8 +30,10 @@ use zksync_types::{ fee_model::{BatchFeeInput, FeeModelConfig, FeeModelConfigV2}, l2::L2Tx, protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, + pubdata_da::PubdataSendingMode, system_contracts::get_system_smart_contracts, - L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, H256, + L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, TransactionTimeRangeConstraint, + 
H256, }; use crate::{MempoolGuard, MempoolIO}; @@ -146,6 +150,8 @@ impl Tester { wallets.state_keeper.unwrap().fee_account.address(), Duration::from_secs(1), L2ChainId::from(270), + Some(Default::default()), + Default::default(), ) .unwrap(); @@ -185,7 +191,11 @@ impl Tester { let tx = create_l2_transaction(10, 100); storage .transactions_dal() - .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &tx, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); storage @@ -249,9 +259,10 @@ impl Tester { guard: &mut MempoolGuard, fee_per_gas: u64, gas_per_pubdata: u32, + constraint: TransactionTimeRangeConstraint, ) -> L2Tx { let tx = create_l2_transaction(fee_per_gas, gas_per_pubdata.into()); - guard.insert(vec![tx.clone().into()], Default::default()); + guard.insert(vec![(tx.clone().into(), constraint)], Default::default()); tx } } diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index 22f24573070b..fe37ee8d8dd6 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -17,8 +17,9 @@ use zksync_multivm::{ use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_state::{OwnedStorage, ReadStorageFactory}; use zksync_types::{ - block::L2BlockExecutionData, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTx, - protocol_version::ProtocolVersionId, utils::display_timestamp, L1BatchNumber, Transaction, + block::L2BlockExecutionData, commitment::PubdataParams, l2::TransactionType, + protocol_upgrade::ProtocolUpgradeTx, protocol_version::ProtocolVersionId, + utils::display_timestamp, L1BatchNumber, Transaction, }; use crate::{ @@ -116,6 +117,7 @@ impl ZkSyncStateKeeper { let PendingBatchData { mut l1_batch_env, mut system_env, + mut pubdata_params, pending_l2_blocks, } = match pending_batch_params { Some(params) => { @@ -132,7 +134,7 @@ impl ZkSyncStateKeeper { } None => { tracing::info!("There is no open pending batch, starting a new empty batch"); - let (system_env, l1_batch_env) = self + let (system_env, l1_batch_env, pubdata_params) = self .wait_for_new_batch_env(&cursor) .await .map_err(|e| e.context("wait_for_new_batch_params()"))?; @@ -140,18 +142,19 @@ impl ZkSyncStateKeeper { l1_batch_env, pending_l2_blocks: Vec::new(), system_env, + pubdata_params, } } }; let protocol_version = system_env.version; - let mut updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); + let mut updates_manager = UpdatesManager::new(&l1_batch_env, &system_env, pubdata_params); let mut protocol_upgrade_tx: Option = self .load_protocol_upgrade_tx(&pending_l2_blocks, protocol_version, l1_batch_env.number) .await?; let mut batch_executor = self - .create_batch_executor(l1_batch_env.clone(), system_env.clone()) + .create_batch_executor(l1_batch_env.clone(), system_env.clone(), pubdata_params) .await?; self.restore_state( &mut *batch_executor, @@ -201,10 +204,11 @@ impl ZkSyncStateKeeper { // Start the new batch. 
next_cursor.l1_batch += 1; - (system_env, l1_batch_env) = self.wait_for_new_batch_env(&next_cursor).await?; - updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); + (system_env, l1_batch_env, pubdata_params) = + self.wait_for_new_batch_env(&next_cursor).await?; + updates_manager = UpdatesManager::new(&l1_batch_env, &system_env, pubdata_params); batch_executor = self - .create_batch_executor(l1_batch_env.clone(), system_env.clone()) + .create_batch_executor(l1_batch_env.clone(), system_env.clone(), pubdata_params) .await?; let version_changed = system_env.version != sealed_batch_protocol_version; @@ -221,6 +225,7 @@ impl ZkSyncStateKeeper { &mut self, l1_batch_env: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Result>, Error> { let storage = self .storage_factory @@ -230,7 +235,7 @@ impl ZkSyncStateKeeper { .ok_or(Error::Canceled)?; Ok(self .batch_executor - .init_batch(storage, l1_batch_env, system_env)) + .init_batch(storage, l1_batch_env, system_env, pubdata_params)) } /// This function is meant to be called only once during the state-keeper initialization. @@ -327,7 +332,7 @@ impl ZkSyncStateKeeper { async fn wait_for_new_batch_env( &mut self, cursor: &IoCursor, - ) -> Result<(SystemEnv, L1BatchEnv), Error> { + ) -> Result<(SystemEnv, L1BatchEnv, PubdataParams), Error> { // `io.wait_for_new_batch_params(..)` is not cancel-safe; once we get new batch params, we must hold onto them // until we get the rest of parameters from I/O or receive a stop signal. let params = self.wait_for_new_batch_params(cursor).await?; @@ -498,9 +503,8 @@ impl ZkSyncStateKeeper { updates_manager.extend_from_executed_transaction( tx, - *tx_result.clone(), + *tx_result, compressed_bytecodes, - tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, call_tracer_result, @@ -584,11 +588,10 @@ impl ZkSyncStateKeeper { Self::start_next_l2_block(new_l2_block_params, updates_manager, batch_executor) .await?; } - let waiting_latency = KEEPER_METRICS.waiting_for_tx.start(); let Some(tx) = self .io - .wait_for_next_tx(POLL_WAIT_DURATION) + .wait_for_next_tx(POLL_WAIT_DURATION, updates_manager.l2_block.timestamp) .instrument(info_span!("wait_for_next_tx")) .await .context("error waiting for next transaction")? @@ -625,9 +628,8 @@ impl ZkSyncStateKeeper { } = *tx_metrics; updates_manager.extend_from_executed_transaction( tx, - *tx_result.clone(), + *tx_result, compressed_bytecodes, - tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, call_tracer_result, @@ -687,6 +689,7 @@ impl ZkSyncStateKeeper { tx_result, tx_metrics, compressed_bytecodes, + call_tracer_result, .. 
} = exec_result else { @@ -706,12 +709,11 @@ impl ZkSyncStateKeeper { } = *tx_metrics; updates_manager.extend_from_executed_transaction( tx, - *tx_result.clone(), + *tx_result, compressed_bytecodes, - tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, - vec![], + call_tracer_result, ); Ok(()) } diff --git a/core/node/state_keeper/src/mempool_actor.rs b/core/node/state_keeper/src/mempool_actor.rs index dbe1e4cb977f..fea1fcf89291 100644 --- a/core/node/state_keeper/src/mempool_actor.rs +++ b/core/node/state_keeper/src/mempool_actor.rs @@ -89,34 +89,55 @@ impl MempoolFetcher { .await .context("failed getting pending protocol version")?; - let l2_tx_filter = l2_tx_filter( - self.batch_fee_input_provider.as_ref(), - protocol_version.into(), - ) - .await - .context("failed creating L2 transaction filter")?; + let (fee_per_gas, gas_per_pubdata) = if let Some(unsealed_batch) = storage + .blocks_dal() + .get_unsealed_l1_batch() + .await + .context("failed getting unsealed batch")? + { + let (fee_per_gas, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( + unsealed_batch.fee_input, + protocol_version.into(), + ); + (fee_per_gas, gas_per_pubdata as u32) + } else { + let filter = l2_tx_filter( + self.batch_fee_input_provider.as_ref(), + protocol_version.into(), + ) + .await + .context("failed creating L2 transaction filter")?; - let transactions = storage + (filter.fee_per_gas, filter.gas_per_pubdata) + }; + + let transactions_with_constraints = storage .transactions_dal() .sync_mempool( &mempool_info.stashed_accounts, &mempool_info.purged_accounts, - l2_tx_filter.gas_per_pubdata, - l2_tx_filter.fee_per_gas, + gas_per_pubdata, + fee_per_gas, self.sync_batch_size, ) .await .context("failed syncing mempool")?; + + let transactions: Vec<_> = transactions_with_constraints + .iter() + .map(|(t, _c)| t) + .collect(); + let nonces = get_transaction_nonces(&mut storage, &transactions).await?; drop(storage); #[cfg(test)] { - let transaction_hashes = transactions.iter().map(Transaction::hash).collect(); + let transaction_hashes = transactions.iter().map(|x| x.hash()).collect(); self.transaction_hashes_sender.send(transaction_hashes).ok(); } let all_transactions_loaded = transactions.len() < self.sync_batch_size; - self.mempool.insert(transactions, nonces); + self.mempool.insert(transactions_with_constraints, nonces); latency.observe(); if all_transactions_loaded { @@ -130,7 +151,7 @@ impl MempoolFetcher { /// Loads nonces for all distinct `transactions` initiators from the storage. async fn get_transaction_nonces( storage: &mut Connection<'_, Core>, - transactions: &[Transaction], + transactions: &[&Transaction], ) -> anyhow::Result> { let (nonce_keys, address_by_nonce_key): (Vec<_>, HashMap<_, _>) = transactions .iter() @@ -150,7 +171,9 @@ async fn get_transaction_nonces( Ok(nonce_values .into_iter() .map(|(nonce_key, nonce_value)| { - let nonce = Nonce(zksync_utils::h256_to_u32(nonce_value)); + // `unwrap()` is safe by construction. 
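(The `unwrap()` is safe because slicing an `H256`'s 32-byte backing array at `[28..]` always yields exactly 4 bytes. A standalone rendering of the same decoding, with a plain byte array standing in for `H256`:)

```rust
// Reads the big-endian `u32` stored in the low 4 bytes of a 32-byte storage value.
fn u32_from_storage_value(value: [u8; 32]) -> u32 {
    let be_u32_bytes: [u8; 4] = value[28..].try_into().unwrap(); // always 4 bytes
    u32::from_be_bytes(be_u32_bytes)
}
```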
+ let be_u32_bytes: [u8; 4] = nonce_value[28..].try_into().unwrap(); + let nonce = Nonce(u32::from_be_bytes(be_u32_bytes)); (address_by_nonce_key[&nonce_key], nonce) }) .collect()) @@ -158,12 +181,13 @@ async fn get_transaction_nonces( #[cfg(test)] mod tests { - use zksync_multivm::interface::TransactionExecutionMetrics; + use zksync_multivm::interface::{tracer::ValidationTraces, TransactionExecutionMetrics}; use zksync_node_fee_model::MockBatchFeeParamsProvider; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::create_l2_transaction; - use zksync_types::{L2BlockNumber, PriorityOpId, ProtocolVersionId, StorageLog, H256}; - use zksync_utils::u256_to_h256; + use zksync_types::{ + u256_to_h256, L2BlockNumber, PriorityOpId, ProtocolVersionId, StorageLog, H256, + }; use super::*; @@ -200,7 +224,7 @@ mod tests { let nonces = get_transaction_nonces( &mut storage, - &[transaction.into(), other_transaction.into()], + &[&transaction.into(), &other_transaction.into()], ) .await .unwrap(); @@ -246,7 +270,11 @@ mod tests { let mut storage = pool.connection().await.unwrap(); storage .transactions_dal() - .insert_transaction_l2(&transaction, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &transaction, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); drop(storage); @@ -302,7 +330,11 @@ mod tests { let mut storage = pool.connection().await.unwrap(); storage .transactions_dal() - .insert_transaction_l2(&transaction, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &transaction, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); drop(storage); @@ -355,7 +387,11 @@ mod tests { .unwrap(); storage .transactions_dal() - .insert_transaction_l2(&transaction, TransactionExecutionMetrics::default()) + .insert_transaction_l2( + &transaction, + TransactionExecutionMetrics::default(), + ValidationTraces::default(), + ) .await .unwrap(); drop(storage); diff --git a/core/node/state_keeper/src/seal_criteria/mod.rs b/core/node/state_keeper/src/seal_criteria/mod.rs index 962cc807318b..4c6f56a6f5b7 100644 --- a/core/node/state_keeper/src/seal_criteria/mod.rs +++ b/core/node/state_keeper/src/seal_criteria/mod.rs @@ -20,18 +20,17 @@ use zksync_multivm::{ use zksync_types::{ block::BlockGasCount, utils::display_timestamp, ProtocolVersionId, Transaction, }; -use zksync_utils::time::millis_since; - -mod conditional_sealer; -pub(super) mod criteria; pub use self::conditional_sealer::{ConditionalSealer, NoopSealer, SequencerSealer}; -use super::{ +use crate::{ metrics::AGGREGATION_METRICS, updates::UpdatesManager, - utils::{gas_count_from_tx_and_metrics, gas_count_from_writes}, + utils::{gas_count_from_tx_and_metrics, gas_count_from_writes, millis_since}, }; +mod conditional_sealer; +pub(super) mod criteria; + fn halt_as_metric_label(halt: &Halt) -> &'static str { match halt { Halt::ValidationFailed(_) => "ValidationFailed", @@ -54,6 +53,7 @@ fn halt_as_metric_label(halt: &Halt) -> &'static str { Halt::VMPanic => "VMPanic", Halt::TracerCustom(_) => "TracerCustom", Halt::FailedToPublishCompressedBytecodes => "FailedToPublishCompressedBytecodes", + Halt::FailedBlockTimestampAssertion => "FailedBlockTimestampAssertion", } } @@ -277,19 +277,16 @@ impl L2BlockMaxPayloadSizeSealer { #[cfg(test)] mod tests { - use std::collections::HashMap; - - use zksync_utils::time::seconds_since_epoch; - use super::*; - use crate::tests::{create_execution_result, create_transaction, 
create_updates_manager}; + use crate::tests::{ + create_execution_result, create_transaction, create_updates_manager, seconds_since_epoch, + }; fn apply_tx_to_manager(tx: Transaction, manager: &mut UpdatesManager) { manager.extend_from_executed_transaction( tx, create_execution_result([]), vec![], - HashMap::new(), BlockGasCount::default(), VmExecutionMetrics::default(), vec![], diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index d1e82c44bd6f..3da666628b1b 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -8,17 +8,16 @@ use zksync_dal::{ConnectionPool, Core, CoreDal as _}; use zksync_multivm::interface::{ executor::{BatchExecutor, BatchExecutorFactory}, storage::{InMemoryStorage, StorageView}, - BatchTransactionExecutionResult, ExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionResultAndLogs, + BatchTransactionExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, + VmExecutionResultAndLogs, }; use zksync_state::OwnedStorage; -use zksync_test_account::Account; use zksync_types::{ - fee::Fee, utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Execute, - L1BatchNumber, L2BlockNumber, PriorityOpId, StorageLog, Transaction, L2_BASE_TOKEN_ADDRESS, - SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, + commitment::PubdataParams, fee::Fee, u256_to_h256, + utils::storage_key_for_standard_token_balance, AccountTreeId, Address, L1BatchNumber, + L2BlockNumber, StorageLog, Transaction, L2_BASE_TOKEN_ADDRESS, SYSTEM_CONTEXT_MINIMAL_BASE_FEE, + U256, }; -use zksync_utils::u256_to_h256; pub mod test_batch_executor; @@ -28,13 +27,7 @@ pub(super) static BASE_SYSTEM_CONTRACTS: Lazy = /// Creates a `TxExecutionResult` object denoting a successful tx execution. pub(crate) fn successful_exec() -> BatchTransactionExecutionResult { BatchTransactionExecutionResult { - tx_result: Box::new(VmExecutionResultAndLogs { - result: ExecutionResult::Success { output: vec![] }, - logs: Default::default(), - statistics: Default::default(), - refunds: Default::default(), - new_known_factory_deps: None, - }), + tx_result: Box::new(VmExecutionResultAndLogs::mock_success()), compressed_bytecodes: vec![], call_traces: vec![], } @@ -50,6 +43,7 @@ impl BatchExecutorFactory for MockBatchExecutor { _storage: OwnedStorage, _l1_batch_env: L1BatchEnv, _system_env: SystemEnv, + _pubdata_params: PubdataParams, ) -> Box> { Box::new(Self) } @@ -126,29 +120,3 @@ pub fn fee(gas_limit: u32) -> Fee { gas_per_pubdata_limit: U256::from(DEFAULT_GAS_PER_PUBDATA), } } - -/// Returns a valid L2 transaction. -/// Automatically increments nonce of the account. 
-pub fn l2_transaction(account: &mut Account, gas_limit: u32) -> Transaction { - account.get_l2_tx_for_execute( - Execute { - contract_address: Some(Address::random()), - calldata: vec![], - value: Default::default(), - factory_deps: vec![], - }, - Some(fee(gas_limit)), - ) -} - -pub fn l1_transaction(account: &mut Account, serial_id: PriorityOpId) -> Transaction { - account.get_l1_tx( - Execute { - contract_address: Some(Address::random()), - value: Default::default(), - calldata: vec![], - factory_deps: vec![], - }, - serial_id.0, - ) -} diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index cb282f3b7d6d..5625add021bf 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -27,8 +27,9 @@ use zksync_multivm::{ use zksync_node_test_utils::create_l2_transaction; use zksync_state::{interface::StorageView, OwnedStorage, ReadStorageFactory}; use zksync_types::{ - fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, protocol_upgrade::ProtocolUpgradeTx, - Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, + commitment::PubdataParams, fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, + protocol_upgrade::ProtocolUpgradeTx, Address, L1BatchNumber, L2BlockNumber, L2ChainId, + ProtocolVersionId, Transaction, H256, }; use crate::{ @@ -257,14 +258,11 @@ pub(crate) fn random_upgrade_tx(tx_number: u64) -> ProtocolUpgradeTx { pub(crate) fn successful_exec_with_log() -> BatchTransactionExecutionResult { BatchTransactionExecutionResult { tx_result: Box::new(VmExecutionResultAndLogs { - result: ExecutionResult::Success { output: vec![] }, logs: VmExecutionLogs { user_l2_to_l1_logs: vec![UserL2ToL1Log::default()], ..VmExecutionLogs::default() }, - statistics: Default::default(), - refunds: Default::default(), - new_known_factory_deps: None, + ..VmExecutionResultAndLogs::mock_success() }), compressed_bytecodes: vec![], call_traces: vec![], @@ -274,13 +272,9 @@ pub(crate) fn successful_exec_with_log() -> BatchTransactionExecutionResult { /// Creates a `TxExecutionResult` object denoting a tx that was rejected. 
pub(crate) fn rejected_exec(reason: Halt) -> BatchTransactionExecutionResult { BatchTransactionExecutionResult { - tx_result: Box::new(VmExecutionResultAndLogs { - result: ExecutionResult::Halt { reason }, - logs: Default::default(), - statistics: Default::default(), - refunds: Default::default(), - new_known_factory_deps: None, - }), + tx_result: Box::new(VmExecutionResultAndLogs::mock(ExecutionResult::Halt { + reason, + })), compressed_bytecodes: vec![], call_traces: vec![], } @@ -423,6 +417,7 @@ impl BatchExecutorFactory for TestBatchExecutorBuilder { _storage: OwnedStorage, _l1_batch_env: L1BatchEnv, _system_env: SystemEnv, + _pubdata_params: PubdataParams, ) -> Box> { let executor = TestBatchExecutor::new(self.txs.pop_front().unwrap(), self.rollback_set.clone()); @@ -702,6 +697,7 @@ impl StateKeeperIO for TestIO { timestamp: self.timestamp, virtual_blocks: 1, }, + pubdata_params: Default::default(), }; self.l2_block_number += 1; self.timestamp += 1; @@ -728,6 +724,7 @@ impl StateKeeperIO for TestIO { async fn wait_for_next_tx( &mut self, max_wait: Duration, + _l2_block_timestamp: u64, ) -> anyhow::Result> { let action = self.pop_next_item("wait_for_next_tx"); diff --git a/core/node/state_keeper/src/tests/mod.rs b/core/node/state_keeper/src/tests/mod.rs index 9e971541b204..ca078354c896 100644 --- a/core/node/state_keeper/src/tests/mod.rs +++ b/core/node/state_keeper/src/tests/mod.rs @@ -3,15 +3,15 @@ use std::{ atomic::{AtomicBool, AtomicU64, Ordering}, Arc, }, - time::Instant, + time::{Instant, SystemTime, UNIX_EPOCH}, }; use tokio::sync::watch; use zksync_config::configs::chain::StateKeeperConfig; use zksync_multivm::{ interface::{ - ExecutionResult, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, TxExecutionMode, - VmExecutionLogs, VmExecutionResultAndLogs, VmExecutionStatistics, + Halt, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionLogs, + VmExecutionResultAndLogs, VmExecutionStatistics, }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -20,11 +20,10 @@ use zksync_types::{ aggregated_operations::AggregatedActionType, block::{BlockGasCount, L2BlockExecutionData, L2BlockHasher}, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, - AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, StorageKey, - StorageLog, StorageLogKind, StorageLogWithPreviousValue, Transaction, H256, U256, - ZKPORTER_IS_AVAILABLE, + u256_to_h256, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, + ProtocolVersionId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue, + Transaction, H256, U256, ZKPORTER_IS_AVAILABLE, }; -use zksync_utils::u256_to_h256; use crate::{ io::PendingBatchData, @@ -46,6 +45,13 @@ use crate::{ ZkSyncStateKeeper, }; +pub(crate) fn seconds_since_epoch() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Incorrect system time") + .as_secs() +} + /// Creates a mock `PendingBatchData` object containing the provided sequence of L2 blocks. 
pub(crate) fn pending_batch_data(pending_l2_blocks: Vec) -> PendingBatchData { PendingBatchData { @@ -59,6 +65,7 @@ pub(crate) fn pending_batch_data(pending_l2_blocks: Vec) - default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::from(270), }, + pubdata_params: Default::default(), pending_l2_blocks, } } @@ -102,7 +109,7 @@ pub(super) fn default_l1_batch_env( pub(super) fn create_updates_manager() -> UpdatesManager { let l1_batch_env = default_l1_batch_env(1, 1, Address::default()); - UpdatesManager::new(&l1_batch_env, &default_system_env()) + UpdatesManager::new(&l1_batch_env, &default_system_env(), Default::default()) } pub(super) fn create_transaction(fee_per_gas: u64, gas_per_pubdata: u64) -> Transaction { @@ -119,26 +126,16 @@ pub(super) fn create_execution_result( let total_log_queries = storage_logs.len() + 2; VmExecutionResultAndLogs { - result: ExecutionResult::Success { output: vec![] }, logs: VmExecutionLogs { - events: vec![], - system_l2_to_l1_logs: vec![], - user_l2_to_l1_logs: vec![], storage_logs, total_log_queries_count: total_log_queries, + ..VmExecutionLogs::default() }, statistics: VmExecutionStatistics { - contracts_used: 0, - cycles_used: 0, - gas_used: 0, - gas_remaining: 0, - computational_gas_used: 0, total_log_queries, - pubdata_published: 0, - circuit_statistic: Default::default(), + ..VmExecutionStatistics::default() }, - refunds: Refunds::default(), - new_known_factory_deps: None, + ..VmExecutionResultAndLogs::mock_success() } } diff --git a/core/node/state_keeper/src/types.rs b/core/node/state_keeper/src/types.rs index e112871a6475..db18e32e0963 100644 --- a/core/node/state_keeper/src/types.rs +++ b/core/node/state_keeper/src/types.rs @@ -6,7 +6,9 @@ use std::{ use zksync_dal::{Connection, Core, CoreDal}; use zksync_mempool::{L2TxFilter, MempoolInfo, MempoolStore}; use zksync_multivm::interface::{VmExecutionMetrics, VmExecutionResultAndLogs}; -use zksync_types::{block::BlockGasCount, Address, Nonce, PriorityOpId, Transaction}; +use zksync_types::{ + block::BlockGasCount, Address, Nonce, PriorityOpId, Transaction, TransactionTimeRangeConstraint, +}; use super::{ metrics::StateKeeperGauges, @@ -30,13 +32,32 @@ impl MempoolGuard { Self(Arc::new(Mutex::new(store))) } - pub fn insert(&mut self, transactions: Vec, nonces: HashMap) { + pub fn insert( + &mut self, + transactions: Vec<(Transaction, TransactionTimeRangeConstraint)>, + nonces: HashMap, + ) { self.0 .lock() .expect("failed to acquire mempool lock") .insert(transactions, nonces); } + #[cfg(test)] + pub fn insert_without_constraint( + &mut self, + transactions: Vec, + nonces: HashMap, + ) { + self.insert( + transactions + .into_iter() + .map(|x| (x, TransactionTimeRangeConstraint::default())) + .collect(), + nonces, + ); + } + pub fn has_next(&self, filter: &L2TxFilter) -> bool { self.0 .lock() @@ -44,18 +65,21 @@ impl MempoolGuard { .has_next(filter) } - pub fn next_transaction(&mut self, filter: &L2TxFilter) -> Option { + pub fn next_transaction( + &mut self, + filter: &L2TxFilter, + ) -> Option<(Transaction, TransactionTimeRangeConstraint)> { self.0 .lock() .expect("failed to acquire mempool lock") .next_transaction(filter) } - pub fn rollback(&mut self, rejected: &Transaction) { + pub fn rollback(&mut self, rejected: &Transaction) -> TransactionTimeRangeConstraint { self.0 .lock() .expect("failed to acquire mempool lock") - .rollback(rejected); + .rollback(rejected) } pub fn get_mempool_info(&mut self) -> MempoolInfo { diff --git 
a/core/node/state_keeper/src/updates/l1_batch_updates.rs b/core/node/state_keeper/src/updates/l1_batch_updates.rs index 2979ebbd8c26..aa2e22cac483 100644 --- a/core/node/state_keeper/src/updates/l1_batch_updates.rs +++ b/core/node/state_keeper/src/updates/l1_batch_updates.rs @@ -49,8 +49,6 @@ impl L1BatchUpdates { #[cfg(test)] mod tests { - use std::collections::HashMap; - use zksync_multivm::vm_latest::TransactionVmExt; use zksync_types::{L2BlockNumber, ProtocolVersionId, H256}; @@ -78,7 +76,6 @@ mod tests { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], - HashMap::new(), vec![], ); diff --git a/core/node/state_keeper/src/updates/l2_block_updates.rs b/core/node/state_keeper/src/updates/l2_block_updates.rs index 27995b384abe..d258f8eeac0b 100644 --- a/core/node/state_keeper/src/updates/l2_block_updates.rs +++ b/core/node/state_keeper/src/updates/l2_block_updates.rs @@ -5,14 +5,14 @@ use zksync_multivm::{ Call, CompressedBytecodeInfo, ExecutionResult, L2BlockEnv, TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionMetrics, VmExecutionResultAndLogs, }, - vm_latest::{utils::extract_bytecodes_marked_as_known, TransactionVmExt}, + vm_latest::TransactionVmExt, }; use zksync_types::{ block::{BlockGasCount, L2BlockHasher}, + bytecode::BytecodeHash, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, L2BlockNumber, ProtocolVersionId, StorageLogWithPreviousValue, Transaction, H256, }; -use zksync_utils::bytecode::hash_bytecode; use crate::metrics::KEEPER_METRICS; @@ -88,16 +88,10 @@ impl L2BlockUpdates { tx_l1_gas_this_tx: BlockGasCount, execution_metrics: VmExecutionMetrics, compressed_bytecodes: Vec, - new_known_factory_deps: HashMap>, call_traces: Vec, ) { let saved_factory_deps = - extract_bytecodes_marked_as_known(&tx_execution_result.logs.events); - self.events.extend(tx_execution_result.logs.events); - self.user_l2_to_l1_logs - .extend(tx_execution_result.logs.user_l2_to_l1_logs); - self.system_l2_to_l1_logs - .extend(tx_execution_result.logs.system_l2_to_l1_logs); + VmEvent::extract_bytecodes_marked_as_known(&tx_execution_result.logs.events); let gas_refunded = tx_execution_result.refunds.gas_refunded; let operator_suggested_refund = tx_execution_result.refunds.operator_suggested_refund; @@ -125,14 +119,19 @@ impl L2BlockUpdates { let factory_deps = &tx.execute.factory_deps; let mut tx_factory_deps: HashMap<_, _> = factory_deps .iter() - .map(|bytecode| (hash_bytecode(bytecode), bytecode.clone())) + .map(|bytecode| { + ( + BytecodeHash::for_bytecode(bytecode).value(), + bytecode.clone(), + ) + }) .collect(); // Ensure that *dynamic* factory deps (ones that may be created when executing EVM contracts) // are added into the lookup map as well. 
- tx_factory_deps.extend(new_known_factory_deps); + tx_factory_deps.extend(tx_execution_result.dynamic_factory_deps); // Save all bytecodes that were marked as known in the bootloader - let known_bytecodes = saved_factory_deps.into_iter().map(|bytecode_hash| { + let known_bytecodes = saved_factory_deps.map(|bytecode_hash| { let bytecode = tx_factory_deps.get(&bytecode_hash).unwrap_or_else(|| { panic!( "Failed to get factory deps on tx: bytecode hash: {:?}, tx hash: {}", @@ -140,7 +139,7 @@ impl L2BlockUpdates { tx.hash() ) }); - (bytecode_hash, bytecode.to_vec()) + (bytecode_hash, bytecode.clone()) }); self.new_factory_deps.extend(known_bytecodes); @@ -149,6 +148,11 @@ impl L2BlockUpdates { self.txs_encoding_size += tx.bootloader_encoding_size(); self.payload_encoding_size += zksync_protobuf::repr::encode::(&tx).len(); + self.events.extend(tx_execution_result.logs.events); + self.user_l2_to_l1_logs + .extend(tx_execution_result.logs.user_l2_to_l1_logs); + self.system_l2_to_l1_logs + .extend(tx_execution_result.logs.system_l2_to_l1_logs); self.storage_logs .extend(tx_execution_result.logs.storage_logs); @@ -211,7 +215,6 @@ mod tests { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], - HashMap::new(), vec![], ); diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index 6211755eb156..752963580e37 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -1,5 +1,3 @@ -use std::collections::HashMap; - use zksync_contracts::BaseSystemContractsHashes; use zksync_multivm::{ interface::{ @@ -9,8 +7,8 @@ use zksync_multivm::{ utils::{get_batch_base_fee, StorageWritesDeduplicator}, }; use zksync_types::{ - block::BlockGasCount, fee_model::BatchFeeInput, Address, L1BatchNumber, L2BlockNumber, - ProtocolVersionId, Transaction, H256, + block::BlockGasCount, commitment::PubdataParams, fee_model::BatchFeeInput, Address, + L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction, }; pub(crate) use self::{l1_batch_updates::L1BatchUpdates, l2_block_updates::L2BlockUpdates}; @@ -41,10 +39,15 @@ pub struct UpdatesManager { pub l1_batch: L1BatchUpdates, pub l2_block: L2BlockUpdates, pub storage_writes_deduplicator: StorageWritesDeduplicator, + pubdata_params: PubdataParams, } impl UpdatesManager { - pub fn new(l1_batch_env: &L1BatchEnv, system_env: &SystemEnv) -> Self { + pub fn new( + l1_batch_env: &L1BatchEnv, + system_env: &SystemEnv, + pubdata_params: PubdataParams, + ) -> Self { let protocol_version = system_env.version; Self { batch_timestamp: l1_batch_env.timestamp, @@ -63,6 +66,7 @@ impl UpdatesManager { ), storage_writes_deduplicator: StorageWritesDeduplicator::new(), storage_view_cache: None, + pubdata_params, } } @@ -85,7 +89,7 @@ impl UpdatesManager { pub(crate) fn seal_l2_block_command( &self, - l2_shared_bridge_addr: Address, + l2_legacy_shared_bridge_addr: Option
         pre_insert_txs: bool,
     ) -> L2BlockSealCommand {
         L2BlockSealCommand {
@@ -97,8 +101,9 @@ impl UpdatesManager {
             base_fee_per_gas: self.base_fee_per_gas,
             base_system_contracts_hashes: self.base_system_contract_hashes,
             protocol_version: Some(self.protocol_version),
-            l2_shared_bridge_addr,
+            l2_legacy_shared_bridge_addr,
             pre_insert_txs,
+            pubdata_params: self.pubdata_params,
         }
     }
 
@@ -112,7 +117,6 @@ impl UpdatesManager {
         tx: Transaction,
         tx_execution_result: VmExecutionResultAndLogs,
         compressed_bytecodes: Vec<CompressedBytecodeInfo>,
-        new_known_factory_deps: HashMap<H256, Vec<u8>>,
         tx_l1_gas_this_tx: BlockGasCount,
         execution_metrics: VmExecutionMetrics,
         call_traces: Vec<Call>,
@@ -128,7 +132,6 @@ impl UpdatesManager {
             tx_l1_gas_this_tx,
             execution_metrics,
             compressed_bytecodes,
-            new_known_factory_deps,
             call_traces,
         );
         latency.observe();
@@ -211,11 +214,12 @@ pub struct L2BlockSealCommand {
     pub base_fee_per_gas: u64,
     pub base_system_contracts_hashes: BaseSystemContractsHashes,
     pub protocol_version: Option<ProtocolVersionId>,
-    pub l2_shared_bridge_addr: Address,
+    pub l2_legacy_shared_bridge_addr: Option<Address>,
     /// Whether transactions should be pre-inserted to DB.
     /// Should be set to `true` for EN's IO as EN doesn't store transactions in DB
     /// before they are included into L2 blocks.
     pub pre_insert_txs: bool,
+    pub pubdata_params: PubdataParams,
 }
 
 #[cfg(test)]
@@ -238,7 +242,6 @@ mod tests {
             tx,
             create_execution_result([]),
             vec![],
-            HashMap::new(),
             new_block_gas_count(),
             VmExecutionMetrics::default(),
             vec![],
diff --git a/core/node/state_keeper/src/utils.rs b/core/node/state_keeper/src/utils.rs
index 4240ad306251..320dd49583ed 100644
--- a/core/node/state_keeper/src/utils.rs
+++ b/core/node/state_keeper/src/utils.rs
@@ -1,3 +1,5 @@
+use std::time::{SystemTime, UNIX_EPOCH};
+
 use zksync_multivm::interface::{DeduplicatedWritesMetrics, VmExecutionMetrics};
 use zksync_types::{
     aggregated_operations::AggregatedActionType, block::BlockGasCount, ExecuteTransactionCommon,
@@ -86,3 +88,15 @@ pub(super) fn gas_count_from_writes(
         execute: 0,
     }
 }
+
+// TODO (SMA-1206): use seconds instead of milliseconds.
+pub(super) fn millis_since_epoch() -> u128 {
+    SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .expect("Incorrect system time")
+        .as_millis()
+}
+
+pub(super) fn millis_since(since: u64) -> u64 {
+    (millis_since_epoch() - since as u128 * 1000) as u64
+}
diff --git a/core/node/tee_verifier_input_producer/Cargo.toml b/core/node/tee_verifier_input_producer/Cargo.toml
deleted file mode 100644
index 7a5a4de5d0c9..000000000000
--- a/core/node/tee_verifier_input_producer/Cargo.toml
+++ /dev/null
@@ -1,27 +0,0 @@
-[package]
-name = "zksync_tee_verifier_input_producer"
-description = "ZKsync TEE verifier input producer"
-version.workspace = true
-edition.workspace = true
-authors.workspace = true
-homepage.workspace = true
-repository.workspace = true
-license.workspace = true
-keywords.workspace = true
-categories.workspace = true
-
-[dependencies]
-zksync_dal.workspace = true
-zksync_object_store.workspace = true
-zksync_prover_interface.workspace = true
-zksync_queued_job_processor.workspace = true
-zksync_tee_verifier.workspace = true
-zksync_types.workspace = true
-zksync_utils.workspace = true
-zksync_vm_executor.workspace = true
-vise.workspace = true
-
-anyhow.workspace = true
-async-trait.workspace = true
-tracing.workspace = true
-tokio = { workspace = true, features = ["time"] }
diff --git a/core/node/tee_verifier_input_producer/README.md b/core/node/tee_verifier_input_producer/README.md
deleted file mode 100644
index 75a2029985cc..000000000000
--- a/core/node/tee_verifier_input_producer/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# `zksync_tee_verifier_input_producer`
-
-Component responsible for producing inputs for verification of execution in TEE.
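A note on the `millis_since` helpers added to `core/node/state_keeper/src/utils.rs` above: `millis_since_epoch` returns milliseconds, while the `since` argument is a UNIX timestamp in seconds, hence the `* 1000` before the subtraction. A runnable restatement with a usage check (the 5-second value is illustrative):

```rust
use std::time::{SystemTime, UNIX_EPOCH};

fn millis_since_epoch() -> u128 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("Incorrect system time")
        .as_millis()
}

// `since` is a seconds-granularity timestamp (e.g. a block timestamp);
// the result is the elapsed time in milliseconds.
fn millis_since(since: u64) -> u64 {
    (millis_since_epoch() - since as u128 * 1000) as u64
}

fn main() {
    let five_seconds_ago = (millis_since_epoch() / 1000) as u64 - 5;
    assert!(millis_since(five_seconds_ago) >= 5_000);
}
```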
diff --git a/core/node/tee_verifier_input_producer/src/lib.rs b/core/node/tee_verifier_input_producer/src/lib.rs
deleted file mode 100644
index 8a99aa07ae51..000000000000
--- a/core/node/tee_verifier_input_producer/src/lib.rs
+++ /dev/null
@@ -1,261 +0,0 @@
-//! Produces input for a TEE Verifier
-//!
-//! Extract all data needed to re-execute and verify an L1Batch without accessing
-//! the DB and/or the object store.
-//!
-//! For testing purposes, the L1 batch is re-executed immediately for now.
-//! Eventually, this component will only extract the inputs and send them to another
-//! machine over a "to be defined" channel, e.g., save them to an object store.
-
-use std::{sync::Arc, time::Instant};
-
-use anyhow::Context;
-use async_trait::async_trait;
-use tokio::task::JoinHandle;
-use zksync_dal::{tee_verifier_input_producer_dal::JOB_MAX_ATTEMPT, ConnectionPool, Core, CoreDal};
-use zksync_object_store::ObjectStore;
-use zksync_prover_interface::inputs::{
-    TeeVerifierInput, V1TeeVerifierInput, WitnessInputMerklePaths,
-};
-use zksync_queued_job_processor::JobProcessor;
-use zksync_tee_verifier::Verify;
-use zksync_types::{tee_types::TeeType, L1BatchNumber, L2ChainId};
-use zksync_utils::u256_to_h256;
-use zksync_vm_executor::storage::L1BatchParamsProvider;
-
-use self::metrics::METRICS;
-
-mod metrics;
-
-/// Component that extracts all data (from DB) necessary to run a TEE Verifier.
-#[derive(Debug)]
-pub struct TeeVerifierInputProducer {
-    connection_pool: ConnectionPool<Core>,
-    l2_chain_id: L2ChainId,
-    object_store: Arc<dyn ObjectStore>,
-}
-
-impl TeeVerifierInputProducer {
-    pub async fn new(
-        connection_pool: ConnectionPool<Core>,
-        object_store: Arc<dyn ObjectStore>,
-        l2_chain_id: L2ChainId,
-    ) -> anyhow::Result<Self> {
-        Ok(TeeVerifierInputProducer {
-            connection_pool,
-            object_store,
-            l2_chain_id,
-        })
-    }
-
-    async fn process_job_impl(
-        l1_batch_number: L1BatchNumber,
-        started_at: Instant,
-        connection_pool: ConnectionPool<Core>,
-        object_store: Arc<dyn ObjectStore>,
-        l2_chain_id: L2ChainId,
-    ) -> anyhow::Result<TeeVerifierInput> {
-        let prepare_basic_circuits_job: WitnessInputMerklePaths = object_store
-            .get(l1_batch_number)
-            .await
-            .context("failed to get PrepareBasicCircuitsJob from object store")?;
-
-        let mut connection = connection_pool
-            .connection()
-            .await
-            .context("failed to get connection for TeeVerifierInputProducer")?;
-
-        let l2_blocks_execution_data = connection
-            .transactions_dal()
-            .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number)
-            .await?;
-
-        let l1_batch_header = connection
-            .blocks_dal()
-            .get_l1_batch_header(l1_batch_number)
-            .await
-            .with_context(|| format!("header is missing for L1 batch #{l1_batch_number}"))?
-            .unwrap();
-
-        let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection)
-            .await
-            .context("failed initializing L1 batch params provider")?;
-
-        // In the state keeper, this value is used to reject execution.
-        // All batches have already been executed by State Keeper.
-        // This means we don't want to reject any execution, therefore we're using MAX as an allow all.
-        let validation_computational_gas_limit = u32::MAX;
-
-        let (system_env, l1_batch_env) = l1_batch_params_provider
-            .load_l1_batch_env(
-                &mut connection,
-                l1_batch_number,
-                validation_computational_gas_limit,
-                l2_chain_id,
-            )
-            .await?
-            .with_context(|| format!("expected L1 batch #{l1_batch_number} to be sealed"))?;
-
-        let used_contract_hashes = l1_batch_header
-            .used_contract_hashes
-            .into_iter()
-            .map(u256_to_h256)
-            .collect();
-
-        // `get_factory_deps()` returns the bytecode in chunks of `Vec<[u8; 32]>`,
-        // but `fn store_factory_dep(&mut self, hash: H256, bytecode: Vec<u8>)` in `InMemoryStorage` wants flat byte vecs.
-        pub fn into_flattened<T: Clone, const N: usize>(data: Vec<[T; N]>) -> Vec<T> {
-            let mut new = Vec::new();
-            for slice in data.iter() {
-                new.extend_from_slice(slice);
-            }
-            new
-        }
-
-        let used_contracts = connection
-            .factory_deps_dal()
-            .get_factory_deps(&used_contract_hashes)
-            .await
-            .into_iter()
-            .map(|(hash, bytes)| (u256_to_h256(hash), into_flattened(bytes)))
-            .collect();
-
-        tracing::info!("Started execution of l1_batch: {l1_batch_number:?}");
-
-        let tee_verifier_input = V1TeeVerifierInput::new(
-            prepare_basic_circuits_job,
-            l2_blocks_execution_data,
-            l1_batch_env,
-            system_env,
-            used_contracts,
-        );
-
-        // TODO (SEC-263): remove these 2 lines after successful testnet runs
-        tee_verifier_input.clone().verify()?;
-        tracing::info!("Looks like we verified {l1_batch_number} correctly");
-
-        tracing::info!("Finished execution of l1_batch: {l1_batch_number:?}");
-
-        METRICS.process_batch_time.observe(started_at.elapsed());
-        tracing::debug!(
-            "TeeVerifierInputProducer took {:?} for L1BatchNumber {}",
-            started_at.elapsed(),
-            l1_batch_number.0
-        );
-
-        Ok(TeeVerifierInput::new(tee_verifier_input))
-    }
-}
-
-#[async_trait]
-impl JobProcessor for TeeVerifierInputProducer {
-    type Job = L1BatchNumber;
-    type JobId = L1BatchNumber;
-    type JobArtifacts = TeeVerifierInput;
-    const SERVICE_NAME: &'static str = "tee_verifier_input_producer";
-
-    async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> {
-        let mut connection = self.connection_pool.connection().await?;
-        let l1_batch_to_process = connection
-            .tee_verifier_input_producer_dal()
-            .get_next_tee_verifier_input_producer_job()
-            .await
-            .context("failed to get next basic witness input producer job")?;
-        Ok(l1_batch_to_process.map(|number| (number, number)))
-    }
-
-    async fn save_failure(&self, job_id: Self::JobId, started_at: Instant, error: String) {
-        let attempts = self
-            .connection_pool
-            .connection()
-            .await
-            .unwrap()
-            .tee_verifier_input_producer_dal()
-            .mark_job_as_failed(job_id, started_at, error)
-            .await
-            .expect("errored whilst marking job as failed");
-        if let Some(tries) = attempts {
-            tracing::warn!("Failed to process job: {job_id:?}, after {tries} tries.");
-        } else {
-            tracing::warn!("L1 Batch {job_id:?} was processed successfully by another worker.");
-        }
-    }
-
-    async fn process_job(
-        &self,
-        _job_id: &Self::JobId,
-        job: Self::Job,
-        started_at: Instant,
-    ) -> JoinHandle<anyhow::Result<Self::JobArtifacts>> {
-        let l2_chain_id = self.l2_chain_id;
-        let connection_pool = self.connection_pool.clone();
-        let object_store = self.object_store.clone();
-        tokio::task::spawn(async move {
-            Self::process_job_impl(
-                job,
-                started_at,
-                connection_pool.clone(),
-                object_store,
-                l2_chain_id,
-            )
-            .await
-        })
-    }
-
-    async fn save_result(
-        &self,
-        job_id: Self::JobId,
-        started_at: Instant,
-        artifacts: Self::JobArtifacts,
-    ) -> anyhow::Result<()> {
-        let observer: vise::LatencyObserver = METRICS.upload_input_time.start();
-        let object_path = self
-            .object_store
-            .put(job_id, &artifacts)
-            .await
-            .context("failed to upload artifacts for TeeVerifierInputProducer")?;
-        observer.observe();
-        let mut connection = self
-            .connection_pool
-            .connection()
-            .await
-            .context("failed to acquire DB connection for TeeVerifierInputProducer")?;
-        let mut transaction = connection
-            .start_transaction()
-            .await
-            .context("failed to acquire DB transaction for TeeVerifierInputProducer")?;
-        transaction
-            .tee_verifier_input_producer_dal()
-            .mark_job_as_successful(job_id, started_at, &object_path)
-            .await
-            .context("failed to mark job as successful for TeeVerifierInputProducer")?;
-        transaction
-            .tee_proof_generation_dal()
-            .insert_tee_proof_generation_job(job_id, TeeType::Sgx)
-            .await?;
-        transaction
-            .commit()
-            .await
-            .context("failed to commit DB transaction for TeeVerifierInputProducer")?;
-        METRICS.block_number_processed.set(job_id.0 as u64);
-        Ok(())
-    }
-
-    fn max_attempts(&self) -> u32 {
-        JOB_MAX_ATTEMPT as u32
-    }
-
-    async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result<u32> {
-        let mut connection = self
-            .connection_pool
-            .connection()
-            .await
-            .context("failed to acquire DB connection for TeeVerifierInputProducer")?;
-        connection
-            .tee_verifier_input_producer_dal()
-            .get_tee_verifier_input_producer_job_attempts(*job_id)
-            .await
-            .map(|attempts| attempts.unwrap_or(0))
-            .context("failed to get job attempts for TeeVerifierInputProducer")
-    }
-}
diff --git a/core/node/tee_verifier_input_producer/src/metrics.rs b/core/node/tee_verifier_input_producer/src/metrics.rs
deleted file mode 100644
index 362804d338e9..000000000000
--- a/core/node/tee_verifier_input_producer/src/metrics.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-//! Metrics
-
-use std::time::Duration;
-
-use vise::{Buckets, Gauge, Histogram, Metrics, Unit};
-
-#[derive(Debug, Metrics)]
-#[metrics(prefix = "tee_verifier_input_producer")]
-pub(crate) struct TeeVerifierInputProducerMetrics {
-    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
-    pub process_batch_time: Histogram<Duration>,
-    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
-    pub upload_input_time: Histogram<Duration>,
-    pub block_number_processed: Gauge<u64>,
-}
-
-#[vise::register]
-pub(super) static METRICS: vise::Global<TeeVerifierInputProducerMetrics> = vise::Global::new();
diff --git a/core/node/test_utils/Cargo.toml b/core/node/test_utils/Cargo.toml
index af60008df570..fd657c7d82c0 100644
--- a/core/node/test_utils/Cargo.toml
+++ b/core/node/test_utils/Cargo.toml
@@ -11,11 +11,9 @@ keywords.workspace = true
 categories.workspace = true
 
 [dependencies]
-zksync_multivm.workspace = true
 zksync_types.workspace = true
 zksync_dal.workspace = true
 zksync_contracts.workspace = true
 zksync_merkle_tree.workspace = true
 zksync_system_constants.workspace = true
-zksync_utils.workspace = true
-zksync_node_genesis.workspace = true
+zksync_vm_interface.workspace = true
diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs
index 9eb53994eee5..9a02c18cd235 100644
--- a/core/node/test_utils/src/lib.rs
+++ b/core/node/test_utils/src/lib.rs
@@ -2,14 +2,9 @@
 
 use std::collections::HashMap;
 
-use zksync_contracts::BaseSystemContractsHashes;
+use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes};
 use zksync_dal::{Connection, Core, CoreDal};
 use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction};
-use zksync_multivm::{
-    interface::{TransactionExecutionResult, TxExecutionStatus, VmExecutionMetrics},
-    utils::get_max_gas_per_pubdata_byte,
-};
-use zksync_node_genesis::GenesisParams;
 use zksync_system_constants::{get_intrinsic_constants, ZKPORTER_IS_AVAILABLE};
 use zksync_types::{
     block::{L1BatchHeader, L2BlockHeader},
@@ -27,6 +22,10 @@ use zksync_types::{
     Address, K256PrivateKey, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersion,
     ProtocolVersionId, StorageLog, H256, U256,
 };
+use zksync_vm_interface::{TransactionExecutionResult, TxExecutionStatus, VmExecutionMetrics};
+
+/// Value for recent protocol versions.
+const MAX_GAS_PER_PUBDATA_BYTE: u64 = 50_000;
 
 /// Creates an L2 block header with the specified number and deterministic contents.
 pub fn create_l2_block(number: u32) -> L2BlockHeader {
@@ -39,12 +38,13 @@ pub fn create_l2_block(number: u32) -> L2BlockHeader {
         base_fee_per_gas: 100,
         batch_fee_input: BatchFeeInput::l1_pegged(100, 100),
         fee_account_address: Address::zero(),
-        gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(ProtocolVersionId::latest().into()),
+        gas_per_pubdata_limit: MAX_GAS_PER_PUBDATA_BYTE,
         base_system_contracts_hashes: BaseSystemContractsHashes::default(),
         protocol_version: Some(ProtocolVersionId::latest()),
         virtual_blocks: 1,
         gas_limit: 0,
         logs_bloom: Default::default(),
+        pubdata_params: Default::default(),
     }
 }
 
@@ -98,6 +98,10 @@ pub fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata {
         events_queue_commitment: Some(H256::zero()),
         bootloader_initial_content_commitment: Some(H256::zero()),
         state_diffs_compressed: vec![],
+        state_diff_hash: Some(H256::zero()),
+        local_root: Some(H256::zero()),
+        aggregation_root: Some(H256::zero()),
+        da_inclusion_data: Some(vec![]),
     }
 }
 
@@ -128,6 +132,9 @@ pub fn l1_batch_metadata_to_commitment_artifacts(
             }
             _ => None,
         },
+        local_root: metadata.local_root.unwrap(),
+        aggregation_root: metadata.aggregation_root.unwrap(),
+        state_diff_hash: metadata.state_diff_hash.unwrap(),
     }
 }
 
@@ -187,14 +194,14 @@ impl Snapshot {
         l1_batch: L1BatchNumber,
         l2_block: L2BlockNumber,
         storage_logs: Vec<StorageLog>,
-        genesis_params: GenesisParams,
+        contracts: &BaseSystemContracts,
+        protocol_version: ProtocolVersionId,
     ) -> Self {
-        let contracts = genesis_params.base_system_contracts();
         let l1_batch = L1BatchHeader::new(
             l1_batch,
             l1_batch.0.into(),
             contracts.hashes(),
-            genesis_params.minor_protocol_version(),
+            protocol_version,
         );
         let l2_block = L2BlockHeader {
             number: l2_block,
@@ -205,14 +212,13 @@ impl Snapshot {
             base_fee_per_gas: 100,
             batch_fee_input: BatchFeeInput::l1_pegged(100, 100),
             fee_account_address: Address::zero(),
-            gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(
-                genesis_params.minor_protocol_version().into(),
-            ),
+            gas_per_pubdata_limit: MAX_GAS_PER_PUBDATA_BYTE,
             base_system_contracts_hashes: contracts.hashes(),
-            protocol_version: Some(genesis_params.minor_protocol_version()),
+            protocol_version: Some(protocol_version),
             virtual_blocks: 1,
             gas_limit: 0,
             logs_bloom: Default::default(),
+            pubdata_params: Default::default(),
         };
         Snapshot {
             l1_batch,
@@ -220,7 +226,7 @@ impl Snapshot {
             factory_deps: [&contracts.bootloader, &contracts.default_aa]
                 .into_iter()
                 .chain(contracts.evm_emulator.as_ref())
-                .map(|c| (c.hash, zksync_utils::be_words_to_bytes(&c.code)))
+                .map(|c| (c.hash, c.code.clone()))
                 .collect(),
             storage_logs,
         }
@@ -244,7 +250,13 @@ pub async fn prepare_recovery_snapshot(
             enumeration_index: i as u64 + 1,
         })
         .collect();
-    let snapshot = Snapshot::new(l1_batch, l2_block, storage_logs, GenesisParams::mock());
+    let snapshot = Snapshot::new(
+        l1_batch,
+        l2_block,
+        storage_logs,
+        &BaseSystemContracts::load_from_disk(),
+        ProtocolVersionId::latest(),
+    );
     recover(storage, snapshot).await
 }
 
diff --git a/core/node/vm_runner/Cargo.toml b/core/node/vm_runner/Cargo.toml
index 9c235ad6b291..333647b64367 100644
--- a/core/node/vm_runner/Cargo.toml
+++ b/core/node/vm_runner/Cargo.toml
@@ -17,7 +17,6 @@ zksync_dal.workspace = true
 zksync_contracts.workspace = true
 zksync_state.workspace = true
 zksync_storage.workspace = true
-zksync_utils.workspace = true
 zksync_prover_interface.workspace = true
 zksync_object_store.workspace = true
 zksync_vm_executor.workspace = true
@@ -36,7 +35,7 @@ vise.workspace = true
 
 [dev-dependencies]
 zksync_node_test_utils.workspace = true
 zksync_node_genesis.workspace = true
-zksync_test_account.workspace = true
+zksync_test_contracts.workspace = true
 assert_matches.workspace = true
 backon.workspace = true
 futures = { workspace = true, features = ["compat"] }
diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs
index dc94752d9886..5d63d09b5caf 100644
--- a/core/node/vm_runner/src/impls/bwip.rs
+++ b/core/node/vm_runner/src/impls/bwip.rs
@@ -1,4 +1,7 @@
-use std::{collections::HashSet, sync::Arc};
+use std::{
+    collections::{HashMap, HashSet},
+    sync::Arc,
+};
 
 use anyhow::anyhow;
 use async_trait::async_trait;
@@ -8,10 +11,9 @@ use zksync_object_store::ObjectStore;
 use zksync_prover_interface::inputs::VMRunWitnessInputData;
 use zksync_state::OwnedStorage;
 use zksync_types::{
-    block::StorageOracleInfo, witness_block_state::WitnessStorageState, L1BatchNumber, L2ChainId,
-    H256,
+    block::StorageOracleInfo, h256_to_u256, u256_to_h256, witness_block_state::WitnessStorageState,
+    L1BatchNumber, L2ChainId, H256,
 };
-use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256};
 use zksync_vm_interface::{executor::BatchExecutorFactory, L1BatchEnv, L2BlockEnv, SystemEnv};
 
 use crate::{
@@ -224,7 +226,6 @@ async fn get_updates_manager_witness_input_data(
         .get_sealed_factory_dep(default_aa)
         .await?
         .ok_or_else(|| anyhow!("Default account bytecode should exist"))?;
-    let account_bytecode = bytes_to_chunks(&account_bytecode_bytes);
 
     let used_contract_hashes = &output.batch.final_execution_state.used_contract_hashes;
     let hashes: HashSet<H256> = used_contract_hashes
@@ -238,7 +239,7 @@ async fn get_updates_manager_witness_input_data(
         .get_factory_deps(&hashes)
         .await;
     if used_contract_hashes.contains(&account_code_hash) {
-        used_bytecodes.insert(account_code_hash, account_bytecode);
+        used_bytecodes.insert(account_code_hash, account_bytecode_bytes);
     }
 
     let evm_emulator_code_hash = if let Some(evm_emulator) = evm_emulator {
@@ -248,8 +249,7 @@ async fn get_updates_manager_witness_input_data(
             .factory_deps_dal()
             .get_sealed_factory_dep(evm_emulator)
             .await?
-            .ok_or_else(|| anyhow!("EVM Simulator bytecode should exist"))?;
-        let evm_emulator_bytecode = bytes_to_chunks(&evm_emulator_bytecode);
+            .ok_or_else(|| anyhow!("EVM emulator bytecode should exist"))?;
         used_bytecodes.insert(evm_emulator_code_hash, evm_emulator_bytecode);
         }
         Some(evm_emulator_code_hash)
@@ -266,7 +266,10 @@ async fn get_updates_manager_witness_input_data(
 
     Ok(VMRunWitnessInputData {
         l1_batch_number,
-        used_bytecodes,
+        used_bytecodes: used_bytecodes
+            .into_iter()
+            .map(|(hash, code)| (hash, bytes_to_chunks(&code)))
+            .collect(),
         initial_heap_content,
         protocol_version: system_env.version,
         bootloader_code,
@@ -278,6 +281,13 @@ async fn get_updates_manager_witness_input_data(
     })
 }
 
+fn bytes_to_chunks(bytes: &[u8]) -> Vec<[u8; 32]> {
+    bytes
+        .chunks(32)
+        .map(|chunk| chunk.try_into().unwrap())
+        .collect()
+}
+
 #[tracing::instrument(skip_all)]
 async fn assert_database_witness_input_data(
     connection: &mut Connection<'_, Core>,
@@ -305,7 +315,6 @@ async fn assert_database_witness_input_data(
         .await
         .expect("Failed fetching default account bytecode from DB")
         .expect("Default account bytecode should exist");
-    let account_bytecode = bytes_to_chunks(&account_bytecode_bytes);
 
     let hashes: HashSet<H256> = block_header
         .used_contract_hashes
@@ -322,7 +331,7 @@ async fn assert_database_witness_input_data(
         .used_contract_hashes
         .contains(&account_code_hash)
     {
-        used_bytecodes.insert(account_code_hash, account_bytecode);
+        used_bytecodes.insert(account_code_hash, account_bytecode_bytes);
     }
 
     assert_eq!(
@@ -331,6 +340,10 @@ async fn assert_database_witness_input_data(
         "{} factory deps are not found in DB",
         hashes.len() - used_bytecodes.len()
     );
+    let used_bytecodes: HashMap<_, _> = used_bytecodes
+        .into_iter()
+        .map(|(hash, code)| (hash, bytes_to_chunks(&code)))
+        .collect();
 
     let StorageOracleInfo {
         storage_refunds,
diff --git a/core/node/vm_runner/src/process.rs b/core/node/vm_runner/src/process.rs
index 4f7ac1f97284..dbd218c8dc5f 100644
--- a/core/node/vm_runner/src/process.rs
+++ b/core/node/vm_runner/src/process.rs
@@ -82,6 +82,7 @@ impl VmRunner {
                 storage,
                 batch_data.l1_batch_env.clone(),
                 batch_data.system_env.clone(),
+                batch_data.pubdata_params,
             );
         let mut output_handler = self
             .output_handler_factory
diff --git a/core/node/vm_runner/src/storage.rs b/core/node/vm_runner/src/storage.rs
index 2285455ba244..9ab4ed87b9f1 100644
--- a/core/node/vm_runner/src/storage.rs
+++ b/core/node/vm_runner/src/storage.rs
@@ -13,7 +13,9 @@ use zksync_state::{
     AsyncCatchupTask, BatchDiff, OwnedStorage, RocksdbCell, RocksdbStorage, RocksdbStorageBuilder,
     RocksdbWithMemory,
 };
-use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2ChainId};
+use zksync_types::{
+    block::L2BlockExecutionData, commitment::PubdataParams, L1BatchNumber, L2ChainId,
+};
 use zksync_vm_executor::storage::L1BatchParamsProvider;
 use zksync_vm_interface::{L1BatchEnv, SystemEnv};
 
@@ -106,6 +108,8 @@ pub struct BatchExecuteData {
     pub l1_batch_env: L1BatchEnv,
     /// Execution process parameters.
     pub system_env: SystemEnv,
+    /// Pubdata building parameters.
+    pub pubdata_params: PubdataParams,
     /// List of L2 blocks and corresponding transactions that were executed within batch.
     pub l2_blocks: Vec<L2BlockExecutionData>,
 }
 
@@ -394,7 +398,7 @@ pub(crate) async fn load_batch_execute_data(
     l1_batch_params_provider: &L1BatchParamsProvider,
     chain_id: L2ChainId,
 ) -> anyhow::Result<Option<BatchExecuteData>> {
-    let Some((system_env, l1_batch_env)) = l1_batch_params_provider
+    let Some((system_env, l1_batch_env, pubdata_params)) = l1_batch_params_provider
         .load_l1_batch_env(
             conn,
             l1_batch_number,
@@ -415,6 +419,7 @@ pub(crate) async fn load_batch_execute_data(
     Ok(Some(BatchExecuteData {
         l1_batch_env,
         system_env,
+        pubdata_params,
         l2_blocks,
     }))
 }
diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs
index 575fd59be042..6bd6d662cfa9 100644
--- a/core/node/vm_runner/src/tests/mod.rs
+++ b/core/node/vm_runner/src/tests/mod.rs
@@ -9,18 +9,21 @@ use zksync_node_test_utils::{
     create_l1_batch_metadata, create_l2_block, execute_l2_transaction,
     l1_batch_metadata_to_commitment_artifacts,
 };
-use zksync_test_account::Account;
+use zksync_test_contracts::Account;
 use zksync_types::{
     block::{L1BatchHeader, L2BlockHasher},
+    bytecode::BytecodeHash,
     fee::Fee,
-    get_intrinsic_constants,
+    get_intrinsic_constants, h256_to_u256,
     l2::L2Tx,
+    u256_to_h256,
     utils::storage_key_for_standard_token_balance,
     AccountTreeId, Address, Execute, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey,
     StorageLog, StorageLogKind, StorageValue, H160, H256, L2_BASE_TOKEN_ADDRESS, U256,
 };
-use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256};
-use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TransactionExecutionMetrics};
+use zksync_vm_interface::{
+    tracer::ValidationTraces, L1BatchEnv, L2BlockEnv, SystemEnv, TransactionExecutionMetrics,
+};
 
 use super::*;
 
@@ -242,7 +245,11 @@ async fn store_l1_batches(
         let account = accounts.choose_mut(&mut rng).unwrap();
         let tx = create_l2_transaction(account, 1000000, 100);
         conn.transactions_dal()
-            .insert_transaction_l2(&tx, TransactionExecutionMetrics::default())
+            .insert_transaction_l2(
+                &tx,
+                TransactionExecutionMetrics::default(),
+                ValidationTraces::default(),
+            )
             .await?;
         let mut logs = Vec::new();
         let mut written_keys = Vec::new();
@@ -320,7 +327,7 @@ async fn store_l1_batches(
     header.used_contract_hashes = genesis_params
         .system_contracts()
         .iter()
-        .map(|contract| hash_bytecode(&contract.bytecode))
+        .map(|contract| BytecodeHash::for_bytecode(&contract.bytecode).value())
         .chain([genesis_params.base_system_contracts().hashes().default_aa])
         .chain(genesis_params.base_system_contracts().hashes().evm_emulator)
         .map(h256_to_u256)
diff --git a/core/node/vm_runner/src/tests/process.rs b/core/node/vm_runner/src/tests/process.rs
index 8e9bd66f3c91..cd77bca79c1a 100644
--- a/core/node/vm_runner/src/tests/process.rs
+++ b/core/node/vm_runner/src/tests/process.rs
@@ -5,7 +5,7 @@ use test_casing::test_casing;
 use tokio::sync::{watch, RwLock};
 use zksync_dal::{ConnectionPool, Core};
 use zksync_node_genesis::{insert_genesis_batch, GenesisParams};
-use zksync_test_account::Account;
+use zksync_test_contracts::Account;
 use zksync_types::{L1BatchNumber, L2ChainId};
 use zksync_vm_executor::batch::MainBatchExecutorFactory;
 
diff --git a/core/node/vm_runner/src/tests/storage.rs b/core/node/vm_runner/src/tests/storage.rs
index 838b469f0ef3..8727eecbcd0a 100644
--- a/core/node/vm_runner/src/tests/storage.rs
+++ b/core/node/vm_runner/src/tests/storage.rs
@@ -10,7 +10,7 @@ use tokio::{
 use zksync_dal::{ConnectionPool, Core, CoreDal};
 use zksync_node_genesis::{insert_genesis_batch, GenesisParams};
 use zksync_state::{interface::ReadStorage, OwnedStorage, PostgresStorage};
-use zksync_test_account::Account;
+use zksync_test_contracts::Account;
 use zksync_types::{AccountTreeId, L1BatchNumber, L2ChainId, StorageKey};
 
 use crate::{
diff --git a/core/tests/loadnext/Cargo.toml b/core/tests/loadnext/Cargo.toml
index adb5c9eca429..91f987035acf 100644
--- a/core/tests/loadnext/Cargo.toml
+++ b/core/tests/loadnext/Cargo.toml
@@ -9,6 +9,7 @@ license.workspace = true
 keywords.workspace = true
 categories.workspace = true
 publish = false
+exclude = ["./dump"]
 
 [dependencies]
 zksync_types.workspace = true
@@ -17,7 +18,7 @@ zksync_eth_signer.workspace = true
 zksync_web3_decl.workspace = true
 zksync_eth_client.workspace = true
 zksync_config.workspace = true
-zksync_contracts.workspace = true
+zksync_test_contracts.workspace = true
 zksync_system_constants.workspace = true
 zksync_vlog.workspace = true
 
diff --git a/core/tests/loadnext/README.md b/core/tests/loadnext/README.md
index 59288a7160ec..cc873c598c18 100644
--- a/core/tests/loadnext/README.md
+++ b/core/tests/loadnext/README.md
@@ -1,4 +1,4 @@
-# Loadnext: loadtest for ZKsync
+# Loadnext: load test for ZKsync
 
 Loadnext is a utility for random stress-testing the ZKsync server. It is capable of simulating the behavior of many
 independent users of ZKsync network, who are sending quasi-random requests to the server.
@@ -27,21 +27,21 @@ It:
 
 ## Transactions Parameters
 
-The smart contract that is used for every l2 transaction can be found here:
-`etc/contracts-test-data/contracts/loadnext/loadnext_contract.sol`.
+The smart contract that is used for every l2 transaction can be found in the [`zksync_test_contracts`] crate.
 
 The `execute` function of the contract has the following parameters:
 
-```
- function execute(uint reads, uint writes, uint hashes, uint events, uint max_recursion, uint deploys) external returns(uint) {
+```solidity
+function execute(uint reads, uint initialWrites, uint repeatedWrites, uint hashes, uint events, uint maxRecursion, uint deploys) external returns(uint) {
 ```
 
 which correspond to the following configuration options:
 
-```
+```rust
 pub struct LoadnextContractExecutionParams {
     pub reads: usize,
-    pub writes: usize,
+    pub initial_writes: usize,
+    pub repeated_writes: usize,
     pub events: usize,
     pub hashes: usize,
     pub recursive_calls: usize,
@@ -51,8 +51,9 @@ pub struct LoadnextContractExecutionParams {
 
 For example, to simulate an average transaction on mainnet, one could do:
 
-```
-CONTRACT_EXECUTION_PARAMS_WRITES=2
+```env
+CONTRACT_EXECUTION_PARAMS_INITIAL_WRITES=2
+CONTRACT_EXECUTION_PARAMS_REPEATED_WRITES=2
 CONTRACT_EXECUTION_PARAMS_READS=6
 CONTRACT_EXECUTION_PARAMS_EVENTS=2
 CONTRACT_EXECUTION_PARAMS_HASHES=10
@@ -62,8 +63,9 @@ CONTRACT_EXECUTION_PARAMS_DEPLOYS=0
 
 Similarly, to simulate a lightweight transaction:
 
-```
-CONTRACT_EXECUTION_PARAMS_WRITES=0
+```env
+CONTRACT_EXECUTION_PARAMS_INITIAL_WRITES=0
+CONTRACT_EXECUTION_PARAMS_REPEATED_WRITES=0
 CONTRACT_EXECUTION_PARAMS_READS=0
 CONTRACT_EXECUTION_PARAMS_EVENTS=0
 CONTRACT_EXECUTION_PARAMS_HASHES=0
@@ -86,10 +88,11 @@ Example invocation:
 - `MASTER_WALLET_PK` needs to be set to the private key of the master account.
 - `MAIN_TOKEN` needs to be set to the address of the token to be used for the loadtest.
 
-```
+```shell
 cargo build
 
-CONTRACT_EXECUTION_PARAMS_WRITES=2 \
+CONTRACT_EXECUTION_PARAMS_INITIAL_WRITES=2 \
+CONTRACT_EXECUTION_PARAMS_REPEATED_WRITES=2 \
 CONTRACT_EXECUTION_PARAMS_READS=6 \
 CONTRACT_EXECUTION_PARAMS_EVENTS=2 \
 CONTRACT_EXECUTION_PARAMS_HASHES=10 \
@@ -110,3 +113,5 @@ MASTER_WALLET_PK="..." \
 MAIN_TOKEN="..." \
 cargo run --bin loadnext
 ```
+
+[`zksync_test_contracts`]: ../../lib/test_contracts
diff --git a/core/tests/loadnext/src/account/api_request_executor.rs b/core/tests/loadnext/src/account/api_request_executor.rs
index 20c4bc2f5970..4733b4c09206 100644
--- a/core/tests/loadnext/src/account/api_request_executor.rs
+++ b/core/tests/loadnext/src/account/api_request_executor.rs
@@ -52,8 +52,7 @@ impl AccountLifespan {
                 err => RpcError::Custom(err.to_string()),
             }),
             ApiRequestType::GetLogs => {
-                let topics =
-                    random_topics(&self.wallet.test_contract.contract, &mut self.wallet.rng);
+                let topics = random_topics(&self.wallet.test_contract.abi, &mut self.wallet.rng);
                 // `run_api_requests_task` checks whether the cell is initialized
                 // at every loop iteration and skips logs action if it's not. Thus,
                 // it's safe to unwrap it.
diff --git a/core/tests/loadnext/src/account/mod.rs b/core/tests/loadnext/src/account/mod.rs
index 0f418bf12676..967970f96fb9 100644
--- a/core/tests/loadnext/src/account/mod.rs
+++ b/core/tests/loadnext/src/account/mod.rs
@@ -7,7 +7,7 @@ use std::{
 use futures::{channel::mpsc, SinkExt};
 use rand::Rng;
 use tokio::sync::RwLock;
-use zksync_contracts::test_contracts::LoadnextContractExecutionParams;
+use zksync_test_contracts::LoadnextContractExecutionParams;
 use zksync_types::{api::TransactionReceipt, Address, Nonce, H256, U256, U64};
 use zksync_web3_decl::{
     client::{Client, L2},
diff --git a/core/tests/loadnext/src/account/pubsub_executor.rs b/core/tests/loadnext/src/account/pubsub_executor.rs
index 07f45b4ae972..1b31207aab87 100644
--- a/core/tests/loadnext/src/account/pubsub_executor.rs
+++ b/core/tests/loadnext/src/account/pubsub_executor.rs
@@ -67,7 +67,7 @@ impl AccountLifespan {
         let params = match subscription_type {
             SubscriptionType::Logs => {
                 let topics = super::api_request_executor::random_topics(
-                    &self.wallet.test_contract.contract,
+                    &self.wallet.test_contract.abi,
                     &mut self.wallet.rng,
                 );
                 let contract_address = self.wallet.deployed_contract_address.get().unwrap();
diff --git a/core/tests/loadnext/src/account/tx_command_executor.rs b/core/tests/loadnext/src/account/tx_command_executor.rs
index 2a916564fd61..55e5a6b2a2d9 100644
--- a/core/tests/loadnext/src/account/tx_command_executor.rs
+++ b/core/tests/loadnext/src/account/tx_command_executor.rs
@@ -272,7 +272,7 @@ impl AccountLifespan {
 
         let mut builder = wallet
             .start_deploy_contract()
-            .bytecode(self.wallet.test_contract.bytecode.clone())
+            .bytecode(self.wallet.test_contract.bytecode.to_vec())
             .constructor_calldata(constructor_calldata);
 
         let fee = builder
@@ -329,7 +329,7 @@ impl AccountLifespan {
             U256::zero(),
             calldata,
             L1_TRANSACTION_GAS_LIMIT.into(),
-            Some(self.wallet.test_contract.factory_deps.clone()),
+            Some(self.wallet.test_contract.factory_deps()),
             None,
             None,
             Default::default(),
@@ -375,12 +375,13 @@ impl AccountLifespan {
     }
 
     fn prepare_calldata_for_loadnext_contract(&self) -> Vec<u8> {
-        let contract = &self.wallet.test_contract.contract;
+        let contract = &self.wallet.test_contract.abi;
         let function = contract.function("execute").unwrap();
         function
             .encode_input(&vec![
                 ethabi::Token::Uint(U256::from(self.contract_execution_params.reads)),
-                ethabi::Token::Uint(U256::from(self.contract_execution_params.writes)),
+                ethabi::Token::Uint(U256::from(self.contract_execution_params.initial_writes)),
+                ethabi::Token::Uint(U256::from(self.contract_execution_params.repeated_writes)),
                 ethabi::Token::Uint(U256::from(self.contract_execution_params.hashes)),
                 ethabi::Token::Uint(U256::from(self.contract_execution_params.events)),
                 ethabi::Token::Uint(U256::from(self.contract_execution_params.recursive_calls)),
@@ -401,7 +402,7 @@ impl AccountLifespan {
             .start_execute_contract()
             .calldata(calldata)
             .contract_address(contract_address)
-            .factory_deps(self.wallet.test_contract.factory_deps.clone());
+            .factory_deps(self.wallet.test_contract.factory_deps());
 
         let fee = builder
             .estimate_fee(Some(get_approval_based_paymaster_input_for_estimation(
diff --git a/core/tests/loadnext/src/account_pool.rs b/core/tests/loadnext/src/account_pool.rs
index 3fa3141553cd..6cc8d7f6949c 100644
--- a/core/tests/loadnext/src/account_pool.rs
+++ b/core/tests/loadnext/src/account_pool.rs
@@ -5,13 +5,13 @@ use once_cell::sync::OnceCell;
 use rand::Rng;
 use tokio::time::timeout;
 use zksync_eth_signer::PrivateKeySigner;
+use zksync_test_contracts::TestContract;
 use zksync_types::{Address, K256PrivateKey, L2ChainId, H256};
 use zksync_web3_decl::client::{Client, L2};
 
 use crate::{
     config::LoadtestConfig,
     corrupted_tx::CorruptedSigner,
-    fs_utils::{loadnext_contract, TestContract},
     rng::{LoadtestRng, Random},
     sdk::{signer::Signer, Wallet, ZksNamespaceClient},
 };
@@ -68,7 +68,7 @@ pub struct TestWallet {
     /// Wallet with corrupted signer.
     pub corrupted_wallet: CorruptedSyncWallet,
     /// Contract bytecode and calldata to be used for sending `Execute` transactions.
-    pub test_contract: TestContract,
+    pub test_contract: &'static TestContract,
     /// Address of the deployed contract to be used for sending
     /// `Execute` transaction.
     pub deployed_contract_address: Arc<OnceCell<Address>>,
@@ -116,7 +116,7 @@ impl AccountPool {
             anyhow::bail!("ZKsync server does not respond. Please check RPC address and whether server is launched");
         }
 
-        let test_contract = loadnext_contract(&config.test_contracts_path)?;
+        let test_contract = TestContract::load_test();
 
         let master_wallet = {
             let eth_private_key: H256 = config
@@ -166,7 +166,7 @@ impl AccountPool {
             let account = TestWallet {
                 wallet: Arc::new(wallet),
                 corrupted_wallet: Arc::new(corrupted_wallet),
-                test_contract: test_contract.clone(),
+                test_contract,
                 deployed_contract_address: deployed_contract_address.clone(),
                 rng: rng.derive(private_key_bytes),
             };
diff --git a/core/tests/loadnext/src/config.rs b/core/tests/loadnext/src/config.rs
index ab578ecfdc6b..c05bf94df04a 100644
--- a/core/tests/loadnext/src/config.rs
+++ b/core/tests/loadnext/src/config.rs
@@ -1,10 +1,9 @@
-use std::{path::PathBuf, time::Duration};
+use std::time::Duration;
 
 use serde::Deserialize;
 use tokio::sync::Semaphore;
-use zksync_contracts::test_contracts::LoadnextContractExecutionParams;
+use zksync_test_contracts::LoadnextContractExecutionParams;
 use zksync_types::{network::Network, Address, L2ChainId, H160};
-use zksync_utils::env::Workspace;
 
 use crate::fs_utils::read_tokens;
 
@@ -49,28 +48,6 @@ pub struct LoadtestConfig {
     #[serde(default = "default_main_token")]
     pub main_token: Address,
 
-    /// Path to test contracts bytecode and ABI required for sending
-    /// deploy and execute L2 transactions. Each folder in the path is expected
-    /// to have the following structure:
-    ///```ignore
-    /// .
-    /// ├── bytecode
-    /// └── abi.json
-    ///```
-    /// Contract folder names are not restricted.
-    ///
-    /// An example:
-    ///```ignore
-    /// .
-    /// ├── erc-20
-    /// │   ├── bytecode
-    /// │   └── abi.json
-    /// └── simple-contract
-    ///     ├── bytecode
-    ///     └── abi.json
-    ///```
-    #[serde(default = "default_test_contracts_path")]
-    pub test_contracts_path: PathBuf,
     /// Limits the number of simultaneous API requests being performed at any moment of time.
     ///
     /// Setting it to:
@@ -189,12 +166,6 @@ fn default_main_token() -> H160 {
     main_token.address
 }
 
-fn default_test_contracts_path() -> PathBuf {
-    let test_contracts_path = Workspace::locate().core().join("etc/contracts-test-data");
-    tracing::info!("Test contracts path: {}", test_contracts_path.display());
-    test_contracts_path
-}
-
 fn default_sync_api_requests_limit() -> usize {
     let result = 20;
     tracing::info!("Using default SYNC_API_REQUESTS_LIMIT: {result}");
@@ -281,8 +252,9 @@ impl ExecutionConfig {
     pub fn from_env() -> Self {
         let transaction_weights =
             TransactionWeights::from_env().unwrap_or_else(default_transaction_weights);
-        let contract_execution_params = LoadnextContractExecutionParams::from_env()
-            .unwrap_or_else(default_contract_execution_params);
+        let contract_execution_params = envy::prefixed("CONTRACT_EXECUTION_PARAMS_")
+            .from_env()
+            .unwrap_or_else(|_| default_contract_execution_params());
         Self {
             transaction_weights,
             contract_execution_params,
@@ -341,16 +313,3 @@ impl RequestLimiters {
         }
     }
 }
-
-#[cfg(test)]
-mod tests {
-
-    use super::*;
-    use crate::fs_utils::loadnext_contract;
-
-    #[test]
-    fn check_read_test_contract() {
-        let test_contracts_path = default_test_contracts_path();
-        loadnext_contract(&test_contracts_path).unwrap();
-    }
-}
diff --git a/core/tests/loadnext/src/fs_utils.rs b/core/tests/loadnext/src/fs_utils.rs
index c4472a00531c..0e5107f40861 100644
--- a/core/tests/loadnext/src/fs_utils.rs
+++ b/core/tests/loadnext/src/fs_utils.rs
@@ -1,10 +1,10 @@
 //! Utilities used for reading tokens, contracts bytecode and ABI from the
 //! filesystem.
 
-use std::{fs::File, io::BufReader, path::Path};
+use std::{fs::File, io::BufReader};
 
 use serde::Deserialize;
-use zksync_types::{ethabi::Contract, network::Network, Address};
+use zksync_types::{network::Network, Address};
 use zksync_utils::env::Workspace;
 
 /// A token stored in `etc/tokens/{network}.json` files.
@@ -16,16 +16,6 @@ pub struct Token {
     pub address: Address,
 }
 
-#[derive(Debug, Clone)]
-pub struct TestContract {
-    /// Contract bytecode to be used for sending deploy transaction.
-    pub bytecode: Vec<u8>,
-    /// Contract ABI.
-    pub contract: Contract,
-
-    pub factory_deps: Vec<Vec<u8>>,
-}
-
 pub fn read_tokens(network: Network) -> anyhow::Result<Vec<Token>> {
     let home = Workspace::locate().core();
     let path = home.join(format!("etc/tokens/{network}.json"));
 
     Ok(serde_json::from_reader(reader)?)
 }
-
-fn extract_bytecode(artifact: &serde_json::Value) -> anyhow::Result<Vec<u8>> {
-    let bytecode = artifact["bytecode"]
-        .as_str()
-        .ok_or_else(|| anyhow::anyhow!("Failed to parse contract bytecode from artifact",))?;
-
-    if let Some(stripped) = bytecode.strip_prefix("0x") {
-        hex::decode(stripped)
-    } else {
-        hex::decode(bytecode)
-    }
-    .map_err(|e| e.into())
-}
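One detail from the `config.rs` hunk above worth spelling out: `ExecutionConfig::from_env` now deserializes `LoadnextContractExecutionParams` straight from `CONTRACT_EXECUTION_PARAMS_*` variables via `envy`'s prefixed reader. A minimal sketch of the mechanism; the struct here is an illustrative stand-in, not the crate's actual definition:

```rust
use serde::Deserialize;

// Illustrative stand-in for `LoadnextContractExecutionParams`; `envy::prefixed`
// matches each field against an env var named `CONTRACT_EXECUTION_PARAMS_<FIELD>`.
#[derive(Debug, Deserialize)]
struct ExecutionParams {
    reads: usize,
    initial_writes: usize,
    repeated_writes: usize,
}

fn main() {
    std::env::set_var("CONTRACT_EXECUTION_PARAMS_READS", "6");
    std::env::set_var("CONTRACT_EXECUTION_PARAMS_INITIAL_WRITES", "2");
    std::env::set_var("CONTRACT_EXECUTION_PARAMS_REPEATED_WRITES", "2");

    let params: ExecutionParams = envy::prefixed("CONTRACT_EXECUTION_PARAMS_")
        .from_env()
        .expect("failed to read CONTRACT_EXECUTION_PARAMS_* env vars");
    println!("{params:?}");
}
```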
-
-/// Reads test contract bytecode and its ABI.
-fn read_contract_dir(path: &Path) -> anyhow::Result<TestContract> {
-    use serde_json::Value;
-
-    let mut artifact: Value =
-        serde_json::from_reader(File::open(path.join("LoadnextContract.json"))?)?;
-
-    let bytecode = extract_bytecode(&artifact)?;
-
-    let abi = artifact["abi"].take();
-    let contract: Contract = serde_json::from_value(abi)?;
-
-    let factory_dep: Value = serde_json::from_reader(File::open(path.join("Foo.json"))?)?;
-    let factory_dep_bytecode = extract_bytecode(&factory_dep)?;
-
-    anyhow::ensure!(
-        contract.functions().count() > 0,
-        "Invalid contract: no methods defined: {:?}",
-        path
-    );
-    anyhow::ensure!(
-        contract.events().count() > 0,
-        "Invalid contract: no events defined: {:?}",
-        path
-    );
-
-    Ok(TestContract {
-        bytecode,
-        contract,
-        factory_deps: vec![factory_dep_bytecode],
-    })
-}
-
-pub fn loadnext_contract(path: &Path) -> anyhow::Result<TestContract> {
-    let path = path.join("artifacts-zk/contracts/loadnext/loadnext_contract.sol");
-    read_contract_dir(&path)
-}
diff --git a/core/tests/loadnext/src/sdk/operations/deploy_contract.rs b/core/tests/loadnext/src/sdk/operations/deploy_contract.rs
index 67e877ae8efb..cac49559c468 100644
--- a/core/tests/loadnext/src/sdk/operations/deploy_contract.rs
+++ b/core/tests/loadnext/src/sdk/operations/deploy_contract.rs
@@ -1,8 +1,8 @@
 use zksync_eth_signer::EthereumSigner;
 use zksync_types::{
-    l2::L2Tx, transaction_request::PaymasterParams, Execute, Nonce, CONTRACT_DEPLOYER_ADDRESS, U256,
+    bytecode::BytecodeHash, l2::L2Tx, transaction_request::PaymasterParams, Execute, Nonce,
+    CONTRACT_DEPLOYER_ADDRESS, U256,
 };
-use zksync_utils::bytecode::hash_bytecode;
 use zksync_web3_decl::namespaces::EthNamespaceClient;
 
 use crate::sdk::{
@@ -60,7 +60,7 @@ where
             None => Nonce(self.wallet.get_nonce().await?),
         };
 
-        let main_contract_hash = hash_bytecode(&bytecode);
+        let main_contract_hash = BytecodeHash::for_bytecode(&bytecode).value();
         let execute_calldata =
             Execute::encode_deploy_params_create(Default::default(), main_contract_hash, calldata);
 
@@ -141,7 +141,7 @@ where
             .unwrap_or_default();
         let calldata = self.calldata.clone().unwrap_or_default();
 
-        let main_contract_hash = hash_bytecode(&bytecode);
+        let main_contract_hash = BytecodeHash::for_bytecode(&bytecode).value();
         let mut factory_deps = self.factory_deps.clone().unwrap_or_default();
         factory_deps.push(bytecode);
         let l2_tx = L2Tx::new(
diff --git a/core/tests/ts-integration/contracts/custom-account/custom-account.sol b/core/tests/ts-integration/contracts/custom-account/custom-account.sol
index fc90355ac64e..991772124771 100644
--- a/core/tests/ts-integration/contracts/custom-account/custom-account.sol
+++ b/core/tests/ts-integration/contracts/custom-account/custom-account.sol
@@ -9,6 +9,10 @@ import './SystemContractsCaller.sol';
 
 import './interfaces/IAccount.sol';
 
+interface ITimestampAsserter {
+    function assertTimestampInRange(uint256 start, uint256 end) external view;
+}
+
 contract CustomAccount is IAccount {
     event BootloaderBalance(uint256);
 
@@ -18,15 +22,28 @@ contract CustomAccount is IAccount {
     uint256 public gasToSpent;
     bytes32 public lastTxHash;
 
+    address public timestampAsserterAddress;
+    uint256 public timestampAsserterRangeStart;
+    uint256 public timestampAsserterRangeEnd;
+
-    constructor(bool _violateValidationRules) {
+    constructor(bool _violateValidationRules, address _timestampAsserterAddress, uint256 _timestampAsserterRangeStart, uint256 _timestampAsserterRangeEnd) {
         violateValidationRules = _violateValidationRules;
+        timestampAsserterAddress = _timestampAsserterAddress;
+        timestampAsserterRangeStart = _timestampAsserterRangeStart;
+        timestampAsserterRangeEnd = _timestampAsserterRangeEnd;
     }
 
     // bytes4(keccak256("isValidSignature(bytes32,bytes)")
     bytes4 constant EIP1271_SUCCESS_RETURN_VALUE = 0x1626ba7e;
 
     function validateTransaction(bytes32 _txHash, bytes32 _suggestedSignedTxHash, Transaction calldata _transaction) external payable override returns (bytes4 magic) {
+        ITimestampAsserter timestampAsserter = ITimestampAsserter(timestampAsserterAddress);
+        // This assertion exists to ensure that block.timestamp can be accessed in AA by using
+        // the ITimestampAsserter contract
+
+        timestampAsserter.assertTimestampInRange(timestampAsserterRangeStart, timestampAsserterRangeEnd);
+
         magic = _validateTransaction(_suggestedSignedTxHash, _transaction);
         lastTxHash = _txHash;
 
diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json
index 8e5c0cf7470e..ee0fa9c99848 100644
--- a/core/tests/ts-integration/package.json
+++ b/core/tests/ts-integration/package.json
@@ -17,6 +17,7 @@
         "@matterlabs/hardhat-zksync-solc": "^1.2.4",
         "@matterlabs/hardhat-zksync-vyper": "^1.1.0",
         "@nomiclabs/hardhat-vyper": "^3.0.6",
+        "@openzeppelin/contracts": "^4.8.0",
         "@types/jest": "^29.0.3",
         "@types/node": "^18.19.15",
         "@types/node-fetch": "^2.5.7",
diff --git a/core/tests/ts-integration/src/env.ts b/core/tests/ts-integration/src/env.ts
index 1de917c2362c..26f99d75780f 100644
--- a/core/tests/ts-integration/src/env.ts
+++ b/core/tests/ts-integration/src/env.ts
@@ -87,7 +87,7 @@ async function loadTestEnvironmentFromFile(fileConfig: FileConfig): Promise<TestEnvironment> {
     );
     const healthcheckPort = process.env.API_HEALTHCHECK_PORT ?? '3071';
 
+    if (!process.env.CONTRACTS_L2_TIMESTAMP_ASSERTER_ADDR) {
+        throw new Error('CONTRACTS_L2_TIMESTAMP_ASSERTER_ADDR is not defined');
+    }
+    const timestampAsserterAddress = process.env.CONTRACTS_L2_TIMESTAMP_ASSERTER_ADDR.toString();
+
+    const timestampAsserterMinTimeTillEndSec = parseInt(process.env.TIMESTAMP_ASSERTER_MIN_TIME_TILL_END_SEC!);
+
     return {
         maxLogsLimit,
         pathToHome,
@@ -312,7 +324,9 @@ export async function loadTestEnvironmentFromEnv(): Promise<TestEnvironment> {
             decimals: baseToken?.decimals || token.decimals,
             l1Address: baseToken?.address || token.address,
             l2Address: baseTokenAddressL2
-        }
+        },
+        timestampAsserterAddress,
+        timestampAsserterMinTimeTillEndSec
     };
 }
 
diff --git a/core/tests/ts-integration/src/types.ts b/core/tests/ts-integration/src/types.ts
index c513480c1b41..014031a3dd7e 100644
--- a/core/tests/ts-integration/src/types.ts
+++ b/core/tests/ts-integration/src/types.ts
@@ -94,6 +94,8 @@ export interface TestEnvironment {
      */
     baseToken: Token;
     healthcheckPort: string;
+    timestampAsserterAddress: string;
+    timestampAsserterMinTimeTillEndSec: number;
 }
 
 /**
diff --git a/core/tests/ts-integration/src/utils.ts b/core/tests/ts-integration/src/utils.ts
index bb6fa93757ee..f8378c8dff01 100644
--- a/core/tests/ts-integration/src/utils.ts
+++ b/core/tests/ts-integration/src/utils.ts
@@ -1,17 +1,12 @@
 import { spawn as _spawn, ChildProcessWithoutNullStreams, type ProcessEnvOptions } from 'child_process';
 import { assert } from 'chai';
-import { FileConfig } from 'utils/build/file-configs';
+import { FileConfig, getConfigPath } from 'utils/build/file-configs';
 import { killPidWithAllChilds } from 'utils/build/kill';
 import * as utils from 'utils';
 import fs from 'node:fs/promises';
 import * as zksync from 'zksync-ethers';
-import {
-    deleteInternalEnforcedL1GasPrice,
-    deleteInternalEnforcedPubdataPrice,
-    setInternalEnforcedL1GasPrice,
-    setInternalEnforcedPubdataPrice,
-    setTransactionSlots
-} from '../tests/utils';
+import * as fsSync from 'fs';
+import YAML from 'yaml';
 
 // executes a command in background and returns a child process handle
 // by default pipes data to parent's stdio but this can be overridden
@@ -105,48 +100,149 @@ export class Node {
     }
 }
 
+interface MainNodeOptions {
+    newL1GasPrice?: bigint;
+    newPubdataPrice?: bigint;
+    customBaseToken?: boolean;
+    externalPriceApiClientForcedNumerator?: number;
+    externalPriceApiClientForcedDenominator?: number;
+    externalPriceApiClientForcedFluctuation?: number;
+    baseTokenPricePollingIntervalMs?: number;
+    baseTokenAdjusterL1UpdateDeviationPercentage?: number;
+}
 export class NodeSpawner {
+    private readonly generalConfigPath: string | undefined;
+    private readonly originalConfig: string | undefined;
+    public mainNode: Node<NodeType.MAIN> | null;
+
     public constructor(
         private readonly pathToHome: string,
         private readonly logs: fs.FileHandle,
         private readonly fileConfig: FileConfig,
         private readonly options: MainNodeSpawnOptions,
         private env?: ProcessEnvOptions['env']
-    ) {}
+    ) {
+        this.mainNode = null;
+        if (fileConfig.loadFromFile) {
+            this.generalConfigPath = getConfigPath({
+                pathToHome,
+                chain: fileConfig.chain,
+                configsFolder: 'configs',
+                config: 'general.yaml'
+            });
+            this.originalConfig = fsSync.readFileSync(this.generalConfigPath, 'utf8');
+        }
+    }
+
+    public async killAndSpawnMainNode(configOverrides: MainNodeOptions | null = null): Promise<void> {
+        if (this.mainNode != null) {
+            await this.mainNode.killAndWaitForShutdown();
+            this.mainNode = null;
+        }
+        this.mainNode = await this.spawnMainNode(configOverrides);
+    }
 
-    public async spawnMainNode(newL1GasPrice?: string, newPubdataPrice?: string): Promise<Node<NodeType.MAIN>> {
+    private async spawnMainNode(overrides: MainNodeOptions | null): Promise<Node<NodeType.MAIN>> {
         const env = this.env ?? process.env;
         const { fileConfig, pathToHome, options, logs } = this;
 
-        const testMode = newPubdataPrice || newL1GasPrice;
+        const testMode = overrides?.newPubdataPrice != null || overrides?.newL1GasPrice != null;
 
-        console.log('New L1 Gas Price: ', newL1GasPrice);
-        console.log('New Pubdata Price: ', newPubdataPrice);
+        console.log('Overrides: ', overrides);
 
         if (fileConfig.loadFromFile) {
-            setTransactionSlots(pathToHome, fileConfig, testMode ? 1 : 8192);
+            this.restoreConfig();
+            const config = this.readFileConfig();
+            config['state_keeper']['transaction_slots'] = testMode ? 1 : 8192;
 
-            if (newL1GasPrice) {
-                setInternalEnforcedL1GasPrice(pathToHome, fileConfig, parseFloat(newL1GasPrice));
-            } else {
-                deleteInternalEnforcedL1GasPrice(pathToHome, fileConfig);
-            }
+            if (overrides != null) {
+                if (overrides.newL1GasPrice) {
+                    config['eth']['gas_adjuster']['internal_enforced_l1_gas_price'] = overrides.newL1GasPrice;
+                }
+
+                if (overrides.newPubdataPrice) {
+                    config['eth']['gas_adjuster']['internal_enforced_pubdata_price'] = overrides.newPubdataPrice;
+                }
+
+                if (overrides.externalPriceApiClientForcedNumerator !== undefined) {
+                    config['external_price_api_client']['forced_numerator'] =
+                        overrides.externalPriceApiClientForcedNumerator;
+                }
+
+                if (overrides.externalPriceApiClientForcedDenominator !== undefined) {
+                    config['external_price_api_client']['forced_denominator'] =
+                        overrides.externalPriceApiClientForcedDenominator;
+                }
+
+                if (overrides.externalPriceApiClientForcedFluctuation !== undefined) {
+                    config['external_price_api_client']['forced_fluctuation'] =
+                        overrides.externalPriceApiClientForcedFluctuation;
+                }
+
+                if (overrides.baseTokenPricePollingIntervalMs !== undefined) {
+                    const cacheUpdateInterval = overrides.baseTokenPricePollingIntervalMs / 2;
+                    // To reduce the price polling interval, we also need to reduce the base token receipt checking
+                    // and tx sending sleeps, as they block the poller; the cache update interval shrinks accordingly.
+
+                    config['base_token_adjuster']['l1_receipt_checking_sleep_ms'] =
+                        overrides.baseTokenPricePollingIntervalMs;
+                    config['base_token_adjuster']['l1_tx_sending_sleep_ms'] = overrides.baseTokenPricePollingIntervalMs;
+                    config['base_token_adjuster']['price_polling_interval_ms'] =
+                        overrides.baseTokenPricePollingIntervalMs;
+                    config['base_token_adjuster']['price_cache_update_interval_ms'] = cacheUpdateInterval;
+                }
 
-            if (newPubdataPrice) {
-                setInternalEnforcedPubdataPrice(pathToHome, fileConfig, parseFloat(newPubdataPrice));
-            } else {
-                deleteInternalEnforcedPubdataPrice(pathToHome, fileConfig);
+                if (overrides.baseTokenAdjusterL1UpdateDeviationPercentage !== undefined) {
+                    config['base_token_adjuster']['l1_update_deviation_percentage'] =
+                        overrides.baseTokenAdjusterL1UpdateDeviationPercentage;
+                }
             }
+
+            this.writeFileConfig(config);
         } else {
             env['DATABASE_MERKLE_TREE_MODE'] = 'full';
 
-            if (newPubdataPrice) {
-                env['ETH_SENDER_GAS_ADJUSTER_INTERNAL_ENFORCED_PUBDATA_PRICE'] = newPubdataPrice;
-            }
+            if (overrides != null) {
+                if (overrides.newPubdataPrice) {
+                    env['ETH_SENDER_GAS_ADJUSTER_INTERNAL_ENFORCED_PUBDATA_PRICE'] =
+                        overrides.newPubdataPrice.toString();
+                }
 
-            if (newL1GasPrice) {
-                // We need to ensure that each transaction gets into its own batch for more fair comparison.
-                env['ETH_SENDER_GAS_ADJUSTER_INTERNAL_ENFORCED_L1_GAS_PRICE'] = newL1GasPrice;
+                if (overrides.newL1GasPrice) {
+                    // We need to ensure that each transaction gets into its own batch for a fairer comparison.
+                    env['ETH_SENDER_GAS_ADJUSTER_INTERNAL_ENFORCED_L1_GAS_PRICE'] = overrides.newL1GasPrice.toString();
+                }
+
+                if (overrides.externalPriceApiClientForcedNumerator !== undefined) {
+                    env['EXTERNAL_PRICE_API_CLIENT_FORCED_NUMERATOR'] =
+                        overrides.externalPriceApiClientForcedNumerator.toString();
+                }
+
+                if (overrides.externalPriceApiClientForcedDenominator !== undefined) {
+                    env['EXTERNAL_PRICE_API_CLIENT_FORCED_DENOMINATOR'] =
+                        overrides.externalPriceApiClientForcedDenominator.toString();
+                }
+
+                if (overrides.externalPriceApiClientForcedFluctuation !== undefined) {
+                    env['EXTERNAL_PRICE_API_CLIENT_FORCED_FLUCTUATION'] =
+                        overrides.externalPriceApiClientForcedFluctuation.toString();
+                }
+
+                if (overrides.baseTokenPricePollingIntervalMs !== undefined) {
+                    const cacheUpdateInterval = overrides.baseTokenPricePollingIntervalMs / 2;
+                    // To reduce the price polling interval, we also need to reduce the base token receipt checking
+                    // and tx sending sleeps, as they block the poller; the cache update interval shrinks accordingly.
+                    env['BASE_TOKEN_ADJUSTER_L1_RECEIPT_CHECKING_SLEEP_MS'] =
+                        overrides.baseTokenPricePollingIntervalMs.toString();
+                    env['BASE_TOKEN_ADJUSTER_L1_TX_SENDING_SLEEP_MS'] =
+                        overrides.baseTokenPricePollingIntervalMs.toString();
+                    env['BASE_TOKEN_ADJUSTER_PRICE_POLLING_INTERVAL_MS'] =
+                        overrides.baseTokenPricePollingIntervalMs.toString();
+                    env['BASE_TOKEN_ADJUSTER_PRICE_CACHE_UPDATE_INTERVAL_MS'] = cacheUpdateInterval.toString();
+                }
+
+                if (overrides.baseTokenAdjusterL1UpdateDeviationPercentage !== undefined) {
+                    env['BASE_TOKEN_ADJUSTER_L1_UPDATE_DEVIATION_PERCENTAGE'] =
+                        overrides.baseTokenAdjusterL1UpdateDeviationPercentage.toString();
+                }
+            }
         }
 
         if (testMode) {
@@ -175,6 +271,26 @@ export class NodeSpawner {
         await waitForNodeToStart(proc, options.apiWeb3JsonRpcHttpUrl);
         return new Node(proc, options.apiWeb3JsonRpcHttpUrl, NodeType.MAIN);
     }
+
+    public restoreConfig() {
+        if (this.generalConfigPath != void 0 && this.originalConfig != void 0)
+            fsSync.writeFileSync(this.generalConfigPath, this.originalConfig, 'utf8');
+    }
+
+    private readFileConfig() {
+        if (this.generalConfigPath == void 0)
+            throw new Error('Trying to set property in config while not in file mode');
+        const generalConfig = fsSync.readFileSync(this.generalConfigPath, 'utf8');
+        return YAML.parse(generalConfig);
+    }
+
+    private writeFileConfig(config: any) {
+        if (this.generalConfigPath == void 0)
+            throw new Error('Trying to set property in config while not in file mode');
+
+        const newGeneralConfig = YAML.stringify(config);
+        fsSync.writeFileSync(this.generalConfigPath, newGeneralConfig, 'utf8');
+    }
 }
 
 async function waitForNodeToStart(proc: ChildProcessWithoutNullStreams, l2Url: string) {
diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts
index 6f1b6c3aa6b8..ceed9654df91 100644
--- a/core/tests/ts-integration/tests/api/web3.test.ts
+++ b/core/tests/ts-integration/tests/api/web3.test.ts
@@ -189,7 +189,8 @@ describe('web3 API compatibility tests', () => {
         ['eth_getCompilers', [], []],
         ['eth_hashrate', [], '0x0'],
         ['eth_mining', [], false],
-        ['eth_getUncleCountByBlockNumber', ['0x0'], '0x0']
+        ['eth_getUncleCountByBlockNumber', ['0x0'], '0x0'],
+        ['eth_maxPriorityFeePerGas', [], '0x0']
     ])('Should test bogus web3 methods (%s)', async (method: string, input: string[], output: string) => {
         await expect(alice.provider.send(method, input)).resolves.toEqual(output);
     });
@@ -271,7 +272,8 @@ describe('web3 API compatibility tests', () => {
         const eip1559ApiReceipt = await alice.provider.getTransaction(eip1559Tx.hash);
 
         expect(eip1559ApiReceipt.maxFeePerGas).toEqual(eip1559Tx.maxFeePerGas!);
-        expect(eip1559ApiReceipt.maxPriorityFeePerGas).toEqual(eip1559Tx.maxPriorityFeePerGas!);
+        // `ethers` will use the value provided by `eth_maxPriorityFeePerGas`, and we return 0 there.
+        expect(eip1559ApiReceipt.maxPriorityFeePerGas).toEqual(0n);
     });
 
     test('Should test getFilterChanges for pending transactions', async () => {
diff --git a/core/tests/ts-integration/tests/base-token.test.ts b/core/tests/ts-integration/tests/base-token.test.ts
index 8ecc9de3ddb9..432ce70ae17f 100644
--- a/core/tests/ts-integration/tests/base-token.test.ts
+++ b/core/tests/ts-integration/tests/base-token.test.ts
@@ -39,9 +39,8 @@ describe('base ERC20 contract checks', () => {
         const numerator = Number(await zksyncContract.baseTokenGasPriceMultiplierNominator());
         const denominator = Number(await zksyncContract.baseTokenGasPriceMultiplierDenominator());
 
-        // checking that the numerator and denominator don't have their default values
-        expect(numerator).toBe(3);
-        expect(denominator).toBe(2);
+        expect(numerator).toBe(314);
+        expect(denominator).toBe(1000);
     });
 
     test('Can perform a deposit', async () => {
diff --git a/core/tests/ts-integration/tests/contracts.test.ts b/core/tests/ts-integration/tests/contracts.test.ts
index b17c2b335989..de1c632ab9cc 100644
--- a/core/tests/ts-integration/tests/contracts.test.ts
+++ b/core/tests/ts-integration/tests/contracts.test.ts
@@ -423,35 +423,6 @@ describe('Smart contract behavior checks', () => {
         expect(receipt.status).toEqual(1);
     });
 
-    test('Should check transient storage', async () => {
-        const artifact = require(`${
-            testMaster.environment().pathToHome
-        }/etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json`);
-        const contractFactory = new zksync.ContractFactory(artifact.abi, artifact.bytecode, alice);
-        const storageContract = (await contractFactory.deploy()) as zksync.Contract;
-        await storageContract.waitForDeployment();
-        // Tests transient storage, see contract code for details.
-        await expect(storageContract.testTransientStore()).toBeAccepted([]);
-        // Checks that transient storage is cleaned up after each tx.
-        await expect(storageContract.assertTValue(0)).toBeAccepted([]);
-    });
-
-    test('Should check code oracle works', async () => {
-        // Deploy contract that calls CodeOracle.
-        const artifact = require(`${
-            testMaster.environment().pathToHome
-        }/etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json`);
-        const contractFactory = new zksync.ContractFactory(artifact.abi, artifact.bytecode, alice);
-        const contract = (await contractFactory.deploy()) as zksync.Contract;
-        await contract.waitForDeployment();
-
-        // Check that CodeOracle can decommit code of just deployed contract.
- const versionedHash = zksync.utils.hashBytecode(artifact.bytecode); - const expectedBytecodeHash = ethers.keccak256(artifact.bytecode); - - await expect(contract.callCodeOracle(versionedHash, expectedBytecodeHash)).toBeAccepted([]); - }); - afterAll(async () => { await testMaster.deinitialize(); }); diff --git a/core/tests/ts-integration/tests/custom-account.test.ts b/core/tests/ts-integration/tests/custom-account.test.ts index 46ddba95323a..ebbe11b87195 100644 --- a/core/tests/ts-integration/tests/custom-account.test.ts +++ b/core/tests/ts-integration/tests/custom-account.test.ts @@ -18,6 +18,9 @@ const contracts = { // We create multiple custom accounts and we need to fund them with ETH to pay for fees. const ETH_PER_CUSTOM_ACCOUNT = L2_DEFAULT_ETH_PER_ACCOUNT / 8n; const TRANSFER_AMOUNT = 1n; +const DEFAULT_TIMESTAMP_ASSERTER_RANGE_START = 0; +// 2555971200 is a number of seconds up to 30/12/2050 +const DEFAULT_TIMESTAMP_ASSERTER_RANGE_END = 2555971200; describe('Tests for the custom account behavior', () => { let testMaster: TestMaster; @@ -25,11 +28,13 @@ describe('Tests for the custom account behavior', () => { let customAccount: zksync.Contract; let erc20Address: string; let erc20: zksync.Contract; + let timestampAsserterAddress: string; beforeAll(() => { testMaster = TestMaster.getInstance(__filename); alice = testMaster.mainAccount(); erc20Address = testMaster.environment().erc20Token.l2Address; + timestampAsserterAddress = testMaster.environment().timestampAsserterAddress; erc20 = new zksync.Contract( erc20Address, zksync.utils.IERC20, @@ -40,7 +45,17 @@ describe('Tests for the custom account behavior', () => { test('Should deploy custom account', async () => { const violateRules = false; - customAccount = await deployContract(alice, contracts.customAccount, [violateRules], 'createAccount'); + customAccount = await deployContract( + alice, + contracts.customAccount, + [ + violateRules, + timestampAsserterAddress, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_START, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_END + ], + 'createAccount' + ); // Now we need to check that it was correctly marked as an account: const contractAccountInfo = await alice.provider.getContractAccountInfo(await customAccount.getAddress()); @@ -50,6 +65,8 @@ describe('Tests for the custom account behavior', () => { // Checking that the nonce ordering is correct expect(contractAccountInfo.nonceOrdering).toEqual(zksync.types.AccountNonceOrdering.Sequential); + + return customAccount; }); test('Should fund the custom account', async () => { @@ -60,7 +77,7 @@ describe('Tests for the custom account behavior', () => { .transfer({ to: await customAccount.getAddress(), token: erc20Address, - amount: ERC20_PER_ACCOUNT / 4n + amount: ERC20_PER_ACCOUNT / 8n }) .then((tx) => tx.wait()); }); @@ -95,6 +112,122 @@ describe('Tests for the custom account behavior', () => { ).toBeAccepted([erc20BalanceChange, feeCheck]); }); + test('Should fail transaction validation due to timestamp assertion in the validation tracer - close to the range end', async () => { + const now = Math.floor(Date.now() / 1000); + const minTimeTillEnd = testMaster.environment().timestampAsserterMinTimeTillEndSec; + const rangeStart = now - 10; + const rangeEnd = now + minTimeTillEnd / 2; + + const customAccount = await deployAndFundCustomAccount( + alice, + erc20Address, + timestampAsserterAddress, + rangeStart, + rangeEnd + ); + + const tx = await erc20.transfer.populateTransaction(alice.address, TRANSFER_AMOUNT); + + await expect( + sendCustomAccountTransaction( + tx 
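// Editorial note (illustrative): `rangeEnd` above is `now + minTimeTillEnd / 2`, i.e. the
// asserted range ends sooner than `timestampAsserterMinTimeTillEndSec` from now, so the
// validation tracer should reject the transaction with the "too close to the range end"
// error asserted below. Roughly: reject when `rangeEnd - block.timestamp < minTimeTillEnd`
// (the exact tracer condition is assumed here; it is not shown in this diff).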
as zksync.types.Transaction,
+                alice.provider,
+                await customAccount.getAddress(),
+                testMaster.environment().l2ChainId
+            )
+        ).toBeRejected(
+            'failed to validate the transaction. reason: Violated validation rules: block.timestamp is too close to the range end'
+        );
+    });
+
+    test('Should execute contract by custom account when timestamp asserter range end overflows', async () => {
+        // This test ensures that a custom account transaction completes successfully
+        // even when the timestamp asserter's range end exceeds `u64::MAX`. In such cases,
+        // the range is capped at `u64::MAX` and processed as expected.
+        const customAccount = await deployAndFundCustomAccount(
+            alice,
+            erc20Address,
+            timestampAsserterAddress,
+            0,
+            BigInt('340282366920938463463374607431768211455') // u128::MAX
+        );
+
+        const tx = await erc20.transfer.populateTransaction(alice.address, TRANSFER_AMOUNT);
+        const customAccountAddress = await customAccount.getAddress();
+        const erc20BalanceChange = await shouldChangeTokenBalances(erc20Address, [
+            {
+                addressToCheck: customAccountAddress,
+                wallet: alice,
+                change: -TRANSFER_AMOUNT
+            },
+            { wallet: alice, change: TRANSFER_AMOUNT }
+        ]);
+        const feeCheck = await shouldChangeETHBalances([
+            { addressToCheck: customAccountAddress, wallet: alice, change: 0n }
+        ]);
+
+        await expect(
+            sendCustomAccountTransaction(
+                tx as zksync.types.Transaction,
+                alice.provider,
+                await customAccount.getAddress(),
+                testMaster.environment().l2ChainId
+            )
+        ).toBeAccepted([erc20BalanceChange, feeCheck]);
+    });
+
+    test('Should fail to estimate fee due to block.timestamp assertion in the smart contract', async () => {
+        const now = Math.floor(Date.now() / 1000);
+        const rangeStart = now + 300;
+        const rangeEnd = now + 1000;
+
+        const customAccount = await deployAndFundCustomAccount(
+            alice,
+            erc20Address,
+            timestampAsserterAddress,
+            rangeStart,
+            rangeEnd
+        );
+        const customAccountAddress = await customAccount.getAddress();
+
+        const tx = await erc20.transfer.populateTransaction(alice.address, TRANSFER_AMOUNT);
+
+        try {
+            await sendCustomAccountTransaction(
+                tx as zksync.types.Transaction,
+                alice.provider,
+                customAccountAddress,
+                testMaster.environment().l2ChainId,
+                undefined,
+                undefined,
+                false
+            );
+            expect(null).fail('The transaction was expected to fail');
+        } catch (e) {
+            const err = e as Error;
+            expect(err.message).toContain(
+                'failed to validate the transaction. reason: Validation revert: Account validation error'
+            );
+            const functionSelectorMatch = err.message.match(/function_selector\s=\s(0x[0-9a-fA-F]{8})/);
+            const calldataMatch = err.message.match(/data\s=\s(0x[0-9a-fA-F]+)/);
+
+            expect(functionSelectorMatch && calldataMatch).toBeTruthy();
+
+            const functionSelector = functionSelectorMatch![1];
+            expect(functionSelector).toBe('0x3d5740d9');
+
+            const calldata = calldataMatch![1];
+
+            const startHex = calldata.slice(74, 138);
+            const endHex = calldata.slice(138);
+            const start = BigInt(`0x${startHex}`);
+            const end = BigInt(`0x${endHex}`);
+
+            expect(start).toBe(BigInt(rangeStart));
+            expect(end).toBe(BigInt(rangeEnd));
+        }
+    });
+
    test('Should fail the validation with incorrect signature', async () => {
        const tx = await erc20.transfer.populateTransaction(alice.address, TRANSFER_AMOUNT);
        const fakeSignature = new Uint8Array(12);
@@ -112,7 +245,17 @@ describe('Tests for the custom account behavior', () => {
    test('Should not allow violating validation rules', async () => {
        // We configure account to violate storage access rules during tx validation.
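// Editorial note (illustrative): the `slice(74, 138)` / `slice(138)` offsets in the fee
// estimation test above decode the revert calldata string as 2 chars of '0x' + 8 chars of
// selector + 32-byte ABI words of 64 hex chars each: word 1 at [10, 74) (presumably the
// current `block.timestamp`), word 2 at [74, 138) = `start`, word 3 at [138, 202) = `end`.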
const violateRules = true; - const badCustomAccount = await deployContract(alice, contracts.customAccount, [violateRules], 'createAccount'); + const badCustomAccount = await deployContract( + alice, + contracts.customAccount, + [ + violateRules, + timestampAsserterAddress, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_START, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_END + ], + 'createAccount' + ); const badCustomAccountAddress = await badCustomAccount.getAddress(); // Fund the account. @@ -145,7 +288,17 @@ describe('Tests for the custom account behavior', () => { // Note that we supply "create" instead of "createAccount" here -- the code is the same, but it'll // be treated as a common contract. const violateRules = false; - const nonAccount = await deployContract(alice, contracts.customAccount, [violateRules], 'create'); + const nonAccount = await deployContract( + alice, + contracts.customAccount, + [ + violateRules, + timestampAsserterAddress, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_START, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_END + ], + 'create' + ); const nonAccountAddress = await nonAccount.getAddress(); // Fund the account. @@ -203,7 +356,12 @@ describe('Tests for the custom account behavior', () => { const badCustomAccount = await deployContract( alice, contracts.customAccount, - [violateStorageRules], + [ + violateStorageRules, + timestampAsserterAddress, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_START, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_END + ], 'createAccount' ); const badCustomAccountAddress = await badCustomAccount.getAddress(); @@ -244,7 +402,12 @@ describe('Tests for the custom account behavior', () => { const badCustomAccount = await deployContract( alice, contracts.customAccount, - [violateStorageRules], + [ + violateStorageRules, + timestampAsserterAddress, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_START, + DEFAULT_TIMESTAMP_ASSERTER_RANGE_END + ], 'createAccount' ); const badCustomAccountAddress = await badCustomAccount.getAddress(); @@ -316,12 +479,11 @@ async function sendCustomAccountTransaction( accountAddress: string, chainId: bigint, customSignature?: Uint8Array, - nonce?: number + nonce?: number, + estimateGas: boolean = true ) { - const gasLimit = await browserProvider.estimateGas({ - ...tx, - from: accountAddress - }); + const gasLimit = estimateGas ? 
await browserProvider.estimateGas({ ...tx, from: accountAddress }) : BigInt(100_000); // Enough gas to invoke AA contract
+
    const gasPrice = await browserProvider.getGasPrice();
    tx.gasLimit = gasLimit;
@@ -345,3 +507,30 @@ async function sendCustomAccountTransaction(
    return await browserProvider.broadcastTransaction(serializedTx);
}
+
+async function deployAndFundCustomAccount(
+    richAccount: zksync.Wallet,
+    erc20Address: string,
+    timestampAsserterAddress: string,
+    rangeStart: any,
+    rangeEnd: any
+): Promise<zksync.Contract> {
+    const customAccount = await deployContract(
+        richAccount,
+        contracts.customAccount,
+        [false, timestampAsserterAddress, rangeStart, rangeEnd],
+        'createAccount'
+    );
+
+    await richAccount
+        .transfer({ to: await customAccount.getAddress(), amount: ETH_PER_CUSTOM_ACCOUNT })
+        .then((tx) => tx.wait());
+    await richAccount
+        .transfer({
+            to: await customAccount.getAddress(),
+            token: erc20Address,
+            amount: ERC20_PER_ACCOUNT / 8n
+        })
+        .then((tx) => tx.wait());
+    return customAccount;
+}
diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts
index e99d3b67911b..fc156e03f16d 100644
--- a/core/tests/ts-integration/tests/fees.test.ts
+++ b/core/tests/ts-integration/tests/fees.test.ts
@@ -15,13 +15,15 @@ import { TestContextOwner, TestMaster } from '../src';
import * as zksync from 'zksync-ethers';
import * as ethers from 'ethers';
import { DataAvailabityMode, Token } from '../src/types';
-import { SYSTEM_CONTEXT_ADDRESS, getTestContract } from '../src/helpers';
+import { SYSTEM_CONTEXT_ADDRESS, getTestContract, waitForNewL1Batch, anyTransaction } from '../src/helpers';
import { loadConfig, shouldLoadConfigFromFile } from 'utils/build/file-configs';
import { logsTestPath } from 'utils/build/logs';
-import path from 'path';
-import { NodeSpawner, Node, NodeType } from '../src/utils';
-import { deleteInternalEnforcedL1GasPrice, deleteInternalEnforcedPubdataPrice, setTransactionSlots } from './utils';
+import { sleep } from 'utils/build';
import { killPidWithAllChilds } from 'utils/build/kill';
+import path from 'path';
+import { NodeSpawner } from '../src/utils';
+import { sendTransfers } from '../src/context-owner';
+import { Reporter } from '../src/reporter';

declare global {
    var __ZKSYNC_TEST_CONTEXT_OWNER__: TestContextOwner;
@@ -60,13 +62,13 @@ testFees('Test fees', function () {
    let tokenDetails: Token;
    let aliceErc20: zksync.Contract;
+    let isETHBasedChain: boolean;
    let mainLogs: fs.FileHandle;
    let baseTokenAddress: string;
    let ethClientWeb3Url: string;
    let apiWeb3JsonRpcHttpUrl: string;
    let mainNodeSpawner: NodeSpawner;
-    let mainNode: Node;

    const fileConfig = shouldLoadConfigFromFile();
    const pathToHome = path.join(__dirname, '../../../..');
@@ -121,11 +123,41 @@
            baseTokenAddress
        });
-        mainNode = await mainNodeSpawner.spawnMainNode();
+        await mainNodeSpawner.killAndSpawnMainNode();
        alice = testMaster.mainAccount();
        tokenDetails = testMaster.environment().erc20Token;
        aliceErc20 = new ethers.Contract(tokenDetails.l1Address, zksync.utils.IERC20, alice.ethWallet());
+
+        const mainWallet = new zksync.Wallet(
+            testMaster.environment().mainWalletPK,
+            alice._providerL2(),
+            alice._providerL1()
+        );
+
+        isETHBasedChain = baseTokenAddress == zksync.utils.ETH_ADDRESS_IN_CONTRACTS;
+
+        // On non-ETH-based chains the standard deposit is not enough to run all these tests
+        if (!isETHBasedChain) {
+            const depositTx = await mainWallet.deposit({
+                token: baseTokenAddress,
+                amount:
ethers.parseEther('100'), + approveERC20: true, + approveBaseERC20: true + }); + await depositTx.wait(); + await Promise.all( + await sendTransfers( + zksync.utils.ETH_ADDRESS, + mainWallet, + { alice: alice.privateKey }, + ethers.parseEther('100'), + undefined, + undefined, + new Reporter() + ) + ); + } }); test('Test all fees', async () => { @@ -177,8 +209,10 @@ testFees('Test fees', function () { ]; for (const gasPrice of L1_GAS_PRICES_TO_TEST) { // For the sake of simplicity, we'll use the same pubdata price as the L1 gas price. - await mainNode.killAndWaitForShutdown(); - mainNode = await mainNodeSpawner.spawnMainNode(gasPrice.toString(), gasPrice.toString()); + await mainNodeSpawner.killAndSpawnMainNode({ + newL1GasPrice: gasPrice, + newPubdataPrice: gasPrice + }); reports = await appendResults( alice, @@ -213,6 +247,96 @@ testFees('Test fees', function () { console.log(`Full report: \n\n${reports.join('\n\n')}`); }); + test('Test gas price expected value', async () => { + const l1GasPrice = 2_000_000_000n; /// set to 2 gwei + await mainNodeSpawner.killAndSpawnMainNode({ + newL1GasPrice: l1GasPrice, + newPubdataPrice: l1GasPrice + }); + + // wait for new batch so gas price is updated with new config set above + await waitForNewL1Batch(alice); + + const receipt = await anyTransaction(alice); + + const feeParams = await alice._providerL2().getFeeParams(); + const feeConfig = feeParams.V2.config; + // type is missing conversion_ratio field + const conversionRatio: { numerator: bigint; denominator: bigint } = (feeParams.V2 as any)['conversion_ratio']; + if (isETHBasedChain) { + expect(conversionRatio.numerator).toBe(1); //number not bigint for some reason + expect(conversionRatio.denominator).toBe(1); + } else { + expect(conversionRatio.numerator).toBeGreaterThan(1n); + } + + // the minimum + compute overhead of 0.01gwei in validium mode + const expectedETHGasPrice = + feeConfig.minimal_l2_gas_price + + (feeConfig.compute_overhead_part * feeParams.V2.l1_gas_price * feeConfig.batch_overhead_l1_gas) / + feeConfig.max_gas_per_batch; + const expectedConvertedGasPrice = + (expectedETHGasPrice * conversionRatio.numerator) / conversionRatio.denominator; + + expect(receipt.gasPrice).toBe(BigInt(expectedConvertedGasPrice)); + }); + + test('Test base token ratio fluctuations', async () => { + const l1GasPrice = 2_000_000_000n; /// set to 2 gwei + + if (isETHBasedChain) return; + + await mainNodeSpawner.killAndSpawnMainNode({ + newL1GasPrice: l1GasPrice, + newPubdataPrice: l1GasPrice, + externalPriceApiClientForcedNumerator: 300, + externalPriceApiClientForcedDenominator: 100, + externalPriceApiClientForcedFluctuation: 20, + baseTokenPricePollingIntervalMs: 1000, + baseTokenAdjusterL1UpdateDeviationPercentage: 0 + }); + + const beginFeeParams = await alice._providerL2().getFeeParams(); + const mainContract = await alice.getMainContract(); + const beginL1Nominator = await mainContract.baseTokenGasPriceMultiplierNominator(); + let changedL2 = false; + let changedL1 = false; + for (let i = 0; i < 20; i++) { + await sleep(0.5); + const newFeeParams = await alice._providerL2().getFeeParams(); + // we need any as FeeParams is missing existing conversion_ratio field + + if ( + ((newFeeParams.V2 as any)['conversion_ratio'].numerator as number) != + ((beginFeeParams.V2 as any)['conversion_ratio'].numerator as number) + ) { + // @ts-ignore + const diff = + (newFeeParams.V2 as any)['conversion_ratio'].numerator - + (beginFeeParams.V2 as any)['conversion_ratio'].numerator; + // Deviation is 20%, Adding 5% 
extra for any arithmetic precision issues, 25%*300 = 75 + expect(diff).toBeLessThan(75); + expect(diff).toBeGreaterThan(-75); + changedL2 = true; + break; + } + } + expect(changedL2).toBeTruthy(); + for (let i = 0; i < 10; i++) { + const newL1Nominator = await mainContract.baseTokenGasPriceMultiplierNominator(); + if (newL1Nominator != beginL1Nominator) { + const diff = newL1Nominator - beginL1Nominator; + expect(diff).toBeLessThan(75); // as above + expect(diff).toBeGreaterThan(-75); + changedL1 = true; + break; + } + await sleep(0.5); + } + + expect(changedL1).toBeTruthy(); + }); + test('Test gas consumption under large L1 gas price', async () => { if (testMaster.environment().l1BatchCommitDataGeneratorMode === DataAvailabityMode.Validium) { // We skip this test for Validium mode, since L1 gas price has little impact on the gasLimit in this mode. @@ -233,11 +357,10 @@ testFees('Test fees', function () { // that the gasLimit is indeed over u32::MAX, which is the most important tested property. const requiredPubdataPrice = minimalL2GasPrice * 100_000n; - await mainNode.killAndWaitForShutdown(); - mainNode = await mainNodeSpawner.spawnMainNode( - requiredPubdataPrice.toString(), - requiredPubdataPrice.toString() - ); + await mainNodeSpawner.killAndSpawnMainNode({ + newL1GasPrice: requiredPubdataPrice, + newPubdataPrice: requiredPubdataPrice + }); const l1Messenger = new ethers.Contract(zksync.utils.L1_MESSENGER_ADDRESS, zksync.utils.L1_MESSENGER, alice); @@ -278,16 +401,11 @@ testFees('Test fees', function () { }); afterAll(async () => { - await mainNode.killAndWaitForShutdown(); + await mainNodeSpawner.killAndSpawnMainNode(); // Returning the pubdata price to the default one - - // Restore defaults - setTransactionSlots(pathToHome, fileConfig, 8192); - deleteInternalEnforcedL1GasPrice(pathToHome, fileConfig); - deleteInternalEnforcedPubdataPrice(pathToHome, fileConfig); - mainNode = await mainNodeSpawner.spawnMainNode(); + // Spawning with no options restores defaults. 
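// Editorial note (illustrative): the `toBeLessThan(75)` / `toBeGreaterThan(-75)` bounds in
// the fluctuation test above follow from the forced numerator of 300 with a 20% forced
// fluctuation: the maximum swing is 300 * 0.20 = 60, padded by 5 percentage points for
// arithmetic precision, giving 300 * 0.25 = 75.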
await testMaster.deinitialize(); - __ZKSYNC_TEST_CONTEXT_OWNER__.setL2NodePid(mainNode.proc.pid!); + __ZKSYNC_TEST_CONTEXT_OWNER__.setL2NodePid(mainNodeSpawner.mainNode!.proc.pid!); }); }); diff --git a/core/tests/ts-integration/tests/utils.ts b/core/tests/ts-integration/tests/utils.ts deleted file mode 100644 index 24df8a170c20..000000000000 --- a/core/tests/ts-integration/tests/utils.ts +++ /dev/null @@ -1,81 +0,0 @@ -import * as fs from 'fs'; -import { getConfigPath } from 'utils/build/file-configs'; - -export function setInternalEnforcedPubdataPrice(pathToHome: string, fileConfig: any, value: number) { - setGasAdjusterProperty(pathToHome, fileConfig, 'internal_enforced_pubdata_price', value); -} - -export function setInternalEnforcedL1GasPrice(pathToHome: string, fileConfig: any, value: number) { - setGasAdjusterProperty(pathToHome, fileConfig, 'internal_enforced_l1_gas_price', value); -} - -export function deleteInternalEnforcedPubdataPrice(pathToHome: string, fileConfig: any) { - deleteProperty(pathToHome, fileConfig, 'internal_enforced_pubdata_price'); -} - -export function deleteInternalEnforcedL1GasPrice(pathToHome: string, fileConfig: any) { - deleteProperty(pathToHome, fileConfig, 'internal_enforced_l1_gas_price'); -} - -export function setTransactionSlots(pathToHome: string, fileConfig: any, value: number) { - setPropertyInGeneralConfig(pathToHome, fileConfig, 'transaction_slots', value); -} - -function setPropertyInGeneralConfig(pathToHome: string, fileConfig: any, property: string, value: number) { - const generalConfigPath = getConfigPath({ - pathToHome, - chain: fileConfig.chain, - configsFolder: 'configs', - config: 'general.yaml' - }); - const generalConfig = fs.readFileSync(generalConfigPath, 'utf8'); - - const regex = new RegExp(`${property}:\\s*\\d+(\\.\\d+)?`, 'g'); - const newGeneralConfig = generalConfig.replace(regex, `${property}: ${value}`); - - fs.writeFileSync(generalConfigPath, newGeneralConfig, 'utf8'); -} - -function setGasAdjusterProperty(pathToHome: string, fileConfig: any, property: string, value: number) { - const generalConfigPath = getConfigPath({ - pathToHome, - chain: fileConfig.chain, - configsFolder: 'configs', - config: 'general.yaml' - }); - const generalConfig = fs.readFileSync(generalConfigPath, 'utf8'); - - // Define the regex pattern to check if the property already exists - const propertyRegex = new RegExp(`(^\\s*${property}:\\s*\\d+(\\.\\d+)?$)`, 'm'); - const gasAdjusterRegex = new RegExp('(^\\s*gas_adjuster:.*$)', 'gm'); - - let newGeneralConfig; - - if (propertyRegex.test(generalConfig)) { - // If the property exists, modify its value - newGeneralConfig = generalConfig.replace(propertyRegex, ` ${property}: ${value}`); - } else { - // If the property does not exist, add it under the gas_adjuster section - newGeneralConfig = generalConfig.replace(gasAdjusterRegex, `$1\n ${property}: ${value}`); - } - - fs.writeFileSync(generalConfigPath, newGeneralConfig, 'utf8'); -} - -function deleteProperty(pathToHome: string, fileConfig: any, property: string) { - const generalConfigPath = getConfigPath({ - pathToHome, - chain: fileConfig.chain, - configsFolder: 'configs', - config: 'general.yaml' - }); - const generalConfig = fs.readFileSync(generalConfigPath, 'utf8'); - - // Define the regex pattern to find the property line and remove it completely - const propertyRegex = new RegExp(`^\\s*${property}:.*\\n?`, 'm'); - - // Remove the line if the property exists - const newGeneralConfig = generalConfig.replace(propertyRegex, ''); - - 
fs.writeFileSync(generalConfigPath, newGeneralConfig, 'utf8'); -} diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index 59c1e21493b4..eb4a5a239252 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -7,9 +7,9 @@ publish = false [dependencies] zksync_contracts.workspace = true +zksync_test_contracts.workspace = true zksync_multivm.workspace = true zksync_types.workspace = true -zksync_utils.workspace = true zksync_vlog.workspace = true zksync_vm2.workspace = true @@ -21,7 +21,7 @@ tokio.workspace = true [dev-dependencies] assert_matches.workspace = true -iai.workspace = true +yab.workspace = true [[bench]] name = "oneshot" @@ -32,5 +32,5 @@ name = "batch" harness = false [[bench]] -name = "iai" +name = "instructions" harness = false diff --git a/core/tests/vm-benchmark/benches/iai.rs b/core/tests/vm-benchmark/benches/iai.rs deleted file mode 100644 index 8cbb9f10dd83..000000000000 --- a/core/tests/vm-benchmark/benches/iai.rs +++ /dev/null @@ -1,35 +0,0 @@ -use iai::black_box; -use vm_benchmark::{BenchmarkingVm, BenchmarkingVmFactory, Bytecode, Fast, Legacy}; - -fn run_bytecode(name: &str) { - let tx = Bytecode::get(name).deploy_tx(); - black_box(BenchmarkingVm::::default().run_transaction(&tx)); -} - -macro_rules! make_functions_and_main { - ($($file:ident => $legacy_name:ident,)+) => { - $( - fn $file() { - run_bytecode::(stringify!($file)); - } - - fn $legacy_name() { - run_bytecode::(stringify!($file)); - } - )+ - - iai::main!($($file, $legacy_name,)+); - }; -} - -make_functions_and_main!( - access_memory => access_memory_legacy, - call_far => call_far_legacy, - decode_shl_sub => decode_shl_sub_legacy, - deploy_simple_contract => deploy_simple_contract_legacy, - finish_eventful_frames => finish_eventful_frames_legacy, - write_and_decode => write_and_decode_legacy, - event_spam => event_spam_legacy, - slot_hash_collision => slot_hash_collision_legacy, - heap_read_write => heap_read_write_legacy, -); diff --git a/core/tests/vm-benchmark/benches/instructions.rs b/core/tests/vm-benchmark/benches/instructions.rs new file mode 100644 index 000000000000..654dfef71b29 --- /dev/null +++ b/core/tests/vm-benchmark/benches/instructions.rs @@ -0,0 +1,206 @@ +//! Measures the number of host instructions required to run the benchmark bytecodes. + +use std::{env, sync::mpsc}; + +use vise::{Gauge, LabeledFamily, Metrics}; +use vm_benchmark::{ + criterion::PrometheusRuntime, BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, BYTECODES, +}; +use yab::{ + reporter::{BenchmarkOutput, BenchmarkReporter, Reporter}, + AccessSummary, BenchMode, Bencher, BenchmarkId, +}; + +fn benchmarks_for_vm(bencher: &mut Bencher) { + bencher.bench( + BenchmarkId::new("init", VM::LABEL.as_str()), + BenchmarkingVm::::default, + ); + + for bytecode in BYTECODES { + bencher.bench_with_capture( + BenchmarkId::new(bytecode.name, VM::LABEL.as_str()), + |capture| { + let mut vm = yab::black_box(BenchmarkingVm::::default()); + let tx = yab::black_box(bytecode.deploy_tx()); + capture.measure(|| vm.run_transaction(&tx)); + }, + ); + } +} + +/// Reporter that pushes cachegrind metrics to Prometheus. 
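// Editorial note (assumption, not part of the diff): `AccessSummary::estimated_cycles()` is
// presumably the usual cachegrind-style weighting, roughly
// `cycles = l1_hits + 5 * l3_hits + 35 * ram_accesses`; the reporter below only re-exports
// those derived numbers as per-benchmark Prometheus gauges.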
+#[derive(Debug)]
+struct MetricsReporter {
+    _runtime: Option<PrometheusRuntime>,
+}
+
+impl Default for MetricsReporter {
+    fn default() -> Self {
+        Self {
+            _runtime: PrometheusRuntime::new(),
+        }
+    }
+}
+
+impl Reporter for MetricsReporter {
+    fn new_benchmark(&mut self, id: &BenchmarkId) -> Box<dyn BenchmarkReporter> {
+        Box::new(MetricsBenchmarkReporter(id.clone()))
+    }
+}
+
+#[derive(Debug)]
+struct MetricsBenchmarkReporter(BenchmarkId);
+
+impl BenchmarkReporter for MetricsBenchmarkReporter {
+    fn ok(self: Box<Self>, output: &BenchmarkOutput) {
+        #[derive(Debug, Metrics)]
+        #[metrics(prefix = "vm_cachegrind")]
+        struct VmCachegrindMetrics {
+            #[metrics(labels = ["benchmark"])]
+            instructions: LabeledFamily<String, Gauge<u64>>,
+            #[metrics(labels = ["benchmark"])]
+            l1_accesses: LabeledFamily<String, Gauge<u64>>,
+            #[metrics(labels = ["benchmark"])]
+            l2_accesses: LabeledFamily<String, Gauge<u64>>,
+            #[metrics(labels = ["benchmark"])]
+            ram_accesses: LabeledFamily<String, Gauge<u64>>,
+            #[metrics(labels = ["benchmark"])]
+            cycles: LabeledFamily<String, Gauge<u64>>,
+        }
+
+        #[vise::register]
+        static VM_CACHEGRIND_METRICS: vise::Global<VmCachegrindMetrics> = vise::Global::new();
+
+        let id = self.0.to_string();
+        VM_CACHEGRIND_METRICS.instructions[&id].set(output.stats.total_instructions());
+        if let Some(&full) = output.stats.as_full() {
+            let summary = AccessSummary::from(full);
+            VM_CACHEGRIND_METRICS.l1_accesses[&id].set(summary.l1_hits);
+            VM_CACHEGRIND_METRICS.l2_accesses[&id].set(summary.l3_hits);
+            VM_CACHEGRIND_METRICS.ram_accesses[&id].set(summary.ram_accesses);
+            VM_CACHEGRIND_METRICS.cycles[&id].set(summary.estimated_cycles());
+        }
+    }
+}
+
+#[derive(Debug, Clone, Copy)]
+struct Comparison {
+    current_cycles: u64,
+    prev_cycles: Option<u64>,
+}
+
+impl Comparison {
+    fn percent_difference(a: u64, b: u64) -> f64 {
+        ((b as i64) - (a as i64)) as f64 / (a as f64) * 100.0
+    }
+
+    fn new(output: &BenchmarkOutput) -> Option<Self> {
+        let current_cycles = AccessSummary::from(*output.stats.as_full()?).estimated_cycles();
+        let prev_cycles = if let Some(prev_stats) = &output.prev_stats {
+            Some(AccessSummary::from(*prev_stats.as_full()?).estimated_cycles())
+        } else {
+            None
+        };
+
+        Some(Self {
+            current_cycles,
+            prev_cycles,
+        })
+    }
+
+    fn cycles_diff(&self) -> Option<f64> {
+        self.prev_cycles
+            .map(|prev_cycles| Self::percent_difference(prev_cycles, self.current_cycles))
+    }
+}
+
+/// Reporter that outputs diffs in a Markdown table to stdout after all benchmarks are completed.
+///
+/// Significant diff level can be changed via `BENCHMARK_DIFF_THRESHOLD_PERCENT` env var; it is set to 1% by default.
+#[derive(Debug)]
+struct ComparisonReporter {
+    comparisons_sender: mpsc::Sender<(String, Comparison)>,
+    comparisons_receiver: mpsc::Receiver<(String, Comparison)>,
+}
+
+impl Default for ComparisonReporter {
+    fn default() -> Self {
+        let (comparisons_sender, comparisons_receiver) = mpsc::channel();
+        Self {
+            comparisons_sender,
+            comparisons_receiver,
+        }
+    }
+}
+
+impl Reporter for ComparisonReporter {
+    fn new_benchmark(&mut self, id: &BenchmarkId) -> Box<dyn BenchmarkReporter> {
+        Box::new(BenchmarkComparison {
+            comparisons_sender: self.comparisons_sender.clone(),
+            id: id.clone(),
+        })
+    }
+
+    fn ok(self: Box<Self>) {
+        const ENV_VAR: &str = "BENCHMARK_DIFF_THRESHOLD_PERCENT";
+
+        let diff_threshold = env::var(ENV_VAR).unwrap_or_else(|_| "1.0".into());
+        let diff_threshold: f64 = diff_threshold.parse().unwrap_or_else(|err| {
+            panic!("incorrect `{ENV_VAR}` value: {err}");
+        });
+
+        // Drop the sender to not hang on the iteration below.
+        drop(self.comparisons_sender);
+        let mut comparisons: Vec<_> = self.comparisons_receiver.iter().collect();
+        comparisons.retain(|(_, diff)| {
+            // Output all stats if `diff_threshold <= 0.0` since this is what the user expects
+            diff.cycles_diff().unwrap_or(0.0) >= diff_threshold
+        });
+        if comparisons.is_empty() {
+            return;
+        }
+
+        comparisons.sort_unstable_by(|(name, _), (other_name, _)| name.cmp(other_name));
+
+        println!("\n## Detected VM performance changes");
+        println!("Benchmark name | Est. cycles | Change in est. cycles |");
+        println!("|:---|---:|---:|");
+        for (name, comparison) in &comparisons {
+            let diff = comparison
+                .cycles_diff()
+                .map_or_else(|| "N/A".to_string(), |diff| format!("{diff:+.1}%"));
+            println!("| {name} | {} | {diff} |", comparison.current_cycles);
+        }
+    }
+}
+
+#[derive(Debug)]
+struct BenchmarkComparison {
+    comparisons_sender: mpsc::Sender<(String, Comparison)>,
+    id: BenchmarkId,
+}
+
+impl BenchmarkReporter for BenchmarkComparison {
+    fn ok(self: Box<Self>, output: &BenchmarkOutput) {
+        if let Some(diff) = Comparison::new(output) {
+            self.comparisons_sender
+                .send((self.id.to_string(), diff))
+                .ok();
+        }
+    }
+}
+
+fn benchmarks(bencher: &mut Bencher) {
+    if bencher.mode() == BenchMode::PrintResults {
+        // Only customize reporting if outputting previously collected benchmark result in order to prevent
+        // reporters influencing cachegrind stats.
+        bencher
+            .add_reporter(MetricsReporter::default())
+            .add_reporter(ComparisonReporter::default());
+    }
+    benchmarks_for_vm::<Fast>(bencher);
+    benchmarks_for_vm::<Legacy>(bencher);
+}
+
+yab::main!(benchmarks);
diff --git a/core/tests/vm-benchmark/src/bin/common/mod.rs b/core/tests/vm-benchmark/src/bin/common/mod.rs
deleted file mode 100644
index a92c9d5f710c..000000000000
--- a/core/tests/vm-benchmark/src/bin/common/mod.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-use std::io::BufRead;
-
-#[derive(Debug)]
-pub struct IaiResult {
-    pub name: String,
-    pub instructions: u64,
-    pub l1_accesses: u64,
-    pub l2_accesses: u64,
-    pub ram_accesses: u64,
-    pub cycles: u64,
-}
-
-pub fn parse_iai<R: BufRead>(iai_output: R) -> impl Iterator<Item = IaiResult> {
-    IaiResultParser {
-        lines: iai_output.lines().map(|x| x.unwrap()),
-    }
-}
-
-struct IaiResultParser<I: Iterator<Item = String>> {
-    lines: I,
-}
-
-impl<I: Iterator<Item = String>> Iterator for IaiResultParser<I> {
-    type Item = IaiResult;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        self.lines.next().map(|name| {
-            let result = IaiResult {
-                name,
-                instructions: self.parse_stat(),
-                l1_accesses: self.parse_stat(),
-                l2_accesses: self.parse_stat(),
-                ram_accesses: self.parse_stat(),
-                cycles: self.parse_stat(),
-            };
-            self.lines.next();
-            result
-        })
-    }
-}
-
-impl<I: Iterator<Item = String>> IaiResultParser<I> {
-    fn parse_stat(&mut self) -> u64 {
-        let line = self.lines.next().unwrap();
-        let number = line
-            .split(':')
-            .nth(1)
-            .unwrap()
-            .split_whitespace()
-            .next()
-            .unwrap();
-        number.parse().unwrap()
-    }
-}
diff --git a/core/tests/vm-benchmark/src/bin/compare_iai_results.rs b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs
deleted file mode 100644
index c274b039c9bd..000000000000
--- a/core/tests/vm-benchmark/src/bin/compare_iai_results.rs
+++ /dev/null
@@ -1,108 +0,0 @@
-use std::{
-    collections::{HashMap, HashSet},
-    fs::File,
-    io::{BufRead, BufReader},
-};
-
-pub use crate::common::parse_iai;
-
-mod common;
-
-fn main() {
-    let [iai_before, iai_after, opcodes_before, opcodes_after] = std::env::args()
-        .skip(1)
-        .take(4)
-        .collect::<Vec<_>>()
-        .try_into()
-        .expect("expected four arguments");
-
-    let iai_before = get_name_to_cycles(&iai_before);
-    let iai_after =
get_name_to_cycles(&iai_after); - let opcodes_before = get_name_to_opcodes(&opcodes_before); - let opcodes_after = get_name_to_opcodes(&opcodes_after); - - let perf_changes = iai_before - .keys() - .collect::>() - .intersection(&iai_after.keys().collect()) - .map(|&name| (name, percent_difference(iai_before[name], iai_after[name]))) - .collect::>(); - - let duration_changes = opcodes_before - .keys() - .collect::>() - .intersection(&opcodes_after.keys().collect()) - .map(|&name| { - let opcodes_abs_diff = (opcodes_after[name] as i64) - (opcodes_before[name] as i64); - (name, opcodes_abs_diff) - }) - .collect::>(); - - let mut nonzero_diff = false; - - for name in perf_changes - .iter() - .filter_map(|(key, value)| (value.abs() > 2.).then_some(key)) - .collect::>() - .union( - &duration_changes - .iter() - .filter_map(|(key, value)| (*value != 0).then_some(key)) - .collect(), - ) - { - // write the header before writing the first line of diff - if !nonzero_diff { - println!("Benchmark name | change in estimated runtime | change in number of opcodes executed \n--- | --- | ---"); - nonzero_diff = true; - } - - let n_a = "N/A".to_string(); - println!( - "{} | {} | {}", - name, - perf_changes - .get(**name) - .map(|percent| format!("{:+.1}%", percent)) - .unwrap_or(n_a.clone()), - duration_changes - .get(**name) - .map(|abs_diff| format!( - "{:+} ({:+.1}%)", - abs_diff, - percent_difference(opcodes_before[**name], opcodes_after[**name]) - )) - .unwrap_or(n_a), - ); - } - - if nonzero_diff { - println!("\n Changes in number of opcodes executed indicate that the gas price of the benchmark has changed, which causes it run out of gas at a different time. Or that it is behaving completely differently."); - } -} - -fn percent_difference(a: u64, b: u64) -> f64 { - ((b as f64) - (a as f64)) / (a as f64) * 100.0 -} - -fn get_name_to_cycles(filename: &str) -> HashMap { - parse_iai(BufReader::new( - File::open(filename).expect("failed to open file"), - )) - .map(|x| (x.name, x.cycles)) - .collect() -} - -fn get_name_to_opcodes(filename: &str) -> HashMap { - BufReader::new(File::open(filename).expect("failed to open file")) - .lines() - .map(|line| { - let line = line.unwrap(); - let mut it = line.split_whitespace(); - ( - it.next().unwrap().to_string(), - it.next().unwrap().parse().unwrap(), - ) - }) - .collect() -} diff --git a/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs b/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs deleted file mode 100644 index 3b3aa05bf69c..000000000000 --- a/core/tests/vm-benchmark/src/bin/iai_results_to_prometheus.rs +++ /dev/null @@ -1,52 +0,0 @@ -use std::{env, io::BufReader, time::Duration}; - -use tokio::sync::watch; -use vise::{Gauge, LabeledFamily, Metrics}; -use zksync_vlog::prometheus::PrometheusExporterConfig; - -use crate::common::{parse_iai, IaiResult}; - -mod common; - -#[derive(Debug, Metrics)] -#[metrics(prefix = "vm_cachegrind")] -pub(crate) struct VmCachegrindMetrics { - #[metrics(labels = ["benchmark"])] - pub instructions: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub l1_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub l2_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub ram_accesses: LabeledFamily>, - #[metrics(labels = ["benchmark"])] - pub cycles: LabeledFamily>, -} - -#[vise::register] -pub(crate) static VM_CACHEGRIND_METRICS: vise::Global = vise::Global::new(); - -#[tokio::main] -async fn main() { - let results: Vec = parse_iai(BufReader::new(std::io::stdin())).collect(); - - 
let endpoint = env::var("BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL") - .expect("`BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL` env var is not set"); - let (stop_sender, stop_receiver) = watch::channel(false); - let prometheus_config = - PrometheusExporterConfig::push(endpoint.to_owned(), Duration::from_millis(100)); - tokio::spawn(prometheus_config.run(stop_receiver)); - - for result in results { - let name = result.name; - VM_CACHEGRIND_METRICS.instructions[&name.clone()].set(result.instructions); - VM_CACHEGRIND_METRICS.l1_accesses[&name.clone()].set(result.l1_accesses); - VM_CACHEGRIND_METRICS.l2_accesses[&name.clone()].set(result.l2_accesses); - VM_CACHEGRIND_METRICS.ram_accesses[&name.clone()].set(result.ram_accesses); - VM_CACHEGRIND_METRICS.cycles[&name].set(result.cycles); - } - - println!("Waiting for push to happen..."); - tokio::time::sleep(Duration::from_secs(1)).await; - stop_sender.send_replace(true); -} diff --git a/core/tests/vm-benchmark/src/bin/instruction_counts.rs b/core/tests/vm-benchmark/src/bin/instruction_counts.rs index 96208007fd97..ece30a66cee3 100644 --- a/core/tests/vm-benchmark/src/bin/instruction_counts.rs +++ b/core/tests/vm-benchmark/src/bin/instruction_counts.rs @@ -1,16 +1,100 @@ //! Runs all benchmarks and prints out the number of zkEVM opcodes each one executed. -use vm_benchmark::{BenchmarkingVmFactory, Fast, Legacy, BYTECODES}; +use std::{collections::BTreeMap, env, fs, io, path::PathBuf}; -fn main() { - for bytecode in BYTECODES { - let tx = bytecode.deploy_tx(); - let name = bytecode.name; - println!("{name} {}", Fast::<()>::count_instructions(&tx)); - println!( - "{} {}", - name.to_string() + "_legacy", - Legacy::count_instructions(&tx) - ); +use vm_benchmark::{CountInstructions, Fast, Legacy, BYTECODES}; + +#[derive(Debug)] +enum Command { + Print, + Diff { old: PathBuf }, +} + +impl Command { + fn from_env() -> Self { + let mut args = env::args().skip(1); + let Some(first) = args.next() else { + return Self::Print; + }; + assert_eq!(first, "--diff", "Unsupported command-line arg"); + let old = args.next().expect("`--diff` requires a path to old file"); + Self::Diff { old: old.into() } } + + fn print_instructions(counts: &BTreeMap<&str, usize>) { + for (bytecode_name, count) in counts { + println!("{bytecode_name} {count}"); + } + } + + fn parse_counts(reader: impl io::BufRead) -> BTreeMap { + let mut counts = BTreeMap::new(); + for line in reader.lines() { + let line = line.unwrap(); + if line.is_empty() { + continue; + } + let (name, count) = line.split_once(' ').expect("invalid output format"); + let count = count.parse().unwrap_or_else(|err| { + panic!("invalid count for `{name}`: {err}"); + }); + counts.insert(name.to_owned(), count); + } + counts + } + + fn run(self) { + let counts: BTreeMap<_, _> = BYTECODES + .iter() + .map(|bytecode| { + let tx = bytecode.deploy_tx(); + // We have a unit test comparing stats, but do it here as well just in case. 
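// Editorial note (illustrative): the counts file consumed by `--diff` above is simply the
// `print` output of this binary, one `<bytecode name> <instruction count>` pair per line,
// e.g. (hypothetical numbers):
//
//     access_memory 1234567
//     call_far 89012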
+ let fast_count = Fast::count_instructions(&tx); + let legacy_count = Legacy::count_instructions(&tx); + assert_eq!( + fast_count, legacy_count, + "mismatch on number of instructions on bytecode `{}`", + bytecode.name + ); + + (bytecode.name, fast_count) + }) + .collect(); + + match self { + Self::Print => Self::print_instructions(&counts), + Self::Diff { old } => { + let file = fs::File::open(&old).unwrap_or_else(|err| { + panic!("failed opening `{}`: {err}", old.display()); + }); + let reader = io::BufReader::new(file); + let old_counts = Self::parse_counts(reader); + + let differing_counts: Vec<_> = counts + .iter() + .filter_map(|(&name, &new_count)| { + let old_count = *old_counts.get(name)?; + (old_count != new_count).then_some((name, old_count, new_count)) + }) + .collect(); + + if !differing_counts.is_empty() { + println!("## ⚠ Detected differing instruction counts"); + println!("| Benchmark | Old count | New count |"); + println!("|-----------|----------:|----------:|"); + for (name, old_count, new_count) in differing_counts { + println!("| {name} | {old_count} | {new_count} |"); + } + println!( + "\nChanges in number of opcodes executed indicate that the gas price of the benchmark has changed, \ + which causes it to run out of gas at a different time." + ); + } + } + } + } +} + +fn main() { + Command::from_env().run(); } diff --git a/core/tests/vm-benchmark/src/criterion.rs b/core/tests/vm-benchmark/src/criterion.rs index 9515ac4ef988..024ccf14139f 100644 --- a/core/tests/vm-benchmark/src/criterion.rs +++ b/core/tests/vm-benchmark/src/criterion.rs @@ -57,7 +57,7 @@ struct VmBenchmarkMetrics { static METRICS: vise::Global = vise::Global::new(); #[derive(Debug)] -struct PrometheusRuntime { +pub struct PrometheusRuntime { stop_sender: watch::Sender, _runtime: tokio::runtime::Runtime, } @@ -72,7 +72,7 @@ impl Drop for PrometheusRuntime { } impl PrometheusRuntime { - fn new() -> Option { + pub fn new() -> Option { const PUSH_INTERVAL: Duration = Duration::from_millis(100); let gateway_url = env::var("BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL").ok()?; @@ -164,7 +164,7 @@ thread_local! { static BIN_NAME: SyncOnceCell<&'static str> = SyncOnceCell::new(); -/// Measurement for criterion that exports . +/// Measurement for criterion that exports timing-related metrics. #[derive(Debug)] pub struct MeteredTime { _prometheus: Option, diff --git a/core/tests/vm-benchmark/src/lib.rs b/core/tests/vm-benchmark/src/lib.rs index 4bd008d33196..dbe2fdb808db 100644 --- a/core/tests/vm-benchmark/src/lib.rs +++ b/core/tests/vm-benchmark/src/lib.rs @@ -6,7 +6,7 @@ pub use crate::{ get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, }, - vm::{BenchmarkingVm, BenchmarkingVmFactory, Fast, Legacy, VmLabel}, + vm::{BenchmarkingVm, BenchmarkingVmFactory, CountInstructions, Fast, Legacy, VmLabel}, }; pub mod criterion; @@ -70,3 +70,33 @@ pub const BYTECODES: &[Bytecode] = &[ include_bytecode!(slot_hash_collision), include_bytecode!(write_and_decode), ]; + +#[cfg(test)] +mod tests { + use zksync_multivm::interface::{ExecutionResult, VmRevertReason}; + + use super::*; + + #[test] + fn deploy_transactions_are_valid() { + for bytecode in BYTECODES { + println!("Testing bytecode {}", bytecode.name); + + let mut vm = BenchmarkingVm::new(); + let res = vm.run_transaction(&bytecode.deploy_tx()); + match &res.result { + ExecutionResult::Success { .. 
} => { /* OK */ } + ExecutionResult::Revert { + output: + VmRevertReason::Unknown { + function_selector, + data, + }, + } if function_selector.is_empty() && data.is_empty() => { + // out of gas; this is expected for most fuzzed bytecodes + } + _ => panic!("Unexpected execution result: {:?}", res.result), + } + } + } +} diff --git a/core/tests/vm-benchmark/src/transaction.rs b/core/tests/vm-benchmark/src/transaction.rs index d5fedfa4df94..5c1824e6ffa2 100644 --- a/core/tests/vm-benchmark/src/transaction.rs +++ b/core/tests/vm-benchmark/src/transaction.rs @@ -1,66 +1,30 @@ use once_cell::sync::Lazy; -pub use zksync_contracts::test_contracts::LoadnextContractExecutionParams as LoadTestParams; -use zksync_contracts::{deployer_contract, TestContract}; use zksync_multivm::utils::get_max_gas_per_pubdata_byte; +pub use zksync_test_contracts::LoadnextContractExecutionParams as LoadTestParams; +use zksync_test_contracts::{Account, TestContract}; use zksync_types::{ - ethabi::{encode, Token}, - fee::Fee, - l2::L2Tx, - utils::deployed_address_create, - Address, K256PrivateKey, L2ChainId, Nonce, ProtocolVersionId, Transaction, - CONTRACT_DEPLOYER_ADDRESS, H256, U256, + ethabi::Token, fee::Fee, l2::L2Tx, utils::deployed_address_create, Address, Execute, + K256PrivateKey, L2ChainId, Nonce, ProtocolVersionId, Transaction, H256, U256, }; -use zksync_utils::bytecode::hash_bytecode; -const LOAD_TEST_MAX_READS: usize = 100; +const LOAD_TEST_MAX_READS: usize = 3000; pub(crate) static PRIVATE_KEY: Lazy = Lazy::new(|| K256PrivateKey::from_bytes(H256([42; 32])).expect("invalid key bytes")); static LOAD_TEST_CONTRACT_ADDRESS: Lazy
= Lazy::new(|| deployed_address_create(PRIVATE_KEY.address(), 0.into())); -static LOAD_TEST_CONTRACT: Lazy = Lazy::new(zksync_contracts::get_loadnext_contract); - -static CREATE_FUNCTION_SIGNATURE: Lazy<[u8; 4]> = Lazy::new(|| { - deployer_contract() - .function("create") - .unwrap() - .short_signature() -}); - pub fn get_deploy_tx(code: &[u8]) -> Transaction { get_deploy_tx_with_gas_limit(code, 30_000_000, 0) } pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32, nonce: u32) -> Transaction { - let mut salt = vec![0_u8; 32]; - salt[28..32].copy_from_slice(&nonce.to_be_bytes()); - let params = [ - Token::FixedBytes(salt), - Token::FixedBytes(hash_bytecode(code).0.to_vec()), - Token::Bytes([].to_vec()), - ]; - let calldata = CREATE_FUNCTION_SIGNATURE - .iter() - .cloned() - .chain(encode(¶ms)) - .collect(); - - let mut signed = L2Tx::new_signed( - Some(CONTRACT_DEPLOYER_ADDRESS), - calldata, - Nonce(nonce), - tx_fee(gas_limit), - U256::zero(), - L2ChainId::from(270), - &PRIVATE_KEY, - vec![code.to_vec()], // maybe not needed? - Default::default(), - ) - .expect("should create a signed execute transaction"); - - signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); - signed.into() + let mut salt = H256::zero(); + salt.0[28..32].copy_from_slice(&nonce.to_be_bytes()); + let execute = Execute::for_deploy(salt, code.to_vec(), &[]); + let mut account = Account::new(PRIVATE_KEY.clone()); + account.nonce = Nonce(nonce); + account.get_l2_tx_for_execute(execute, Some(tx_fee(gas_limit))) } fn tx_fee(gas_limit: u32) -> Fee { @@ -94,35 +58,8 @@ pub fn get_transfer_tx(nonce: u32) -> Transaction { pub fn get_load_test_deploy_tx() -> Transaction { let calldata = [Token::Uint(LOAD_TEST_MAX_READS.into())]; - let params = [ - Token::FixedBytes(vec![0_u8; 32]), - Token::FixedBytes(hash_bytecode(&LOAD_TEST_CONTRACT.bytecode).0.to_vec()), - Token::Bytes(encode(&calldata)), - ]; - let create_calldata = CREATE_FUNCTION_SIGNATURE - .iter() - .cloned() - .chain(encode(¶ms)) - .collect(); - - let mut factory_deps = LOAD_TEST_CONTRACT.factory_deps.clone(); - factory_deps.push(LOAD_TEST_CONTRACT.bytecode.clone()); - - let mut signed = L2Tx::new_signed( - Some(CONTRACT_DEPLOYER_ADDRESS), - create_calldata, - Nonce(0), - tx_fee(100_000_000), - U256::zero(), - L2ChainId::from(270), - &PRIVATE_KEY, - factory_deps, - Default::default(), - ) - .expect("should create a signed execute transaction"); - - signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); - signed.into() + let execute = TestContract::load_test().deploy_payload(&calldata); + Account::new(PRIVATE_KEY.clone()).get_l2_tx_for_execute(execute, Some(tx_fee(500_000_000))) } pub fn get_load_test_tx(nonce: u32, gas_limit: u32, params: LoadTestParams) -> Transaction { @@ -131,14 +68,15 @@ pub fn get_load_test_tx(nonce: u32, gas_limit: u32, params: LoadTestParams) -> T "Too many reads: {params:?}, should be <={LOAD_TEST_MAX_READS}" ); - let execute_function = LOAD_TEST_CONTRACT - .contract + let execute_function = TestContract::load_test() + .abi .function("execute") .expect("no `execute` function in load test contract"); let calldata = execute_function .encode_input(&vec![ Token::Uint(U256::from(params.reads)), - Token::Uint(U256::from(params.writes)), + Token::Uint(U256::from(params.initial_writes)), + Token::Uint(U256::from(params.repeated_writes)), Token::Uint(U256::from(params.hashes)), Token::Uint(U256::from(params.events)), Token::Uint(U256::from(params.recursive_calls)), @@ -154,7 +92,7 @@ pub fn get_load_test_tx(nonce: 
u32, gas_limit: u32, params: LoadTestParams) -> T U256::zero(), L2ChainId::from(270), &PRIVATE_KEY, - LOAD_TEST_CONTRACT.factory_deps.clone(), + TestContract::load_test().factory_deps(), Default::default(), ) .expect("should create a signed execute transaction"); @@ -168,9 +106,10 @@ pub fn get_realistic_load_test_tx(nonce: u32) -> Transaction { nonce, 10_000_000, LoadTestParams { - reads: 30, - writes: 2, - events: 5, + reads: 243, + initial_writes: 1, + repeated_writes: 11, + events: 6, hashes: 10, recursive_calls: 0, deploys: 0, @@ -183,9 +122,10 @@ pub fn get_heavy_load_test_tx(nonce: u32) -> Transaction { nonce, 10_000_000, LoadTestParams { - reads: 100, - writes: 5, - events: 20, + reads: 296, + initial_writes: 13, + repeated_writes: 92, + events: 140, hashes: 100, recursive_calls: 20, deploys: 5, diff --git a/core/tests/vm-benchmark/src/vm.rs b/core/tests/vm-benchmark/src/vm.rs index 30e2321298fe..4bd7d7eb1aa6 100644 --- a/core/tests/vm-benchmark/src/vm.rs +++ b/core/tests/vm-benchmark/src/vm.rs @@ -5,7 +5,7 @@ use zksync_contracts::BaseSystemContracts; use zksync_multivm::{ interface::{ storage::{InMemoryStorage, StorageView}, - ExecutionResult, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + ExecutionResult, InspectExecutionMode, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, @@ -14,22 +14,21 @@ use zksync_multivm::{ zk_evm_latest::ethereum_types::{Address, U256}, }; use zksync_types::{ - block::L2BlockHasher, fee_model::BatchFeeInput, helpers::unix_timestamp_ms, + block::L2BlockHasher, fee_model::BatchFeeInput, helpers::unix_timestamp_ms, u256_to_h256, utils::storage_key_for_eth_balance, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, }; -use zksync_utils::bytecode::hash_bytecode; use crate::{instruction_counter::InstructionCounter, transaction::PRIVATE_KEY}; static SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); static STORAGE: Lazy = Lazy::new(|| { - let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + let mut storage = InMemoryStorage::with_system_contracts(); // Give `PRIVATE_KEY` some money let balance = U256::from(10u32).pow(U256::from(32)); //10^32 wei let key = storage_key_for_eth_balance(&PRIVATE_KEY.address()); - storage.set_value(key, zksync_utils::u256_to_h256(balance)); + storage.set_value(key, u256_to_h256(balance)); storage }); @@ -72,19 +71,21 @@ pub trait BenchmarkingVmFactory { system_env: SystemEnv, storage: &'static InMemoryStorage, ) -> Self::Instance; +} +pub trait CountInstructions { /// Counts instructions executed by the VM while processing the transaction. fn count_instructions(tx: &Transaction) -> usize; } /// Factory for the new / fast VM. 
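// Editorial note: the hunk below splits instruction counting out of `BenchmarkingVmFactory`
// into the separate `CountInstructions` trait, letting `Fast` drop its tracer type parameter.
// The resulting shape, as far as this diff shows it:
//
//     pub trait CountInstructions {
//         fn count_instructions(tx: &Transaction) -> usize;
//     }
//     // implemented for both `Fast` (vm_fast tracer) and `Legacy` (InstructionCounter tracer)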
#[derive(Debug)] -pub struct Fast(Tr); +pub struct Fast; -impl BenchmarkingVmFactory for Fast { +impl BenchmarkingVmFactory for Fast { const LABEL: VmLabel = VmLabel::Fast; - type Instance = vm_fast::Vm<&'static InMemoryStorage, Tr>; + type Instance = vm_fast::Vm<&'static InMemoryStorage>; fn create( batch_env: L1BatchEnv, @@ -93,27 +94,30 @@ impl BenchmarkingVmFactory for Fast ) -> Self::Instance { vm_fast::Vm::custom(batch_env, system_env, storage) } +} +impl CountInstructions for Fast { fn count_instructions(tx: &Transaction) -> usize { - let mut vm = BenchmarkingVm::>::default(); - vm.0.push_transaction(tx.clone()); + use vm_fast::interface as vm2; #[derive(Default)] struct InstructionCount(usize); - impl vm_fast::Tracer for InstructionCount { - fn before_instruction< - OP: zksync_vm2::interface::OpcodeType, - S: zksync_vm2::interface::StateInterface, - >( + + impl vm2::Tracer for InstructionCount { + fn before_instruction( &mut self, _: &mut S, ) { self.0 += 1; } } - let mut tracer = InstructionCount(0); - vm.0.inspect(&mut tracer, VmExecutionMode::OneTx); + let (system_env, l1_batch_env) = test_env(); + let mut vm = + vm_fast::Vm::<_, InstructionCount>::custom(l1_batch_env, system_env, &*STORAGE); + vm.push_transaction(tx.clone()); + let mut tracer = InstructionCount(0); + vm.inspect(&mut tracer, InspectExecutionMode::OneTx); tracer.0 } } @@ -135,7 +139,9 @@ impl BenchmarkingVmFactory for Legacy { let storage = StorageView::new(storage).to_rc_ptr(); vm_latest::Vm::new(batch_env, system_env, storage) } +} +impl CountInstructions for Legacy { fn count_instructions(tx: &Transaction) -> usize { let mut vm = BenchmarkingVm::::default(); vm.0.push_transaction(tx.clone()); @@ -144,54 +150,57 @@ impl BenchmarkingVmFactory for Legacy { &mut InstructionCounter::new(count.clone()) .into_tracer_pointer() .into(), - VmExecutionMode::OneTx, + InspectExecutionMode::OneTx, ); count.take() } } +fn test_env() -> (SystemEnv, L1BatchEnv) { + let timestamp = unix_timestamp_ms(); + let system_env = SystemEnv { + zk_porter_available: false, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: SYSTEM_CONTRACTS.clone(), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + chain_id: L2ChainId::from(270), + }; + let l1_batch_env = L1BatchEnv { + previous_batch_hash: None, + number: L1BatchNumber(1), + timestamp, + fee_input: BatchFeeInput::l1_pegged( + 50_000_000_000, // 50 gwei + 250_000_000, // 0.25 gwei + ), + fee_account: Address::random(), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 1, + timestamp, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 100, + }, + }; + (system_env, l1_batch_env) +} + #[derive(Debug)] pub struct BenchmarkingVm(VM::Instance); impl Default for BenchmarkingVm { fn default() -> Self { - let timestamp = unix_timestamp_ms(); - Self(VM::create( - L1BatchEnv { - previous_batch_hash: None, - number: L1BatchNumber(1), - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - }, - SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: 
SYSTEM_CONTRACTS.clone(), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - &STORAGE, - )) + let (system_env, l1_batch_env) = test_env(); + Self(VM::create(l1_batch_env, system_env, &STORAGE)) } } impl BenchmarkingVm { pub fn run_transaction(&mut self, tx: &Transaction) -> VmExecutionResultAndLogs { self.0.push_transaction(tx.clone()); - self.0.execute(VmExecutionMode::OneTx) + self.0.execute(InspectExecutionMode::OneTx) } pub fn run_transaction_full(&mut self, tx: &Transaction) -> VmExecutionResultAndLogs { @@ -225,22 +234,20 @@ impl BenchmarkingVm { #[cfg(test)] mod tests { use assert_matches::assert_matches; - use zksync_contracts::read_bytecode; use zksync_multivm::interface::ExecutionResult; + use zksync_test_contracts::TestContract; use super::*; use crate::{ get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, - get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, + get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, BYTECODES, }; #[test] fn can_deploy_contract() { - let test_contract = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json", - ); + let test_contract = &TestContract::counter().bytecode; let mut vm = BenchmarkingVm::new(); - let res = vm.run_transaction(&get_deploy_tx(&test_contract)); + let res = vm.run_transaction(&get_deploy_tx(test_contract)); assert_matches!(res.result, ExecutionResult::Success { .. }); } @@ -282,4 +289,22 @@ mod tests { let res = vm.run_transaction(&get_heavy_load_test_tx(1)); assert_matches!(res.result, ExecutionResult::Success { .. }); } + + #[test] + fn instruction_count_matches_on_both_vms_for_transfer() { + let tx = get_transfer_tx(0); + let legacy_count = Legacy::count_instructions(&tx); + let fast_count = Fast::count_instructions(&tx); + assert_eq!(legacy_count, fast_count); + } + + #[test] + fn instruction_count_matches_on_both_vms_for_benchmark_bytecodes() { + for bytecode in BYTECODES { + let tx = bytecode.deploy_tx(); + let legacy_count = Legacy::count_instructions(&tx); + let fast_count = Fast::count_instructions(&tx); + assert_eq!(legacy_count, fast_count, "bytecode: {}", bytecode.name); + } + } } diff --git a/deny.toml b/deny.toml index dc5a32c2c070..13ce6504107f 100644 --- a/deny.toml +++ b/deny.toml @@ -9,14 +9,13 @@ feature-depth = 1 [advisories] ignore = [ "RUSTSEC-2024-0375", # atty dependency being unmaintained, dependency of clap and criterion, we would need to update to newer major of dependencies - "RUSTSEC-2024-0320", # yaml_rust dependency being unmaintained, dependency in core, we should consider moving to yaml_rust2 fork "RUSTSEC-2020-0168", # mach dependency being unmaintained, dependency in consensus, we should consider moving to mach2 fork "RUSTSEC-2024-0370", # `cs_derive` needs to be updated to not rely on `proc-macro-error` - # all below caused by StructOpt which we still use and we should move to clap v3 instead + # all below caused by StructOpt which we still use and we should move to clap v4 instead "RUSTSEC-2024-0375", "RUSTSEC-2021-0145", "RUSTSEC-2021-0139", - "RUSTSEC-2024-0375", + "RUSTSEC-2024-0388", # `derivative` is unmaintained, crypto dependenicies (boojum, circuit_encodings and others) rely on it ] [licenses] @@ -34,6 +33,7 @@ allow = [ "OpenSSL", "Apache-2.0 WITH LLVM-exception", "0BSD", + "BSL-1.0", ] confidence-threshold = 0.8 diff --git 
a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml index c930fa376f5e..bd91a5a5b0e4 100644 --- a/docker-compose-gpu-runner-cuda-12-0.yml +++ b/docker-compose-gpu-runner-cuda-12-0.yml @@ -6,8 +6,8 @@ services: ports: - 127.0.0.1:8545:8545 volumes: - - type: bind - source: ./volumes/reth/data + - type: volume + source: reth-data target: /rethdata - type: bind source: ./etc/reth/chaindata @@ -69,3 +69,7 @@ services: environment: # We bind only to 127.0.0.1, so setting insecure password is acceptable here - POSTGRES_PASSWORD=notsecurepassword + +volumes: + postgres-data: + reth-data: \ No newline at end of file diff --git a/docker-compose-runner-nightly.yml b/docker-compose-runner-nightly.yml index cadd1009f7a6..4a854aa0b0a4 100644 --- a/docker-compose-runner-nightly.yml +++ b/docker-compose-runner-nightly.yml @@ -1,4 +1,3 @@ -version: '3.2' services: zk: image: ghcr.io/matter-labs/zk-environment:latest2.0-lightweight-nightly @@ -15,3 +14,7 @@ services: extends: file: docker-compose.yml service: reth + +volumes: + postgres-data: + reth-data: \ No newline at end of file diff --git a/docker-compose-unit-tests.yml b/docker-compose-unit-tests.yml index ddbc76bb196c..b839be2d9f4f 100644 --- a/docker-compose-unit-tests.yml +++ b/docker-compose-unit-tests.yml @@ -1,4 +1,3 @@ -version: '3.2' name: unit_tests services: # An instance of postgres configured to execute Rust unit-tests, tuned for performance. diff --git a/docker-compose.yml b/docker-compose.yml index 1e3a273ec9a4..d8f40720fe84 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,3 @@ -version: '3.2' services: reth: restart: always @@ -6,8 +5,8 @@ services: ports: - 127.0.0.1:8545:8545 volumes: - - type: bind - source: ./volumes/reth/data + - type: volume + source: reth-data target: /rethdata - type: bind source: ./etc/reth/chaindata @@ -22,8 +21,8 @@ services: ports: - 127.0.0.1:5432:5432 volumes: - - type: bind - source: ./volumes/postgres + - type: volume + source: postgres-data target: /var/lib/postgresql/data environment: # We bind only to 127.0.0.1, so setting insecure password is acceptable here @@ -56,3 +55,7 @@ services: profiles: - runner network_mode: host + +volumes: + postgres-data: + reth-data: \ No newline at end of file diff --git a/docker/Makefile b/docker/Makefile index 4e0ca51f904e..19d5fee0907f 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -7,7 +7,7 @@ NODE_VERSION_MIN=20.17.0 YARN_VERSION_MIN=1.22.19 RUST_VERSION=nightly-2024-08-01 SQLX_CLI_VERSION=0.8.1 -FORGE_MIN_VERSION=0.2.0 +FORGE_MIN_VERSION=0.0.2 # Versions and packages checks check-nodejs: diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 80938e4ef835..d5f3c53db99f 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -17,7 +17,7 @@ ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . 
-RUN cargo build --release +RUN cargo build --release --bin zksync_contract_verifier FROM ghcr.io/matter-labs/zksync-runtime-base:latest @@ -47,7 +47,7 @@ RUN mkdir -p /etc/zksolc-bin/vm-1.5.0-a167aa3 && \ chmod +x /etc/zksolc-bin/vm-1.5.0-a167aa3/zksolc # install zksolc 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 6); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 7); do \ mkdir -p /etc/zksolc-bin/$VERSION && \ wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-$VERSION -O /etc/zksolc-bin/$VERSION/zksolc && \ chmod +x /etc/zksolc-bin/$VERSION/zksolc; \ @@ -68,7 +68,7 @@ RUN for VERSION in $(seq -f "v1.4.%g" 0 1); do \ done # install zkvyper 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 5); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 7); do \ mkdir -p /etc/zkvyper-bin/$VERSION && \ wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-$VERSION -O /etc/zkvyper-bin/$VERSION/zkvyper && \ chmod +x /etc/zkvyper-bin/$VERSION/zkvyper; \ diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index f5c558607400..2effe1051b4a 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -15,7 +15,7 @@ ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . -RUN cargo build --release +RUN cargo build --release --bin zksync_external_node --bin block_reverter FROM ghcr.io/matter-labs/zksync-runtime-base:latest diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index 319d0cefbe34..9557156fa7c4 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -17,7 +17,7 @@ WORKDIR /usr/src/zksync COPY . . -RUN cargo build --release --features=rocksdb/io-uring +RUN cargo build --release --features=rocksdb/io-uring --bin zksync_server --bin block_reverter --bin merkle_tree_consistency_checker FROM ghcr.io/matter-labs/zksync-runtime-base:latest diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 000000000000..7585238efedf --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1 @@ +book diff --git a/docs/book.toml b/docs/book.toml new file mode 100644 index 000000000000..89420a95ba38 --- /dev/null +++ b/docs/book.toml @@ -0,0 +1,32 @@ +[book] +authors = ["ZKsync team"] +language = "en" +multilingual = false +src = "src" +title = "ZKsync Era Documentation" + +[output.html] +smart-punctuation = true +mathjax-support = true +git-repository-url = "https://github.com/matter-labs/zksync-era/tree/main/docs" +edit-url-template = "https://github.com/matter-labs/zksync-era/tree/main/docs/{path}" +additional-js = ["js/version-box.js", "js/mermaid-init.js"] +additional-css = ["css/version-box.css"] + +[output.html.playground] +editable = true +line-numbers = true + +[output.html.search] +limit-results = 20 +use-boolean-and = true +boost-title = 2 +boost-hierarchy = 2 +boost-paragraph = 1 +expand = true +heading-split-level = 2 + +[preprocessor] + +[preprocessor.mermaid] +command = "mdbook-mermaid" diff --git a/docs/css/version-box.css b/docs/css/version-box.css new file mode 100644 index 000000000000..4006ac7804b3 --- /dev/null +++ b/docs/css/version-box.css @@ -0,0 +1,46 @@ +#version-box { + display: flex; + align-items: center; + margin-right: 15px; /* Space from the right side */ + background-color: transparent; /* Make the box background transparent */ +} + +/* Base styles for the version selector */ +#version-selector { + background-color: transparent; /* Remove background color */ + border: 1px solid #4a5568; /* Subtle border 
*/ + border-radius: 4px; /* Rounded edges */ + padding: 5px 10px; /* Padding inside dropdown */ + font-size: 0.9em; + font-weight: normal; + outline: none; /* Removes default focus outline */ + cursor: pointer; +} + +/* Text color for dark themes */ +.theme-navy #version-selector, +.theme-coal #version-selector { + color: #f7fafc; /* Light text color for dark backgrounds */ +} + +/* Text color for light theme */ +.theme-light #version-selector { + color: #333333; /* Dark text color for light background */ +} + +/* Hover effect for better user feedback */ +#version-selector:hover { + background-color: rgba(255, 255, 255, 0.1); /* Light hover effect */ +} + +/* Optional: Style for when the selector is focused */ +#version-selector:focus { + border-color: #63b3ed; /* Accent color for focused state */ +} + +.right-buttons { + display: flex; + flex-direction: row; /* Aligns items in a row, left to right */ + align-items: center; /* Centers items vertically */ + gap: 10px; /* Adds space between items */ +} diff --git a/docs/guides/development.md b/docs/guides/development.md deleted file mode 100644 index c859017848b5..000000000000 --- a/docs/guides/development.md +++ /dev/null @@ -1,148 +0,0 @@ -# Development guide - -This document covers development-related actions in ZKsync. - -## Initializing the project - -To setup the main toolkit, `zk`, simply run: - -``` -zk -``` - -You may also configure autocompletion for your shell via: - -``` -zk completion install -``` - -Once all the dependencies were installed, project can be initialized: - -``` -zk init -``` - -This command will do the following: - -- Generate `$ZKSYNC_HOME/etc/env/target/dev.env` file with settings for the applications. -- Initialize docker containers with `reth` Ethereum node for local development. -- Download and unpack files for cryptographical backend. -- Generate required smart contracts. -- Compile all the smart contracts. -- Deploy smart contracts to the local Ethereum network. -- Create “genesis block” for server. - -Initializing may take pretty long, but many steps (such as downloading & unpacking keys and initializing containers) are -required to be done only once. - -Usually, it is a good idea to do `zk init` once after each merge to the `main` branch (as application setup may change). - -Additionally, there is a subcommand `zk clean` to remove previously generated data. Examples: - -``` -zk clean --all # Remove generated configs, database and backups. -zk clean --config # Remove configs only. -zk clean --database # Remove database. -zk clean --backups # Remove backups. -zk clean --database --backups # Remove database *and* backups, but not configs. -``` - -**When do you need it?** - -1. If you have an initialized database and want to run `zk init`, you have to remove the database first. -2. If after getting new functionality from the `main` branch your code stopped working and `zk init` doesn't help, you - may try removing `$ZKSYNC_HOME/etc/env/target/dev.env` and running `zk init` once again. This may help if the - application configuration has changed. - -If you don’t need all of the `zk init` functionality, but just need to start/stop containers, use the following -commands: - -``` -zk up # Set up `reth` and `postgres` containers -zk down # Shut down `reth` and `postgres` containers -``` - -## Reinitializing - -When actively changing something that affects infrastructure (for example, contracts code), you normally don't need the -whole `init` functionality, as it contains many external steps (e.g. 
deploying ERC20 tokens) which don't have to be -redone. - -For this case, there is an additional command: - -``` -zk reinit -``` - -This command does the minimal subset of `zk init` actions required to "reinitialize" the network. It assumes that -`zk init` was called in the current environment before. If `zk reinit` doesn't work for you, you may want to run -`zk init` instead. - -## Committing changes - -`zksync` uses pre-commit and pre-push git hooks for basic code integrity checks. Hooks are set up automatically within -the workspace initialization process. These hooks will not allow to commit the code which does not pass several checks. - -Currently the following criteria are checked: - -- Rust code should always be formatted via `cargo fmt`. -- Other code should always be formatted via `zk fmt`. -- Dummy Prover should not be staged for commit (see below for the explanation). - -## Using Dummy Prover - -By default, the chosen prover is a "dummy" one, meaning that it doesn't actually compute proofs but rather uses mocks to -avoid expensive computations in the development environment. - -To switch dummy prover to real prover, one must change `dummy_verifier` to `false` in `contracts.toml` for your env -(most likely, `etc/env/base/contracts.toml`) and run `zk init` to redeploy smart contracts. - -## Testing - -- Running the `rust` unit-tests: - - ``` - zk test rust - ``` - -- Running a specific `rust` unit-test: - - ``` - zk test rust --package --lib ::tests:: - # e.g. zk test rust --package zksync_core --lib eth_sender::tests::resend_each_block - ``` - -- Running the integration test: - - ``` - zk server # Has to be run in the 1st terminal - zk test i server # Has to be run in the 2nd terminal - ``` - -- Running the benchmarks: - - ``` - zk f cargo bench - ``` - -- Running the loadtest: - - ``` - zk server # Has to be run in the 1st terminal - zk prover # Has to be run in the 2nd terminal if you want to use real prover, otherwise it's not required. 
- zk run loadtest # Has to be run in the 3rd terminal - ``` - -## Contracts - -### Re-build contracts - -``` -zk contract build -``` - -### Publish source code on etherscan - -``` -zk contract publish -``` diff --git a/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml deleted file mode 100644 index f2a0ce318757..000000000000 --- a/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml +++ /dev/null @@ -1,11 +0,0 @@ -server_addr: '0.0.0.0:3054' -public_addr: '127.0.0.1:3054' -debug_page_addr: '0.0.0.0:5000' -max_payload_size: 5000000 -gossip_dynamic_inbound_limit: 100 -gossip_static_outbound: - # preconfigured ENs owned by Matterlabs that you can connect to - - key: 'node:public:ed25519:68d29127ab03408bf5c838553b19c32bdb3aaaae9bf293e5e078c3a0d265822a' - addr: 'external-node-consensus-mainnet.zksync.dev:3054' - - key: 'node:public:ed25519:b521e1bb173d04bc83d46b859d1296378e94a40427a6beb9e7fdd17cbd934c11' - addr: 'external-node-moby-consensus-mainnet.zksync.dev:3054' diff --git a/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml deleted file mode 100644 index a5f752fe405a..000000000000 --- a/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml +++ /dev/null @@ -1,11 +0,0 @@ -server_addr: '0.0.0.0:3054' -public_addr: '127.0.0.1:3054' -debug_page_addr: '0.0.0.0:5000' -max_payload_size: 5000000 -gossip_dynamic_inbound_limit: 100 -gossip_static_outbound: - # preconfigured ENs owned by Matterlabs that you can connect to - - key: 'node:public:ed25519:4a94067664e7b8d0927ab1443491dab71a1d0c63f861099e1852f2b6d0831c3e' - addr: 'external-node-consensus-sepolia.zksync.dev:3054' - - key: 'node:public:ed25519:cfbbebc74127099680584f07a051a2573e2dd7463abdd000d31aaa44a7985045' - addr: 'external-node-moby-consensus-sepolia.zksync.dev:3054' diff --git a/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml b/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml deleted file mode 100644 index be37aaf29329..000000000000 --- a/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml +++ /dev/null @@ -1,14 +0,0 @@ -server_addr: '0.0.0.0:3054' -public_addr: ':3054' -max_payload_size: 5000000 -gossip_dynamic_inbound_limit: 100 -gossip_static_outbound: - # preconfigured ENs owned by Matterlabs that you can connect to - - key: 'node:public:ed25519:68d29127ab03408bf5c838553b19c32bdb3aaaae9bf293e5e078c3a0d265822a' - addr: 'external-node-consensus-mainnet.zksync.dev:3054' - - key: 'node:public:ed25519:b521e1bb173d04bc83d46b859d1296378e94a40427a6beb9e7fdd17cbd934c11' - addr: 'external-node-moby-consensus-mainnet.zksync.dev:3054' - - key: 'node:public:ed25519:45d23515008b5121484eb774507df63ff4ce9f4b65e6a03b7c9ec4e0474d3044' - addr: 'consensus-mainnet-1.zksync-nodes.com:3054' - - key: 'node:public:ed25519:c278bb0831e8d0dcd3aaf0b7af7c3dca048d50b28c578ceecce61a412986b883' - addr: 'consensus-mainnet-2.zksync-nodes.com:3054' diff --git a/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml b/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml deleted file mode 100644 index 8d2551c07087..000000000000 --- a/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml +++ /dev/null @@ -1,14 +0,0 @@ 
-server_addr: '0.0.0.0:3054' -public_addr: ':3054' -max_payload_size: 5000000 -gossip_dynamic_inbound_limit: 100 -gossip_static_outbound: - # preconfigured ENs owned by Matterlabs that you can connect to - - key: 'node:public:ed25519:4a94067664e7b8d0927ab1443491dab71a1d0c63f861099e1852f2b6d0831c3e' - addr: 'external-node-consensus-sepolia.zksync.dev:3054' - - key: 'node:public:ed25519:cfbbebc74127099680584f07a051a2573e2dd7463abdd000d31aaa44a7985045' - addr: 'external-node-moby-consensus-sepolia.zksync.dev:3054' - - key: 'node:public:ed25519:f48616db5965ada49dcbd51b1de11068a27c9886c900d3522607f16dff2e66fc' - addr: 'consensus-sepolia-1.zksync-nodes.com:3054' - - key: 'node:public:ed25519:3789d49293792755a9c1c2a7ed9b0e210e92994606dcf76388b5635d7ed676cb' - addr: 'consensus-sepolia-2.zksync-nodes.com:3054' diff --git a/docs/guides/launch.md b/docs/guides/launch.md deleted file mode 100644 index 10c0b10f5d84..000000000000 --- a/docs/guides/launch.md +++ /dev/null @@ -1,343 +0,0 @@ -# Running the application - -This document covers common scenarios for launching ZKsync applications set locally. - -## Prerequisites - -Prepare dev environment prerequisites: see - -[Installing dependencies](./setup-dev.md) - -## Setup local dev environment - -Setup: - -``` -zk # installs and builds zk itself -zk init -``` - -If you face any other problems with the `zk init` command, go to the -[Troubleshooting](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/launch.md#troubleshooting) section at -the end of this file. There are solutions for some common error cases. - -To completely reset the dev environment: - -- Stop services: - - ``` - zk down - ``` - -- Repeat the setup procedure above - -If `zk init` has already been executed, and now you only need to start docker containers (e.g. after reboot), simply -launch: - -``` -zk up -``` - -### Run observability stack - -If you want to run [Dockprom](https://github.com/stefanprodan/dockprom/) stack (Prometheus, Grafana) alongside other -containers - add `--run-observability` parameter during initialisation. - -``` -zk init --run-observability -``` - -That will also provision Grafana with -[era-observability](https://github.com/matter-labs/era-observability/tree/main/dashboards) dashboards. You can then -access it at `http://127.0.0.1:3000/` under credentials `admin/admin`. - -> If you don't see any data displayed on the Grafana dashboards - try setting the timeframe to "Last 30 minutes". You -> will also have to have `jq` installed on your system. - -## (Re)deploy db and contracts - -``` -zk contract redeploy -``` - -## Environment configurations - -Env config files are held in `etc/env/target/` - -List configurations: - -``` -zk env -``` - -Switch between configurations: - -``` -zk env -``` - -Default configuration is `dev.env`, which is generated automatically from `dev.env.example` during `zk init` command -execution. - -## Build and run server - -Run server: - -``` -zk server -``` - -Server is configured using env files in `./etc/env` directory. After the first initialization, file -`./etc/env/target/dev.env`will be created. By default, this file is copied from the `./etc/env/target/dev.env.example` -template. - -Make sure you have environment variables set right, you can check it by running: `zk env`. You should see `* dev` in -output. 
- -## Running server using Google cloud storage object store instead of default In memory store - -Get the service_account.json file containing the GCP credentials from kubernetes secret for relevant environment(stage2/ -testnet2) add that file to the default location ~/gcloud/service_account.json or update object_store.toml with the file -location - -``` -zk server -``` - -## Running prover server - -Running on machine without GPU - -```shell -zk f cargo +nightly run --release --bin zksync_prover -``` - -Running on machine with GPU - -```shell -zk f cargo +nightly run --features gpu --release --bin zksync_prover -``` - -## Running the verification key generator - -```shell -# ensure that the setup_2^26.key in the current directory, the file can be download from https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key - -# To generate all verification keys -cargo run --release --bin zksync_verification_key_generator - - -``` - -## Generating binary verification keys for existing json verification keys - -```shell -cargo run --release --bin zksync_json_to_binary_vk_converter -- -o /path/to/output-binary-vk -``` - -## Generating commitment for existing verification keys - -```shell -cargo run --release --bin zksync_commitment_generator -``` - -## Running the contract verifier - -```shell -# To process fixed number of jobs -cargo run --release --bin zksync_contract_verifier -- --jobs-number X - -# To run until manual exit -zk contract_verifier -``` - -## Troubleshooting - -### SSL error: certificate verify failed - -**Problem**. `zk init` fails with the following error: - -``` -Initializing download: https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2%5E20.key -SSL error: certificate verify failed -``` - -**Solution**. Make sure that the version of `axel` on your computer is `2.17.10` or higher. - -### rmSync is not a function - -**Problem**. `zk init` fails with the following error: - -``` -fs_1.default.rmSync is not a function -``` - -**Solution**. Make sure that the version of `node.js` installed on your computer is `14.14.0` or higher. - -### Invalid bytecode: () - -**Problem**. 
`zk init` fails with an error similar to: - -``` -Running `target/release/zksync_server --genesis` -2023-04-05T14:23:40.291277Z INFO zksync_core::genesis: running regenesis -thread 'main' panicked at 'Invalid bytecode: ()', core/lib/utils/src/bytecode.rs:159:10 -stack backtrace: - 0: 0x104551410 - std::backtrace_rs::backtrace::libunwind::trace::hf9c5171f212b04e2 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/../../backtrace/src/backtrace/libunwind.rs:93:5 - 1: 0x104551410 - std::backtrace_rs::backtrace::trace_unsynchronized::h179003f6ec753118 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/../../backtrace/src/backtrace/mod.rs:66:5 - 2: 0x104551410 - std::sys_common::backtrace::_print_fmt::h92d38f701cf42b17 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/sys_common/backtrace.rs:65:5 - 3: 0x104551410 - ::fmt::hb33e6e8152f78c95 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/sys_common/backtrace.rs:44:22 - 4: 0x10456cdb0 - core::fmt::write::hd33da007f7a27e39 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/fmt/mod.rs:1208:17 - 5: 0x10454b41c - std::io::Write::write_fmt::h7edc10723862001e - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/io/mod.rs:1682:15 - 6: 0x104551224 - std::sys_common::backtrace::_print::h5e00f05f436af01f - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/sys_common/backtrace.rs:47:5 - 7: 0x104551224 - std::sys_common::backtrace::print::h895ee35b3f17b334 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/sys_common/backtrace.rs:34:9 - 8: 0x104552d84 - std::panicking::default_hook::{{closure}}::h3b7ee083edc2ea3e - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:267:22 - 9: 0x104552adc - std::panicking::default_hook::h4e7c2c28eba716f5 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:286:9 - 10: 0x1045533a8 - std::panicking::rust_panic_with_hook::h1672176227032c45 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:688:13 - 11: 0x1045531c8 - std::panicking::begin_panic_handler::{{closure}}::h0b2d072f9624d32e - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:579:13 - 12: 0x104551878 - std::sys_common::backtrace::__rust_end_short_backtrace::he9abda779115b93c - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/sys_common/backtrace.rs:137:18 - 13: 0x104552f24 - rust_begin_unwind - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:575:5 - 14: 0x1045f89c0 - core::panicking::panic_fmt::h23ae44661fec0889 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/panicking.rs:64:14 - 15: 0x1045f8ce0 - core::result::unwrap_failed::h414a6cbb12b1e143 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/result.rs:1791:5 - 16: 0x103f79a30 - zksync_utils::bytecode::hash_bytecode::h397dd7c5b6202bf4 - 17: 0x103e47e78 - zksync_contracts::BaseSystemContracts::load_from_disk::h0e2da8f63292ac46 - 18: 0x102d885a0 - zksync_core::genesis::ensure_genesis_state::{{closure}}::h5143873f2c337e11 - 19: 0x102d7dee0 - zksync_core::genesis_init::{{closure}}::h4e94f3d4ad984788 - 20: 0x102d9c048 - zksync_server::main::{{closure}}::h3fe943a3627d31e1 - 21: 0x102d966f8 - tokio::runtime::park::CachedParkThread::block_on::h2f2fdf7edaf08470 - 22: 0x102df0dd4 - tokio::runtime::runtime::Runtime::block_on::h1fd1d83272a23194 - 23: 0x102e21470 - 
zksync_server::main::h500621fd4d160768 - 24: 0x102d328f0 - std::sys_common::backtrace::__rust_begin_short_backtrace::h52973e519e2e8a0d - 25: 0x102e08ea8 - std::rt::lang_start::{{closure}}::hbd395afe0ab3b799 - 26: 0x10454508c - core::ops::function::impls:: for &F>::call_once::ha1c2447b9b665e13 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/core/src/ops/function.rs:606:13 - 27: 0x10454508c - std::panicking::try::do_call::ha57d6d1e9532dc1f - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:483:40 - 28: 0x10454508c - std::panicking::try::hca0526f287961ecd - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:447:19 - 29: 0x10454508c - std::panic::catch_unwind::hdcaa7fa896e0496a - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panic.rs:137:14 - 30: 0x10454508c - std::rt::lang_start_internal::{{closure}}::h142ec071d3766871 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/rt.rs:148:48 - 31: 0x10454508c - std::panicking::try::do_call::h95f5e55d6f048978 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:483:40 - 32: 0x10454508c - std::panicking::try::h0fa00e2f7b4a5c64 - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panicking.rs:447:19 - 33: 0x10454508c - std::panic::catch_unwind::h1765f149814d4d3e - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/panic.rs:137:14 - 34: 0x10454508c - std::rt::lang_start_internal::h00a235e820a7f01c - at /rustc/d5a82bbd26e1ad8b7401f6a718a9c57c96905483/library/std/src/rt.rs:148:20 - 35: 0x102e21578 - _main -Error: Genesis is not needed (either Postgres DB or tree's Rocks DB is not empty) -``` - -**Description**. This means that your bytecode config file has an empty entry: `"bytecode": "0x"`. This happens because -your `zksync-2-dev/etc/system-contracts/package.json`'s dependency on `"@matterlabs/hardhat-zksync-solc"` is outdated. -We don't expect this error to happen as we've updated to latest version which fixes the problem. - -**Solution**. Update your dependency and reinit: - -``` -yarn add -D @matterlabs/hardhat-zksync-solc # in the system-contracts folder -zk clean --all && zk init -``` - -On the run, it moved from: - -``` - "@matterlabs/hardhat-zksync-solc": "^0.3.14-beta.3", -``` - -to: - -``` - "@matterlabs/hardhat-zksync-solc": "^0.3.15", -``` - -### Error: Bytecode length in 32-byte words must be odd - -**Problem**. `zk init` fails with an error similar to: - -``` -Successfully generated Typechain artifacts! -Error: Error: Bytecode length in 32-byte words must be odd - at hashL2Bytecode (/Users/emilluta/code/zksync-2-dev/contracts/zksync/src/utils.ts:29:15) - at computeL2Create2Address (/Users/emilluta/code/zksync-2-dev/contracts/zksync/src/utils.ts:53:26) - at /Users/emilluta/code/zksync-2-dev/contracts/zksync/src/compileAndDeployLibs.ts:50:63 - at step (/Users/emilluta/code/zksync-2-dev/contracts/zksync/src/compileAndDeployLibs.ts:33:23) - at Object.next (/Users/emilluta/code/zksync-2-dev/contracts/zksync/src/compileAndDeployLibs.ts:14:53) - at fulfilled (/Users/emilluta/code/zksync-2-dev/contracts/zksync/src/compileAndDeployLibs.ts:5:58) -error Command failed with exit code 1. -info Visit https://yarnpkg.com/en/docs/cli/run for documentation about this command. -error Command failed. 
-Exit code: 1 -Command: /Users/emilluta/.nvm/versions/node/v16.19.1/bin/node -Arguments: /opt/homebrew/Cellar/yarn/1.22.19/libexec/lib/cli.js compile-and-deploy-libs -Directory: /Users/emilluta/code/zksync-2-dev/contracts/zksync -Output: - -info Visit https://yarnpkg.com/en/docs/cli/workspace for documentation about this command. -error Command failed with exit code 1. -info Visit https://yarnpkg.com/en/docs/cli/run for documentation about this command. -Error: Child process exited with code 1 -``` - -**Description**. This means that your bytecode config file has an empty entry: `"bytecode": "0x"`. This happens because -your `zksync-2-dev/contracts/zksync/package.json`'s dependency on `"@matterlabs/hardhat-zksync-solc"` is outdated. We -don't expect this error to happen as we've updated to latest version which fixes the problem. - -**Solution**. Update your dependency and reinit: - -``` -yarn add -D @matterlabs/hardhat-zksync-solc # in the system-contracts folder -zk clean --all && zk init -``` - -On the run, it moved from: - -``` - "@matterlabs/hardhat-zksync-solc": "^0.3.14-beta.3", -``` - -to: - -``` - "@matterlabs/hardhat-zksync-solc": "^0.3.15", -``` - -### Error: Cannot read properties of undefined (reading 'compilerPath') - -**Problem**. `zk init` fails with an error similar to the following: - -```text -Yarn project directory: /Users//Projects/zksync-era/contracts/system-contracts -Error: Cannot read properties of undefined (reading 'compilerPath') -error Command failed with exit code 1. -``` - -**Description**. The compiler downloader -[could not verify](https://github.com/NomicFoundation/hardhat/blob/0d850d021f3ab33b59b1ea2ae70d1e659e579e40/packages/hardhat-core/src/internal/solidity/compiler/downloader.ts#L336-L383) -that the Solidity compiler it downloaded actually works. - -**Solution**. Delete the cached `*.does.not.work` file to run the check again: - -```sh -# NOTE: Compiler version, commit hash may differ. -rm $HOME/Library/Caches/hardhat-nodejs/compilers-v2/macosx-amd64/solc-macosx-amd64-v0.8.20+commit.a1b79de6.does.not.work -``` diff --git a/docs/js/mermaid-init.js b/docs/js/mermaid-init.js new file mode 100644 index 000000000000..15a7f4e57c60 --- /dev/null +++ b/docs/js/mermaid-init.js @@ -0,0 +1,35 @@ +(() => { + const darkThemes = ['ayu', 'navy', 'coal']; + const lightThemes = ['light', 'rust']; + + const classList = document.getElementsByTagName('html')[0].classList; + + let lastThemeWasLight = true; + for (const cssClass of classList) { + if (darkThemes.includes(cssClass)) { + lastThemeWasLight = false; + break; + } + } + + const theme = lastThemeWasLight ? 
'default' : 'dark'; + mermaid.initialize({ startOnLoad: true, theme }); + + // Simplest way to make mermaid re-render the diagrams in the new theme is via refreshing the page + + for (const darkTheme of darkThemes) { + document.getElementById(darkTheme).addEventListener('click', () => { + if (lastThemeWasLight) { + window.location.reload(); + } + }); + } + + for (const lightTheme of lightThemes) { + document.getElementById(lightTheme).addEventListener('click', () => { + if (!lastThemeWasLight) { + window.location.reload(); + } + }); + } +})(); diff --git a/docs/js/version-box.js b/docs/js/version-box.js new file mode 100644 index 000000000000..932a75a5e3bb --- /dev/null +++ b/docs/js/version-box.js @@ -0,0 +1,61 @@ +document.addEventListener('DOMContentLoaded', function () { + // Get the base URL from the mdBook configuration + const baseUrl = document.location.origin + '/zksync-era/core'; + + // Function to create version selector + function createVersionSelector(versions) { + const versionSelector = document.createElement('select'); + versionSelector.id = 'version-selector'; + + // Get the current path + const currentPath = window.location.pathname; + + // Iterate over the versions object + for (const [versionName, versionUrl] of Object.entries(versions)) { + const option = document.createElement('option'); + option.value = versionUrl + '/'; + option.textContent = versionName; + + // Check if the current URL matches this option's value + if (currentPath.includes(option.value)) { + option.selected = true; // Set this option as selected + } + + versionSelector.appendChild(option); + } + + // Event listener to handle version change + versionSelector.addEventListener('change', function () { + const selectedVersion = versionSelector.value; + // Redirect to the selected version URL + window.location.href = '/zksync-era/core' + selectedVersion; + }); + + return versionSelector; + } + + // Fetch versions from JSON file + fetch(baseUrl + '/versions.json') + .then((response) => { + if (!response.ok) { + throw new Error('Network response was not ok ' + response.statusText); + } + return response.json(); + }) + .then((data) => { + const versionSelector = createVersionSelector(data); + const nav = document.querySelector('.right-buttons'); + + if (nav) { + const versionBox = document.createElement('div'); + versionBox.id = 'version-box'; + versionBox.appendChild(versionSelector); + nav.appendChild(versionBox); // Append to the .right-buttons container + } else { + console.error('.right-buttons element not found.'); + } + }) + .catch((error) => { + console.error('There has been a problem with your fetch operation:', error); + }); +}); diff --git a/docs/src/README.md b/docs/src/README.md new file mode 100644 index 000000000000..ab6a417877b5 --- /dev/null +++ b/docs/src/README.md @@ -0,0 +1,26 @@ +# Introduction + +Welcome to the documentation! This guide provides comprehensive insights into the architecture, setup, usage, and +advanced features of ZKsync. + +## Documentation Structure + +- **Guides**: The Guides section is designed to help users at every level, from setup and development to advanced + configuration and debugging techniques. It covers essential topics, including Docker setup, repository management, and + architecture. + +- **Specs**: This section dives into the technical specifications of our system. Here, you’ll find detailed + documentation on data availability, L1 and L2 communication, smart contract interactions, Zero-Knowledge proofs, and + more. 
Each topic includes an in-depth explanation to support advanced users and developers. + +- **Announcements**: This section highlights important updates, announcements, and committee details, providing + essential information to keep users informed on the latest changes. + +## Getting Started + +Feel free to explore each section according to your needs. This documentation is designed to be modular, so you can jump +to specific topics or follow through step-by-step. + +--- + +Thank you for using our documentation! diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md new file mode 100644 index 000000000000..c0dd8638c8d9 --- /dev/null +++ b/docs/src/SUMMARY.md @@ -0,0 +1,86 @@ + + +# Summary + +[Introduction](README.md) + +# Guides + +- [Basic](guides/README.md) + + - [Setup Dev](guides/setup-dev.md) + - [Development](guides/development.md) + - [Launch](guides/launch.md) + - [Architecture](guides/architecture.md) + - [Build Docker](guides/build-docker.md) + - [Repositories](guides/repositories.md) + +- [Advanced](guides/advanced/README.md) + - [Local initialization](guides/advanced/01_initialization.md) + - [Deposits](guides/advanced/02_deposits.md) + - [Withdrawals](guides/advanced/03_withdrawals.md) + - [Contracts](guides/advanced/04_contracts.md) + - [Calls](guides/advanced/05_how_call_works.md) + - [Transactions](guides/advanced/06_how_transaction_works.md) + - [Fee Model](guides/advanced/07_fee_model.md) + - [L2 Messaging](guides/advanced/08_how_l2_messaging_works.md) + - [Pubdata](guides/advanced/09_pubdata.md) + - [Pubdata with Blobs](guides/advanced/10_pubdata_with_blobs.md) + - [Bytecode compression](guides/advanced/11_compression.md) + - [EraVM intro](guides/advanced/12_alternative_vm_intro.md) + - [ZK Intuition](guides/advanced/13_zk_intuition.md) + - [ZK Deeper Dive](guides/advanced/14_zk_deeper_overview.md) + - [Prover Keys](guides/advanced/15_prover_keys.md) + - [Advanced Debugging](guides/advanced/90_advanced_debugging.md) + - [Docker and CI](guides/advanced/91_docker_and_ci.md) + +# External Node + +- [External node](guides/external-node/01_intro.md) + - [Quick Start](guides/external-node/00_quick_start.md) + - [Configuration](guides/external-node/02_configuration.md) + - [Running](guides/external-node/03_running.md) + - [Observability](guides/external-node/04_observability.md) + - [Troubleshooting](guides/external-node/05_troubleshooting.md) + - [Components](guides/external-node/06_components.md) + - [Snapshots Recovery](guides/external-node/07_snapshots_recovery.md) + - [Pruning](guides/external-node/08_pruning.md) + - [Treeless Mode](guides/external-node/09_treeless_mode.md) + - [Decentralization](guides/external-node/10_decentralization.md) + +# Specs + +- [Introduction](specs/introduction.md) + - [Overview](specs/overview.md) + - [Blocks and Batches](specs/blocks_batches.md) + - [L1 Smart Contracts](specs/l1_smart_contracts.md) +- [Data Availability](specs/data_availability/overview.md) + - [Pubdata](specs/data_availability/pubdata.md) + - [Compression](specs/data_availability/compression.md) + - [Reconstruction](specs/data_availability/reconstruction.md) + - [Validium ZK Porter](specs/data_availability/validium_zk_porter.md) +- [L1 L2 Communication](specs/l1_l2_communication/overview_deposits_withdrawals.md) + - [L1 to L2](specs/l1_l2_communication/l1_to_l2.md) + - [L2 to L1](specs/l1_l2_communication/l2_to_l1.md) +- [Prover](specs/prover/overview.md) + - [Getting Started](specs/prover/getting_started.md) + - [ZK Terminology](specs/prover/zk_terminology.md) + - 
[Function Check if Satisfied](specs/prover/boojum_function_check_if_satisfied.md) + - [Gadgets](specs/prover/boojum_gadgets.md) + - [Circuit Testing](specs/prover/circuit_testing.md) + - [Circuits Overview](specs/prover/circuits/overview.md) +- [ZK Chains](specs/zk_chains/overview.md) + - [Gateway](specs/zk_chains/gateway.md) + - [Interop](specs/zk_chains/interop.md) + - [Shared Bridge](specs/zk_chains/shared_bridge.md) +- [ZK EVM](specs/zk_evm/vm_overview.md) + - [Account Abstraction](specs/zk_evm/account_abstraction.md) + - [Bootloader](specs/zk_evm/bootloader.md) + - [Fee Model](specs/zk_evm/fee_model.md) + - [Precompiles](specs/zk_evm/precompiles.md) + - [System Contracts](specs/zk_evm/system_contracts.md) + +# Announcements + +- [Announcements](announcements/README.md) + - [Attester Committee](announcements/attester_commitee.md) diff --git a/docs/announcements/README.md b/docs/src/announcements/README.md similarity index 100% rename from docs/announcements/README.md rename to docs/src/announcements/README.md diff --git a/docs/announcements/attester_commitee.md b/docs/src/announcements/attester_commitee.md similarity index 97% rename from docs/announcements/attester_commitee.md rename to docs/src/announcements/attester_commitee.md index 84ff8aa5be6d..148e51a4f976 100644 --- a/docs/announcements/attester_commitee.md +++ b/docs/src/announcements/attester_commitee.md @@ -36,7 +36,7 @@ Participants can leave the committee at any time. The only action that is required to participate is to share your attester public key with the Main Node operator (by opening an issue in this repo or using any other communication channel). You can find it in the comment in the `consensus_secrets.yaml` file (that was - in most cases - generated by the tool described -[here](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/09_decentralization.md#generating-secrets)) +[here](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/10_decentralization.md#generating-secrets)) > [!WARNING] > diff --git a/docs/src/guides/README.md b/docs/src/guides/README.md new file mode 100644 index 000000000000..f9d5bc852a26 --- /dev/null +++ b/docs/src/guides/README.md @@ -0,0 +1,12 @@ +# ZKsync basic guides + +This section contains basic guides that aim to explain the ZKsync ecosystem in an easy to grasp way. + +## Table of Contents + +- [Architecture](./architecture.md) +- [Build Docker](./build-docker.md) +- [Development](./development.md) +- [Launch](./launch.md) +- [Repositories](./repositories.md) +- [Setup Dev](./setup-dev.md) diff --git a/docs/guides/advanced/01_initialization.md b/docs/src/guides/advanced/01_initialization.md similarity index 75% rename from docs/guides/advanced/01_initialization.md rename to docs/src/guides/advanced/01_initialization.md index 79c33434d3b5..2bc4a9c3a459 100644 --- a/docs/guides/advanced/01_initialization.md +++ b/docs/src/guides/advanced/01_initialization.md @@ -1,4 +1,4 @@ -# ZKsync deeper dive +# ZKsync Deeper Dive The goal of this doc is to show you some more details on how ZKsync works internally. @@ -7,18 +7,22 @@ system). Now let's take a look at what's inside: -### Initialization (zk init) +### Initialization -Let's take a deeper look into what `zk init` does. +Let's take a deeper look into what `zkstack ecosystem init` does. -#### zk tool +#### ZK Stack CLI -`zk` itself is implemented in typescript (you can see the code in `infrastructure` directory). 
If you change anything -there, make sure to run `zk` (that compiles this code), before re-running `zk init`. +`zkstack` itself is implemented in Rust (you can see the code in the `/zkstack_cli` directory). If you change anything +there, make sure to run `zkstackup --local` from the root folder (that compiles and installs this code), before +re-running any `zkstack` command. -#### zk init +#### Containers -As first step, it gets the docker images for postgres and reth. +The first step to initialize a ZK Stack ecosystem is to run the command `zkstack containers`. This command gets the +docker images for `postgres` and `reth`. If the `--observability` option is passed to the command, or the corresponding +option is selected in the interactive prompt, then Prometheus, Grafana and other observability-related images are +downloaded and run. Reth (one of the Ethereum clients) will be used to setup our own copy of L1 chain (that our local ZKsync would use). @@ -26,11 +30,19 @@ Postgres is one of the two databases, that is used by ZKsync (the other one is R stored in postgres (blocks, transactions etc) - while RocksDB is only storing the state (Tree & Map) - and it used by VM. -Then we compile JS packages (these include our web3 sdk, tools and testing infrastructure). +#### Ecosystem -Then L1 & L2 contracts. +The next step is to run the command `zkstack ecosystem init`. -And now we're ready to start setting up the system. +This command: + +- Collects and finalizes the ecosystem configuration. +- Builds and deploys L1 & L2 contracts. +- Initializes each chain defined in the `/chains` folder. (Currently, a single chain `era` is defined there, but you can + create your own chains by running `zkstack chain create`). +- Sets up observability. +- Runs the genesis process. +- Initializes the database. #### Postgres @@ -83,8 +95,8 @@ If everything goes well, you should see that L1 blocks are being produced. Now we can start the main server: -```shell -zk server +```bash +zkstack server ``` This will actually run a cargo binary (`zksync_server`). @@ -96,7 +108,7 @@ Currently we don't send any transactions there (so the logs might be empty). But you should see some initial blocks in postgres: -``` +```sql select * from miniblocks; ``` @@ -107,7 +119,7 @@ Let's finish this article, by taking a look at our L1: We will use the `web3` tool to communicate with the L1, have a look at [02_deposits.md](02_deposits.md) for installation instructions. You can check that you're a (localnet) crypto trillionaire, by running: -```shell +```bash ./web3 --rpc-url http://localhost:8545 balance 0x36615Cf349d7F6344891B1e7CA7C72883F5dc049 ``` @@ -120,14 +132,14 @@ In order to communicate with L2 (our ZKsync) - we have to deploy multiple contra Ethereum). You can look on the `deployL1.log` file - to see the list of contracts that were deployed and their accounts. First thing in the file, is the deployer/governor wallet - this is the account that can change, freeze and unfreeze the -contracts (basically the owner). You can also verify (using the getBalance method above), that is has a lot of tokens. +contracts (basically the owner). You can verify the token balance using the `getBalance` method above. Then, there are a bunch of contracts (CRATE2_FACTOR, DIAMOND_PROXY, L1_ALLOW_LIST etc etc) - for each one, the file contains the address.
You can quickly verify that they were really deployed, by calling: -```shell +```bash ./web3 --rpc-url http://localhost:8545 address XXX ``` diff --git a/docs/guides/advanced/02_deposits.md b/docs/src/guides/advanced/02_deposits.md similarity index 100% rename from docs/guides/advanced/02_deposits.md rename to docs/src/guides/advanced/02_deposits.md diff --git a/docs/guides/advanced/03_withdrawals.md b/docs/src/guides/advanced/03_withdrawals.md similarity index 100% rename from docs/guides/advanced/03_withdrawals.md rename to docs/src/guides/advanced/03_withdrawals.md diff --git a/docs/guides/advanced/04_contracts.md b/docs/src/guides/advanced/04_contracts.md similarity index 100% rename from docs/guides/advanced/04_contracts.md rename to docs/src/guides/advanced/04_contracts.md diff --git a/docs/guides/advanced/05_how_call_works.md b/docs/src/guides/advanced/05_how_call_works.md similarity index 98% rename from docs/guides/advanced/05_how_call_works.md rename to docs/src/guides/advanced/05_how_call_works.md index 5b9458ddce8e..0126c5349e90 100644 --- a/docs/guides/advanced/05_how_call_works.md +++ b/docs/src/guides/advanced/05_how_call_works.md @@ -12,7 +12,7 @@ Since the 'call' method is only for reading data, all the calculations will happ ### Calling the 'call' method If you need to make calls quickly, you can use the 'cast' binary from the -[foundry](https://github.com/foundry-rs/foundry) suite: +[Foundry ZKsync](https://foundry-book.zksync.io/getting-started/installation) suite: ```shell= cast call 0x23DF7589897C2C9cBa1C3282be2ee6a938138f10 "myfunction()()" --rpc-url http://localhost:3050 diff --git a/docs/guides/advanced/06_how_transaction_works.md b/docs/src/guides/advanced/06_how_transaction_works.md similarity index 100% rename from docs/guides/advanced/06_how_transaction_works.md rename to docs/src/guides/advanced/06_how_transaction_works.md diff --git a/docs/guides/advanced/07_fee_model.md b/docs/src/guides/advanced/07_fee_model.md similarity index 100% rename from docs/guides/advanced/07_fee_model.md rename to docs/src/guides/advanced/07_fee_model.md diff --git a/docs/guides/advanced/08_how_l2_messaging_works.md b/docs/src/guides/advanced/08_how_l2_messaging_works.md similarity index 100% rename from docs/guides/advanced/08_how_l2_messaging_works.md rename to docs/src/guides/advanced/08_how_l2_messaging_works.md diff --git a/docs/guides/advanced/09_pubdata.md b/docs/src/guides/advanced/09_pubdata.md similarity index 100% rename from docs/guides/advanced/09_pubdata.md rename to docs/src/guides/advanced/09_pubdata.md diff --git a/docs/guides/advanced/10_pubdata_with_blobs.md b/docs/src/guides/advanced/10_pubdata_with_blobs.md similarity index 100% rename from docs/guides/advanced/10_pubdata_with_blobs.md rename to docs/src/guides/advanced/10_pubdata_with_blobs.md diff --git a/docs/guides/advanced/11_compression.md b/docs/src/guides/advanced/11_compression.md similarity index 100% rename from docs/guides/advanced/11_compression.md rename to docs/src/guides/advanced/11_compression.md diff --git a/docs/guides/advanced/12_alternative_vm_intro.md b/docs/src/guides/advanced/12_alternative_vm_intro.md similarity index 100% rename from docs/guides/advanced/12_alternative_vm_intro.md rename to docs/src/guides/advanced/12_alternative_vm_intro.md diff --git a/docs/guides/advanced/13_zk_intuition.md b/docs/src/guides/advanced/13_zk_intuition.md similarity index 100% rename from docs/guides/advanced/13_zk_intuition.md rename to docs/src/guides/advanced/13_zk_intuition.md diff --git 
a/docs/guides/advanced/14_zk_deeper_overview.md b/docs/src/guides/advanced/14_zk_deeper_overview.md similarity index 100% rename from docs/guides/advanced/14_zk_deeper_overview.md rename to docs/src/guides/advanced/14_zk_deeper_overview.md diff --git a/docs/guides/advanced/15_prover_keys.md b/docs/src/guides/advanced/15_prover_keys.md similarity index 100% rename from docs/guides/advanced/15_prover_keys.md rename to docs/src/guides/advanced/15_prover_keys.md diff --git a/docs/src/guides/advanced/16_decentralization.md b/docs/src/guides/advanced/16_decentralization.md new file mode 100644 index 000000000000..6037235ea064 --- /dev/null +++ b/docs/src/guides/advanced/16_decentralization.md @@ -0,0 +1,104 @@ +# Decentralization + +To enable support for synchronization over the p2p network, the main node needs to have the "consensus" component configured +and enabled as follows: + +## Generating the consensus secrets + +Run the following to generate consensus secrets: + +``` +docker run --entrypoint /usr/bin/zksync_external_node "matterlabs/external-node:2.0-v25.1.0" generate-secrets > consensus_secrets.yaml +chmod 600 consensus_secrets.yaml +``` + +## Preparing the consensus config + +Create a `consensus_config.yaml` file with the following content (remember to replace the placeholders): + +```yaml +server_addr: '0.0.0.0:3054' +public_addr: + # Address under which the node is accessible to the other nodes. + # It can be a public domain, like `example.com:3054`, in case the main node is accessible from the internet, + # or it can be a kubernetes cluster domain, like `server-v2-core..svc.cluster.local:3054` in + # case the main node should be only accessible within the cluster. +debug_page_addr: '0.0.0.0:5000' +max_payload_size: 3200000 +gossip_dynamic_inbound_limit: 10 +genesis_spec: + chain_id: # chain id + protocol_version: 1 # consensus protocol version + validators: + - key: validator:public:??? # public key of the main node (copy this PUBLIC key from consensus_secrets.yaml) + weight: 1 + leader: validator:public:??? # same as above - main node will be the only validator and the only leader. +``` + +## Providing the configuration to the `zksync_server` + +To enable the consensus component for the main node, you need to append +`--components=,consensus` to the `zksync_server` command line arguments. In addition to that, you need to provide the configuration (from the files `consensus_config.yaml` and +`consensus_secrets.yaml` that we have just prepared) to the `zksync_server` binary. There are 2 ways (hopefully not for +long) to achieve that: + +- In the file-based configuration system, the consensus config is embedded in the + [general config](https://github.com/matter-labs/zksync-era/blob/1edcabe0c6a02d5b6700c29c0d9f6220ec6fb03c/core/lib/config/src/configs/general.rs#L58), + and the consensus secrets are embedded in the + [secrets config](https://github.com/matter-labs/zksync-era/blob/main/core/bin/zksync_server/src/main.rs). Paste the + content of the generated `consensus_secrets.yaml` file into the `secrets` config, and the prepared config into the `general` + config. + +- In the env-var-based configuration system, the consensus config and consensus secrets files are passed as standalone + files. The paths to these files need to be passed as env vars `CONSENSUS_CONFIG_PATH` and `CONSENSUS_SECRETS_PATH`.
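+ +For example, with the env-var-based flow, launching the server could look roughly like this (a sketch; the paths and the `state_keeper` component are illustrative, mirroring the kubernetes example below): + +```bash +export CONSENSUS_CONFIG_PATH=/etc/consensus_config.yaml +export CONSENSUS_SECRETS_PATH=/etc/consensus_secrets/.consensus_secrets.yaml +zksync_server --components=state_keeper,consensus +```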
+ +## Gitops repo config + +If you are using the matterlabs gitops repo to configure the main node, it is even more complicated because the +`consensus_config.yaml` file is rendered from a helm chart. See the +[example](https://github.com/matter-labs/gitops-kubernetes/blob/main/apps/environments/mainnet2/server-v2/server-v2-core.yaml) +to find where you have to paste the content of the `consensus_config.yaml` file. + +You need to embed the `consensus_secrets.yaml` file into a kubernetes config: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: consensus-secrets +type: Opaque +stringData: + .consensus_secrets.yaml: +``` + +You need to add the following sections to your kubernetes config for the core server: + +```yaml +spec: + values: + persistence: + consensus-secrets-volume: + name: consensus-secrets # this is the name of the secret kubernetes object we defined above + enabled: true + type: secret + mountPath: '/etc/consensus_secrets/' + args: + - --components=state_keeper,consensus + service: + main: + ports: + consensus: + enabled: true + port: 3054 + configMap: + consensus: + enabled: true + data: + consensus_config.yaml: + env: + - name: CONSENSUS_CONFIG_PATH + value: /etc/consensus_config.yaml # this is the location rendered by the helm chart, you can't change it + - name: CONSENSUS_SECRETS_PATH + value: /etc/consensus_secrets/.consensus_secrets.yaml +``` diff --git a/docs/guides/advanced/90_advanced_debugging.md b/docs/src/guides/advanced/90_advanced_debugging.md similarity index 100% rename from docs/guides/advanced/90_advanced_debugging.md rename to docs/src/guides/advanced/90_advanced_debugging.md diff --git a/docs/guides/advanced/91_docker_and_ci.md b/docs/src/guides/advanced/91_docker_and_ci.md similarity index 93% rename from docs/guides/advanced/91_docker_and_ci.md rename to docs/src/guides/advanced/91_docker_and_ci.md index ff1c7843b8b1..885d3155dd6c 100644 --- a/docs/guides/advanced/91_docker_and_ci.md +++ b/docs/src/guides/advanced/91_docker_and_ci.md @@ -64,8 +64,8 @@ zk After this, you can run any commands you need. -When you see a command like `ci_run zk contract build` in the CI - this simply means that it executed -`zk contract build` inside that docker container. +When you see a command like `ci_run zkstack dev contracts` in the CI - this simply means that it executed +`zkstack dev contracts` inside that docker container. **IMPORTANT** - by default, docker is running in the mode, where it does NOT persist the changes. So if you exit that shell, all the changes will be removed (so when you restart, you'll end up in the same pristine condition). You can diff --git a/docs/guides/advanced/README.md b/docs/src/guides/advanced/README.md similarity index 93% rename from docs/guides/advanced/README.md rename to docs/src/guides/advanced/README.md index 5a3673b558ad..e0f8a82f2fb5 100644 --- a/docs/guides/advanced/README.md +++ b/docs/src/guides/advanced/README.md @@ -20,9 +20,10 @@ way. - [ZK intuition](./13_zk_intuition.md) - [ZK deeper overview](./14_zk_deeper_overview.md) - [Prover keys](./15_prover_keys.md) +- [Decentralization](./16_decentralization.md) Additionally, there are a few articles that cover specific topics that may be useful for developers actively working on -`zksync-era` repo.
+`zksync-era` repo: + +- [Advanced debugging](./90_advanced_debugging.md) - [Docker and CI](./91_docker_and_ci.md) diff --git a/docs/guides/architecture.md b/docs/src/guides/architecture.md similarity index 61% rename from docs/guides/architecture.md rename to docs/src/guides/architecture.md index 25676ad74aa7..6af177ab8b69 100644 --- a/docs/guides/architecture.md +++ b/docs/src/guides/architecture.md @@ -5,7 +5,7 @@ structure of the physical architecture of the ZKsync Era project. ## High-Level Overview -The zksync-2-dev repository has the following main units: +The zksync-era repository has the following main units: **Smart Contracts:** All the smart contracts in charge of the protocols on the L1 & L2. Some main contracts: @@ -31,29 +31,53 @@ APIs, rather via the single source of truth -- the db storage layer. ## Low-Level Overview -This section provides a physical map of folders & files in this repository. +This section provides a physical map of folders & files in this repository. It doesn't aim to be complete; it only shows +the most important parts. -- `/contracts` - - - `/ethereum`: Smart contracts deployed on the Ethereum L1. - - `/zksync`: Smart contracts deployed on the ZKsync L2. +- `/contracts`: A submodule with L1, L2, and system contracts. See + [repository](https://github.com/matter-labs/era-contracts/). - `/core` - `/bin`: Executables for the microservices components comprising ZKsync Core Node. - - `/admin-tools`: CLI tools for admin operations (e.g. restarting prover jobs). + - `/zksync_server`: Main sequencer implementation. - `/external_node`: A read replica that can sync from the main node. + - `/tee_prover`: Implementation of the TEE prover. + + - `/node`: Composable node parts. + + - `/node_framework`: Framework used to compose parts of the node. + - `/api_server`: Implementation of the Web3 JSON RPC server. + - `/base_token_adjuster`: Adaptor to support custom (non-ETH) base tokens. + - `/block_reverter`: Component for reverting L2 blocks and L1 batches. + - `/commitment_generator`: Component for calculation of commitments required for ZKP generation. + - `/consensus`: p2p utilities. + - `/consistency_checker`: Security component for the external node. + - `/da_clients`: Clients for different data availability solutions. + - `/da_dispatcher`: Adaptor for alternative DA solutions. + - `/eth_sender`: Component responsible for submitting batches to the L1 contract. + - `/eth_watch`: Component responsible for retrieving data from the L1 contract. + - `/fee_model`: Fee logic implementation. + - `/genesis`: Logic for performing chain genesis. + - `/metadata_calculator`: Component responsible for Merkle tree maintenance. + - `/node_storage_init`: Strategies for node initialization. + - `/node_sync`: Node synchronization for the external node. + - `/proof_data_handler`: Gateway API for interaction with the prover subsystem. + - `/reorg_detector`: Component responsible for detecting reorgs on the external node. + - `/state_keeper`: Main part of the sequencer, responsible for forming blocks and L1 batches. + - `/vm_runner`: Set of components generating various data by re-running sealed L1 batches. - `/lib`: All the library crates used as dependencies of the binary crates above. - `/basic_types`: Crate with essential ZKsync primitive types. - - `/config`: All the configured values used by the different ZKsync apps. + - `/config`: All the configuration values used by the different ZKsync apps. - `/contracts`: Contains definitions of commonly used smart contracts.
- - `/crypto`: Cryptographical primitives used by the different ZKsync crates. + - `/crypto_primitives`: Cryptographic primitives used by the different ZKsync crates. - `/dal`: Data availability layer - `/migrations`: All the db migrations applied to create the storage layer. - `/src`: Functionality to interact with the different db tables. + - `/db_connection`: Generic DB interface. - `/eth_client`: Module providing an interface to interact with an Ethereum node. - `/eth_signer`: Module to sign messages and txs. - `/mempool`: Implementation of the ZKsync transaction pool. @@ -61,37 +85,17 @@ This section provides a physical map of folders & files in this repository. - `/mini_merkle_tree`: In-memory implementation of a sparse Merkle tree. - `/multivm`: A wrapper over several versions of VM that have been used by the main node. - `/object_store`: Abstraction for storing blobs outside the main data store. - - `/prometheus_exporter`: Prometheus data exporter. - `/queued_job_processor`: An abstraction for async job processing - `/state`: A state keeper responsible for handling transaction execution and creating miniblocks and L1 batches. - `/storage`: An encapsulated database interface. - `/test_account`: A representation of ZKsync account. - `/types`: ZKsync network operations, transactions, and common types. - `/utils`: Miscellaneous helpers for ZKsync crates. - - `/vlog`: ZKsync logging utility. - - `/vm`: ULightweight out-of-circuit VM interface. + - `/vlog`: ZKsync observability stack. + - `/vm_interface`: Generic interface for ZKsync virtual machine. - `/web3_decl`: Declaration of the Web3 API. - - `zksync_core/src` - - `/api_server` Externally facing APIs. - - `/web3`: ZKsync implementation of the Web3 API. - - `/tx_sender`: Helper module encapsulating the transaction processing logic. - - `/bin`: The executable main starting point for the ZKsync server. - - `/consistency_checker`: ZKsync watchdog. - - `/eth_sender`: Submits transactions to the ZKsync smart contract. - - `/eth_watch`: Fetches data from the L1. for L2 censorship resistance. - - `/fee_monitor`: Monitors the ratio of fees collected by executing txs over the costs of interacting with - Ethereum. - - `/fee_ticker`: Module to define the price components of L2 transactions. - - `/gas_adjuster`: Module to determine the fees to pay in txs containing blocks submitted to the L1. - - `/gas_tracker`: Module for predicting L1 gas cost for the Commit/PublishProof/Execute operations. - - `/metadata_calculator`: Module to maintain the ZKsync state tree. - - `/state_keeper`: The sequencer. In charge of collecting the pending txs from the mempool, executing them in the - VM, and sealing them in blocks. - - `/witness_generator`: Takes the sealed blocks and generates a _Witness_, the input for the prover containing the - circuits to be proved. - `/tests`: Testing infrastructure for ZKsync network. - - `/cross_external_nodes_checker`: A tool for checking external nodes consistency against the main node. - `/loadnext`: An app for load testing the ZKsync server. - `/ts-integration`: Integration tests set implemented in TypeScript. @@ -106,6 +110,3 @@ This section provides a physical map of folders & files in this repository. - `/env`:`.env` files that contain environment variables for different configurations of ZKsync Server / Prover. - `/keys`: Verification keys for `circuit` module. - -- `/sdk`: Implementation of client libraries for the ZKsync network in different programming languages. - - `/zksync-rs`: Rust client library for ZKsync.
diff --git a/docs/guides/build-docker.md b/docs/src/guides/build-docker.md similarity index 100% rename from docs/guides/build-docker.md rename to docs/src/guides/build-docker.md diff --git a/docs/src/guides/development.md b/docs/src/guides/development.md new file mode 100644 index 000000000000..fb8dd44a6c7a --- /dev/null +++ b/docs/src/guides/development.md @@ -0,0 +1,197 @@ +# Development guide + +This document outlines the steps for setting up and working with ZKsync. + +## Prerequisites + +If you haven't already, install the prerequisites as described in [Install Dependencies](./setup-dev.md). + +## Installing the local ZK Stack CLI + +To set up local development, begin by installing +[ZK Stack CLI](https://github.com/matter-labs/zksync-era/blob/main/zkstack_cli/README.md). From the project's root +directory, run the following commands: + +```bash +cd ./zkstack_cli/zkstackup +./install --local +``` + +This installs `zkstackup` in your user binaries directory (e.g., `$HOME/.local/bin/`) and adds it to your `PATH`. + +After installation, open a new terminal or reload your shell profile. From the project's root directory, you can now +run: + +```bash +zkstackup --local +``` + +This command installs `zkstack` from the current source directory. + +You can proceed to verify the installation and start familiarizing yourself with the CLI by running: + +```bash +zkstack --help +``` + +> NOTE: Whenever you want to update your local installation with your changes, just rerun: +> +> ```bash +> zkstackup --local +> ``` +> +> You might find it convenient to add this alias to your shell profile: +> +> `alias zkstackup='zkstackup --path /path/to/zksync-era'` + +## Configure Ecosystem + +The project root directory includes configuration files for an ecosystem with a single chain, `era`. To initialize the +ecosystem, first start the required containers: + +```bash +zkstack containers +``` + +Next, run: + +```bash +zkstack ecosystem init +``` + +These commands will guide you through the configuration options for setting up the ecosystem. + +> NOTE: For local development only, you can also use the development defaults by supplying the `--dev` flag. + +Initialization may take some time, but key steps (such as downloading and unpacking keys or setting up containers) only +need to be completed once. + +To see more detailed output, you can run commands with the `--verbose` flag. + +## Cleanup + +To clean up the local ecosystem (e.g., removing containers and clearing the contract cache), run: + +```bash +zkstack dev clean all +``` + +You can then reinitialize the ecosystem as described in the [Configure Ecosystem](#configure-ecosystem) section. + +```bash +zkstack containers +zkstack ecosystem init +``` + +## Committing changes + +`zksync` uses pre-commit and pre-push git hooks for basic code integrity checks. Hooks are set up automatically within +the workspace initialization process. These hooks will not allow you to commit code that does not pass several checks. + +Currently, the following criteria are checked (you can run the same commands manually, as shown below): + +- Code must be formatted via `zkstack dev fmt`. +- Code must be linted via `zkstack dev lint`.
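+ +For example, to run the same checks by hand before committing (a minimal sketch based on the two commands named above): + +```bash +zkstack dev fmt # format the code in place +zkstack dev lint # run the lint checks +```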
+ +## Testing + +ZK Stack CLI offers multiple subcommands for running specific integration and unit tests: + +```bash +zkstack dev test --help +``` + +```bash +Usage: zkstack dev test [OPTIONS] + +Commands: + integration Run integration tests + fees Run fees test + revert Run revert tests + recovery Run recovery tests + upgrade Run upgrade tests + build Build all test dependencies + rust Run unit-tests, accepts optional cargo test flags + l1-contracts Run L1 contracts tests + prover Run prover tests + wallet Print test wallets information + loadtest Run loadtest + help Print this message or the help of the given subcommand(s) +``` + +### Running unit tests + +You can run unit tests for the Rust crates in the project by running: + +```bash +zkstack dev test rust +``` + +### Running integration tests + +Running integration tests is more complex. Some tests require a running server, while others need the system to be in a +specific state. Please refer to our CI scripts +[ci-core-reusable.yml](https://github.com/matter-labs/zksync-era/blob/main/.github/workflows/ci-core-reusable.yml) to +have a better understanding of the process. + +### Running load tests + +The current load test implementation only supports the legacy bridge. To use it, you need to create a new chain with +legacy bridge support: + +```bash +zkstack chain create --legacy-bridge +zkstack chain init +``` + +After initializing the chain with a legacy bridge, you can run the load test against it. + +```bash +zkstack dev test loadtest +``` + +> WARNING: Never use legacy bridges in non-testing environments. + +## Contracts + +### Build contracts + +Run: + +```bash +zkstack dev contracts --help +``` + +to see all the options. + +### Publish source code on Etherscan + +#### Verifier Options + +Most commands interacting with smart contracts support the same verification options as Foundry's `forge` command. Just +double-check that the following options are available in the subcommand: + +```bash +--verifier -- Verifier to use +--verifier-api-key -- Verifier API key +--verifier-url -- Verifier URL, if using a custom provider +``` + +#### Using Foundry + +You can use `foundry` to verify the source code of the contracts. + +```bash +forge verify-contract +``` + +Verifies a smart contract on a chosen verification provider. + +You must provide: + +- The contract address. +- The contract name or the path to the contract. +- In the case of Etherscan verification, you must also provide: + - Your Etherscan API key, either by passing it as an argument or by setting `ETHERSCAN_API_KEY`. + +For more information, check [Foundry's documentation](https://book.getfoundry.sh/reference/forge/forge-verify-contract).
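+ +For illustration, a hypothetical Etherscan verification could look as follows (the API key value, contract address, and contract path are placeholders, not values from this repository): + +```bash +# Hypothetical example: substitute your own key, address, and contract path +export ETHERSCAN_API_KEY="your-api-key-here" +forge verify-contract \ + --verifier etherscan \ + 0x0000000000000000000000000000000000000000 \ + src/MyContract.sol:MyContract +```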
diff --git a/docs/guides/external-node/00_quick_start.md b/docs/src/guides/external-node/00_quick_start.md similarity index 100% rename from docs/guides/external-node/00_quick_start.md rename to docs/src/guides/external-node/00_quick_start.md diff --git a/docs/guides/external-node/01_intro.md b/docs/src/guides/external-node/01_intro.md similarity index 100% rename from docs/guides/external-node/01_intro.md rename to docs/src/guides/external-node/01_intro.md diff --git a/docs/guides/external-node/02_configuration.md b/docs/src/guides/external-node/02_configuration.md similarity index 100% rename from docs/guides/external-node/02_configuration.md rename to docs/src/guides/external-node/02_configuration.md diff --git a/docs/guides/external-node/03_running.md b/docs/src/guides/external-node/03_running.md similarity index 100% rename from docs/guides/external-node/03_running.md rename to docs/src/guides/external-node/03_running.md diff --git a/docs/guides/external-node/04_observability.md b/docs/src/guides/external-node/04_observability.md similarity index 100% rename from docs/guides/external-node/04_observability.md rename to docs/src/guides/external-node/04_observability.md diff --git a/docs/guides/external-node/05_troubleshooting.md b/docs/src/guides/external-node/05_troubleshooting.md similarity index 100% rename from docs/guides/external-node/05_troubleshooting.md rename to docs/src/guides/external-node/05_troubleshooting.md diff --git a/docs/guides/external-node/06_components.md b/docs/src/guides/external-node/06_components.md similarity index 100% rename from docs/guides/external-node/06_components.md rename to docs/src/guides/external-node/06_components.md diff --git a/docs/guides/external-node/07_snapshots_recovery.md b/docs/src/guides/external-node/07_snapshots_recovery.md similarity index 100% rename from docs/guides/external-node/07_snapshots_recovery.md rename to docs/src/guides/external-node/07_snapshots_recovery.md diff --git a/docs/guides/external-node/08_pruning.md b/docs/src/guides/external-node/08_pruning.md similarity index 94% rename from docs/guides/external-node/08_pruning.md rename to docs/src/guides/external-node/08_pruning.md index 7f7dfc34d4a9..06bd9f8d8a9d 100644 --- a/docs/guides/external-node/08_pruning.md +++ b/docs/src/guides/external-node/08_pruning.md @@ -53,6 +53,12 @@ be pruned after it has been executed on Ethereum. Pruning can be disabled or enabled and the data retention period can be freely changed during the node lifetime. +> [!WARNING] +> +> Pruning should be disabled when recovering the Merkle tree (e.g., if a node ran in +> [the treeless mode](09_treeless_mode.md) before, or if its tree needs a reset for whatever reason). Otherwise, tree +> recovery will almost definitely result in an error, or worse, in a corrupted tree. + ## Storage requirements for pruned nodes The storage requirements depend on how long you configure to retain the data, but are roughly: diff --git a/docs/guides/external-node/09_treeless_mode.md b/docs/src/guides/external-node/09_treeless_mode.md similarity index 92% rename from docs/guides/external-node/09_treeless_mode.md rename to docs/src/guides/external-node/09_treeless_mode.md index 59e6f6412d31..ceeea6f86c67 100644 --- a/docs/guides/external-node/09_treeless_mode.md +++ b/docs/src/guides/external-node/09_treeless_mode.md @@ -59,12 +59,6 @@ or not running it when initializing a node. > (order of 2–3 hours for the mainnet) because the node no longer needs to recover the Merkle tree before starting > catching up.
-> [!WARNING] -> -> In contrast to the tree fetcher, the Merkle tree cannot be safely switched on after a significant delay if pruning is -> enabled (some data necessary for tree update may have been pruned while the tree was off). We plan to fix this flaw in -> the future. If pruning is disabled, the Merkle tree _can_ be freely switched on / off. - ## Monitoring tree fetcher Tree fetcher information is logged with the `zksync_node_sync::tree_data_fetcher` target. diff --git a/docs/src/guides/external-node/10_decentralization.md b/docs/src/guides/external-node/10_decentralization.md new file mode 100644 index 000000000000..f2b1782c2d72 --- /dev/null +++ b/docs/src/guides/external-node/10_decentralization.md @@ -0,0 +1,91 @@ +# Decentralization + +In the default setup, the ZKsync node will fetch data from the ZKsync API endpoint maintained by Matter Labs. To reduce +the reliance on this centralized endpoint, we have developed a decentralized p2p networking stack (aka gossipnet) which +will eventually be used instead of the ZKsync API for synchronizing data. + +On the gossipnet, the data integrity will be protected by the BFT (Byzantine fault-tolerant) consensus algorithm +(currently data is signed just by the main node though). + +## Enabling gossipnet on your node + +> [!NOTE] +> +> Because the data transmitted over the gossipnet is signed by the main node (and eventually by the consensus quorum), +> the signatures need to be backfilled to the node's local storage the first time you switch from centralized (ZKsync +> API based) synchronization to the decentralized (gossipnet based) synchronization (this is a one-time thing). With the +> current implementation it may take a couple of hours and gets faster the more nodes you add to the +> `gossip_static_outbound` list (see below). We are working to remove this inconvenience. + +> [!NOTE] +> +> The minimal supported server version for this is +> [24.11.0](https://github.com/matter-labs/zksync-era/releases/tag/core-v24.11.0) + +### Generating secrets + +Each participant node of the gossipnet has to have an identity (a public/secret key pair). When running your node for +the first time, generate the secrets by running: + +``` +docker run --entrypoint /usr/bin/zksync_external_node "matterlabs/external-node:2.0-v25.1.0" generate-secrets > consensus_secrets.yaml +chmod 600 consensus_secrets.yaml +``` + +> [!NOTE] +> +> NEVER reveal the secret keys used by your node. Otherwise, someone can impersonate your node on the gossipnet. If you +> suspect that your secret key has been leaked, you can generate fresh keys using the same tool. +> +> If you want someone else to connect to your node, give them your PUBLIC key instead. Both public and secret keys are +> present in the `consensus_secrets.yaml` (public keys are in comments). + +### Preparing configuration file + +Copy the template of the consensus configuration file (for +[mainnet](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml) +or +[testnet](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml) +). + +> [!NOTE] +> +> You need to fill in the `public_addr` field. This is the address that will (not implemented yet) be advertised over +> gossipnet to other nodes, so that they can establish connections to your node. If you don't want to expose your node +> to the public internet, you can use an IP address in your local network.
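+ +For illustration, a filled-in mainnet config could look as follows (a sketch only; the `public_addr` IP is a placeholder, and the individual fields are described below): + +```bash +# Hypothetical example: replace 203.0.113.10 with your node's actual public IP +cat > mainnet_consensus_config.yaml <<'EOF' +server_addr: '0.0.0.0:3054' +public_addr: '203.0.113.10:3054' +max_payload_size: 5000000 +gossip_dynamic_inbound_limit: 100 +EOF +```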
+ +Currently, the config contains the following fields (refer to config +[schema](https://github.com/matter-labs/zksync-era/blob/990676c5f84afd2ff8cd337f495c82e8d1f305a4/core/lib/protobuf_config/src/proto/core/consensus.proto#L66) +for more details): + +- `server_addr` - local TCP socket address that the node should listen on for incoming connections. Note that this is an + additional TCP port that will be opened by the node. +- `public_addr` - the public address of your node that will be advertised over the gossipnet. +- `max_payload_size` - limit (in bytes) on the size of the ZKsync Era block received from the gossipnet. This protects + your node from getting DoS'ed by too large network messages. Use the value from the template. +- `gossip_dynamic_inbound_limit` - maximum number of unauthenticated concurrent inbound connections that can be + established to your node. This is a DDoS protection measure. +- `gossip_static_outbound` - list of trusted peers that your node should always try to connect to. The template contains + the nodes maintained by Matter Labs, but you can add more if you know any. Note that the list contains both the network + address AND the public key of the node - this prevents spoofing attacks. + +### Setting environment variables + +Uncomment (or add) the following lines in your `.env` config: + +``` +EN_CONSENSUS_CONFIG_PATH=... +EN_CONSENSUS_SECRETS_PATH=... +``` + +These variables should point to your consensus config and secrets files that we have just created. Tweak the paths to +the files if you have placed them differently. + +### Add `--enable-consensus` flag to your entry point command + +For the consensus configuration to take effect, you have to add the `--enable-consensus` flag to the command line when +running the node, for example: + +``` +docker run "matterlabs/external-node:2.0-v25.1.0" --enable-consensus +``` diff --git a/docs/src/guides/external-node/README.md b/docs/src/guides/external-node/README.md new file mode 100644 index 000000000000..becd9846d4f2 --- /dev/null +++ b/docs/src/guides/external-node/README.md @@ -0,0 +1 @@ +# External node diff --git a/docs/guides/external-node/docker-compose-examples/configs/generate_secrets.sh b/docs/src/guides/external-node/docker-compose-examples/configs/generate_secrets.sh similarity index 100% rename from docs/guides/external-node/docker-compose-examples/configs/generate_secrets.sh rename to docs/src/guides/external-node/docker-compose-examples/configs/generate_secrets.sh diff --git a/docs/src/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml b/docs/src/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml new file mode 100644 index 000000000000..08f5861daa83 --- /dev/null +++ b/docs/src/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml @@ -0,0 +1,5 @@ +server_addr: '0.0.0.0:3054' +public_addr: '127.0.0.1:3054' +debug_page_addr: '0.0.0.0:5000' +max_payload_size: 5000000 +gossip_dynamic_inbound_limit: 100 diff --git a/docs/src/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml b/docs/src/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml new file mode 100644 index 000000000000..08f5861daa83 --- /dev/null +++ b/docs/src/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml @@ -0,0 +1,5 @@ +server_addr: '0.0.0.0:3054' +public_addr: '127.0.0.1:3054' +debug_page_addr: '0.0.0.0:5000' +max_payload_size: 5000000
+gossip_dynamic_inbound_limit: 100 diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json b/docs/src/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json similarity index 100% rename from docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json rename to docs/src/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json b/docs/src/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json similarity index 100% rename from docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json rename to docs/src/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml b/docs/src/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml similarity index 100% rename from docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml rename to docs/src/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/datasources/prometheus.yml b/docs/src/guides/external-node/docker-compose-examples/grafana/provisioning/datasources/prometheus.yml similarity index 100% rename from docs/guides/external-node/docker-compose-examples/grafana/provisioning/datasources/prometheus.yml rename to docs/src/guides/external-node/docker-compose-examples/grafana/provisioning/datasources/prometheus.yml diff --git a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml b/docs/src/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml similarity index 97% rename from docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml rename to docs/src/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml index 9c8c5bb31425..5ee9de187bf0 100644 --- a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml +++ b/docs/src/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml @@ -52,7 +52,7 @@ services: # Generation of consensus secrets. # The secrets are generated iff the secrets file doesn't already exist. 
generate-secrets: - image: "matterlabs/external-node:2.0-v24.16.0" + image: "matterlabs/external-node:2.0-v25.1.0" entrypoint: [ "/configs/generate_secrets.sh", @@ -61,7 +61,7 @@ services: volumes: - ./configs:/configs external-node: - image: "matterlabs/external-node:2.0-v24.16.0" + image: "matterlabs/external-node:2.0-v25.1.0" entrypoint: [ "/usr/bin/entrypoint.sh", diff --git a/docs/guides/external-node/docker-compose-examples/prometheus/prometheus.yml b/docs/src/guides/external-node/docker-compose-examples/prometheus/prometheus.yml similarity index 100% rename from docs/guides/external-node/docker-compose-examples/prometheus/prometheus.yml rename to docs/src/guides/external-node/docker-compose-examples/prometheus/prometheus.yml diff --git a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml b/docs/src/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml similarity index 100% rename from docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml rename to docs/src/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml diff --git a/docs/guides/external-node/prepared_configs/mainnet-config.env b/docs/src/guides/external-node/prepared_configs/mainnet-config.env similarity index 98% rename from docs/guides/external-node/prepared_configs/mainnet-config.env rename to docs/src/guides/external-node/prepared_configs/mainnet-config.env index bce812084665..eac24f4ab7ed 100644 --- a/docs/guides/external-node/prepared_configs/mainnet-config.env +++ b/docs/src/guides/external-node/prepared_configs/mainnet-config.env @@ -70,7 +70,7 @@ RUST_LOG=zksync_core=debug,zksync_dal=info,zksync_eth_client=info,zksync_merkle_ RUST_BACKTRACE=full RUST_LIB_BACKTRACE=1 -# Settings related to gossip network, see `09_decentralization.md` +# Settings related to gossip network, see `10_decentralization.md` #EN_CONSENSUS_CONFIG_PATH=./mainnet_consensus_config.yaml #EN_CONSENSUS_SECRETS_PATH=./consensus_secrets.yaml diff --git a/docs/src/guides/external-node/prepared_configs/mainnet_consensus_config.yaml b/docs/src/guides/external-node/prepared_configs/mainnet_consensus_config.yaml new file mode 100644 index 000000000000..08347a14efa0 --- /dev/null +++ b/docs/src/guides/external-node/prepared_configs/mainnet_consensus_config.yaml @@ -0,0 +1,4 @@ +server_addr: '0.0.0.0:3054' +public_addr: ':3054' +max_payload_size: 5000000 +gossip_dynamic_inbound_limit: 100 diff --git a/docs/guides/external-node/prepared_configs/testnet-goerli-config-deprecated.env b/docs/src/guides/external-node/prepared_configs/testnet-goerli-config-deprecated.env similarity index 100% rename from docs/guides/external-node/prepared_configs/testnet-goerli-config-deprecated.env rename to docs/src/guides/external-node/prepared_configs/testnet-goerli-config-deprecated.env diff --git a/docs/guides/external-node/prepared_configs/testnet-sepolia-config.env b/docs/src/guides/external-node/prepared_configs/testnet-sepolia-config.env similarity index 98% rename from docs/guides/external-node/prepared_configs/testnet-sepolia-config.env rename to docs/src/guides/external-node/prepared_configs/testnet-sepolia-config.env index 182012e2850c..c8f855b4a4a2 100644 --- a/docs/guides/external-node/prepared_configs/testnet-sepolia-config.env +++ b/docs/src/guides/external-node/prepared_configs/testnet-sepolia-config.env @@ -70,7 +70,7 @@ RUST_LOG=zksync_core=debug,zksync_dal=info,zksync_eth_client=info,zksync_merkle_ RUST_BACKTRACE=full 
RUST_LIB_BACKTRACE=1 -# Settings related to gossip network, see `09_decentralization.md` +# Settings related to gossip network, see `10_decentralization.md` #EN_CONSENSUS_CONFIG_PATH=./testnet_consensus_config.yaml #EN_CONSENSUS_SECRETS_PATH=./consensus_secrets.yaml diff --git a/docs/src/guides/external-node/prepared_configs/testnet_consensus_config.yaml b/docs/src/guides/external-node/prepared_configs/testnet_consensus_config.yaml new file mode 100644 index 000000000000..08347a14efa0 --- /dev/null +++ b/docs/src/guides/external-node/prepared_configs/testnet_consensus_config.yaml @@ -0,0 +1,4 @@ +server_addr: '0.0.0.0:3054' +public_addr: ':3054' +max_payload_size: 5000000 +gossip_dynamic_inbound_limit: 100 diff --git a/docs/src/guides/launch.md b/docs/src/guides/launch.md new file mode 100644 index 000000000000..52872a53cf2a --- /dev/null +++ b/docs/src/guides/launch.md @@ -0,0 +1,202 @@ +# Running the application + +This document covers common scenarios for launching the ZKsync application set locally. + +## Prerequisites + +Prepare dev environment prerequisites: see [Installing dependencies](./setup-dev.md). + +## Setup local dev environment + +Run the required containers with: + +```bash +zkstack containers +``` + +Setup: + +```bash +zkstack ecosystem init +``` + +To completely reset the dev environment: + +- Stop services: + + ```bash + zkstack dev clean all + ``` + +- Repeat the setup procedure above: + + ```bash + zkstack containers + zkstack ecosystem init + ``` + +### Run observability stack + +If you want to run the [Dockprom](https://github.com/stefanprodan/dockprom/) stack (Prometheus, Grafana) alongside other +containers, add the `--observability` parameter during initialization: + +```bash +zkstack containers --observability +``` + +or select `yes` when prompted during the interactive execution of the command. + +That will also provision Grafana with +[era-observability](https://github.com/matter-labs/era-observability/tree/main/dashboards) dashboards. You can then +access it at `http://127.0.0.1:3000/` under credentials `admin/admin`. + +> If you don't see any data displayed on the Grafana dashboards, try setting the timeframe to "Last 30 minutes". You +> will also need `jq` installed on your system. + +## Ecosystem Configuration + +The ecosystem configuration is spread across multiple files and directories: + +1. Root level: + + - `ZkStack.yaml`: Main configuration file for the entire ecosystem. + +2. `configs/` directory: + + - `apps/`: + - `portal_config.json`: Configuration for the portal application. + - `contracts.yaml`: Defines smart contract settings and addresses. + - `erc20.yaml`: Configuration for ERC20 tokens. + - `initial_deployments.yaml`: Specifies initial ERC20 token deployments. + - `wallets.yaml`: Contains wallet configurations. + +3. `chains/<chain_name>/` directory: + + - `artifacts/`: Contains build/execution artifacts. + - `configs/`: Chain-specific configuration files. + - `contracts.yaml`: Chain-specific smart contract settings. + - `external_node.yaml`: Configuration for external nodes. + - `general.yaml`: General chain configuration. + - `genesis.yaml`: Genesis configuration for the chain. + - `secrets.yaml`: Secrets and private keys for the chain. + - `wallets.yaml`: Wallet configurations for the chain. + - `db/main/`: Database files for the chain. + - `ZkStack.yaml`: Chain-specific ZkStack configuration.
+ +These configuration files are automatically generated during the ecosystem initialization (`zkstack ecosystem init`) and +chain initialization (`zkstack chain init`) processes. They control various aspects of the ZKsync ecosystem, including: + +- Network settings +- Smart contract deployments +- Token configurations +- Database settings +- Application/Service-specific parameters + +It's important to note that while these files can be manually edited, any changes may be overwritten if the ecosystem or +chain is reinitialized. Always back up your modifications and exercise caution when making direct changes to these +files. + +For specific configuration needs, it's recommended to use the appropriate `zkstack` commands or consult the +documentation for safe ways to customize your setup. + +## Build and run server + +Run server: + +```bash +zkstack server +``` + +The server's configuration files can be found in the `/chains/<chain_name>/configs` directory. These files are created when +running the `zkstack chain init` command. + +### Modifying configuration files manually + +To manually modify configuration files: + +1. Locate the relevant config file in `/chains/<chain_name>/configs` +2. Open the file in a text editor +3. Make necessary changes, following the existing format +4. Save the file +5. Restart the relevant services for changes to take effect: + +```bash +zkstack server +``` + +> NOTE: Manual changes to configuration files may be overwritten if the ecosystem is reinitialized or the chain is +> reinitialized. + +> WARNING: Some properties, such as ports, may require manual modification across different configuration files to +> ensure consistency and avoid conflicts. + +## Running the server using Google Cloud Storage object store instead of the default in-memory store + +Get the `service_account.json` file containing the GCP credentials from the kubernetes secret for the relevant +environment (stage2/testnet2), add that file to the default location `~/gcloud/service_account.json`, or update +`object_store.toml` with the file location: + +```bash +zkstack prover init --bucket-base-url={url} --credentials-file={path/to/service_account.json} +``` + +## Running prover server + +Running on a machine with a GPU: + +```bash +zkstack prover run --component=prover +``` + +> NOTE: Running on a machine without a GPU is currently not supported by `zkstack`. + +## Running the verification key generator + +```bash +# ensure that setup_2^26.key is in the current directory; the file can be downloaded from https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + +# To generate all verification keys +cargo run --release --bin zksync_verification_key_generator +``` + +## Generating binary verification keys for existing JSON verification keys + +```bash +cargo run --release --bin zksync_json_to_binary_vk_converter -- -o /path/to/output-binary-vk +``` + +## Generating commitment for existing verification keys + +```bash +cargo run --release --bin zksync_commitment_generator +``` + +## Running the contract verifier + +```bash +zkstack contract-verifier run +``` + +## Troubleshooting + +### Connection Refused + +#### Problem + +```bash +error sending request for url (http://127.0.0.1:8545/): error trying to connect: tcp connect error: Connection refused (os error 61) +``` + +#### Description + +It appears that no containers are currently running, which is likely the reason you're encountering this error. + +#### Solution + +Ensure that the necessary containers have been started and are functioning correctly to resolve the issue.
+ +```bash +zkstack containers +``` diff --git a/docs/guides/repositories.md b/docs/src/guides/repositories.md similarity index 100% rename from docs/guides/repositories.md rename to docs/src/guides/repositories.md diff --git a/docs/guides/setup-dev.md b/docs/src/guides/setup-dev.md similarity index 84% rename from docs/guides/setup-dev.md rename to docs/src/guides/setup-dev.md index 4eef211cd3d1..43350ac3314d 100644 --- a/docs/guides/setup-dev.md +++ b/docs/src/guides/setup-dev.md @@ -14,20 +14,20 @@ git config --global url."https://".insteadOf git:// # Rust curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh + # NVM curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash + # All necessary stuff sudo apt-get update -sudo apt-get install build-essential pkg-config cmake clang lldb lld libssl-dev postgresql apt-transport-https ca-certificates curl software-properties-common +sudo apt-get install -y build-essential pkg-config cmake clang lldb lld libssl-dev libpq-dev apt-transport-https ca-certificates curl software-properties-common + # Install docker curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable" sudo apt install docker-ce sudo usermod -aG docker ${USER} -# Stop default postgres (as we'll use the docker one) -sudo systemctl stop postgresql -sudo systemctl disable postgresql # Start docker. sudo systemctl start docker @@ -45,9 +45,9 @@ cargo install cargo-nextest # SQL tools cargo install sqlx-cli --version 0.8.1 -# Foundry -curl -L https://foundry.paradigm.xyz | bash -foundryup --branch master +# Foundry ZKsync +curl -L https://raw.githubusercontent.com/matter-labs/foundry-zksync/main/install-foundry-zksync | bash +foundryup-zksync # Non CUDA (GPU) setup, can be skipped if the machine has a CUDA installed for provers # Don't do that if you intend to run provers on your machine. Check the prover docs for a setup instead. @@ -60,24 +60,24 @@ cd zksync-era git submodule update --init --recursive ``` -Don't forget to [add env variables](#Environment) and look at [tips](#tips). +Don't forget to look at [tips](#tips). ## Supported operating systems -ZKsync currently can be launched on any \*nix operating system (e.g. any linux distribution or MacOS). +ZKsync currently can be launched on any \*nix operating system (e.g. any linux distribution or macOS). If you're using Windows, then make sure to use WSL 2. Additionally, if you are going to use WSL 2, make sure that your project is located in the _linux filesystem_, since accessing NTFS partitions from within WSL is very slow. -If you're using MacOS with an ARM processor (e.g. M1/M2), make sure that you are working in the _native_ environment -(e.g. your terminal and IDE don't run in Rosetta, and your toolchain is native). Trying to work with ZKsync code via +If you're using macOS with an ARM processor (e.g. M1/M2), make sure that you are working in the _native_ environment +(e.g., your terminal and IDE don't run in Rosetta, and your toolchain is native). Trying to work with ZKsync code via Rosetta may cause problems that are hard to spot and debug, so make sure to check everything before you start. If you are a NixOS user or would like to have a reproducible environment, skip to the section about `nix`. -## `Docker` +## Docker Install `docker`. It is recommended to follow the instructions from the [official site](https://docs.docker.com/install/). @@ -117,13 +117,13 @@ at this step. 
If logging out does not resolve the issue, restarting the computer should. -## `Node` & `Yarn` +## Node.js & Yarn 1. Install `Node` (requires version `v20`). The recommended way is via [nvm](https://github.com/nvm-sh/nvm). 2. Install `yarn`. Can be done via `npm install -g yarn`. Make sure to get version 1.22.19 - you can change the version by running `yarn set version 1.22.19`. -## `clang` +## clang In order to compile RocksDB, you must have LLVM available. On debian-based linux it can be installed as follows: @@ -133,12 +133,12 @@ On debian-based linux: sudo apt-get install build-essential pkg-config cmake clang lldb lld ``` -On mac: +On macOS: You need to have an up-to-date `Xcode`. You can install it directly from `App Store`. With Xcode command line tools, you get the Clang compiler installed by default. Thus, having XCode you don't need to install `clang`. -## `OpenSSL` +## OpenSSL Install OpenSSL: @@ -154,9 +154,9 @@ On debian-based linux: sudo apt-get install libssl-dev ``` -## `Rust` +## Rust -Install the latest `rust` version. +Install the Rust toolchain version specified in `/rust-toolchain.toml` (a later stable version should also work). Instructions can be found on the [official site](https://www.rust-lang.org/tools/install). @@ -167,7 +167,7 @@ rustc --version rustc 1.xx.y (xxxxxx 20xx-yy-zz) # Output may vary depending on actual version of rust ``` -If you are using MacOS with ARM processor (e.g. M1/M2), make sure that you use an `aarch64` toolchain. For example, when +If you are using macOS with an ARM processor (e.g. M1/M2), make sure that you use an `aarch64` toolchain. For example, when you run `rustup show`, you should see a similar input: ```bash @@ -190,25 +190,26 @@ If you see `x86_64` mentioned in the output, probably you're running (or used to that's the case, you should probably change the way you run terminal, and/or reinstall your IDE, and then reinstall the Rust toolchain as well. -## Postgres +## PostgreSQL Client Library -Install the latest postgres: +For development purposes, you typically only need the PostgreSQL client library, not the full server installation. +Here's how to install it: -On mac: +On macOS: ```bash -brew install postgresql@14 +brew install libpq ``` -On debian-based linux: +On Debian-based Linux: ```bash -sudo apt-get install postgresql +sudo apt-get install libpq-dev ``` ### Cargo nextest -[cargo-nextest](https://nexte.st/) is the next-generation test runner for Rust projects. `zk test rust` uses +[cargo-nextest](https://nexte.st/) is the next-generation test runner for Rust projects. `zkstack dev test rust` uses `cargo nextest` by default. ```bash @@ -236,10 +237,13 @@ enable nix-ld. Go to the zksync folder and run `nix develop`. After it finishes, you are in a shell that has all the dependencies. -## Foundry +## Foundry ZKsync + +ZKsync depends on Foundry ZKsync (a specialized fork of Foundry, tailored for ZKsync). Please follow this +[installation guide](https://foundry-book.zksync.io/getting-started/installation) to get started with Foundry ZKsync. -[Foundry](https://book.getfoundry.sh/getting-started/installation) can be utilized for deploying smart contracts. For -commands related to deployment, you can pass flags for Foundry integration. +Foundry ZKsync can also be used for deploying smart contracts. For commands related to deployment, you can pass flags +for Foundry integration.
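+ +As a sketch, a deployment command could look as follows (this assumes Foundry ZKsync's `--zksync` flag and a locally running node on its default RPC port; the contract path, RPC URL, and private key are placeholders): + +```bash +# Hypothetical example: substitute your own contract path, RPC URL, and key +forge create src/MyContract.sol:MyContract \ + --rpc-url http://127.0.0.1:3050 \ + --private-key $PRIVATE_KEY \ + --zksync +```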
## Non-GPU setup @@ -266,17 +270,6 @@ RUSTFLAGS as env var, or pass it in `config.toml` (either project level or globa rustflags = ["--cfg=no_cuda"] ``` -## Environment - -Edit the lines below and add them to your shell profile file (e.g. `~/.bash_profile`, `~/.zshrc`): - -```bash -# Add path here: -export ZKSYNC_HOME=/path/to/zksync - -export PATH=$ZKSYNC_HOME/bin:$PATH -``` - ## Tips ### Tip: `mold` @@ -294,7 +287,7 @@ export RUSTFLAGS='-C link-arg=-fuse-ld=/usr/local/bin/mold' export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="clang" ``` -## Tip: Speeding up building `RocksDB` +### Tip: Speeding up building `RocksDB` By default, each time you compile `rocksdb` crate, it will compile required C++ sources from scratch. It can be avoided by using precompiled versions of library, and it will significantly improve your build times. diff --git a/docs/src/misc/contributors.md b/docs/src/misc/contributors.md new file mode 100644 index 000000000000..77e81149e465 --- /dev/null +++ b/docs/src/misc/contributors.md @@ -0,0 +1 @@ +# Contributors diff --git a/docs/specs/README.md b/docs/src/specs/README.md similarity index 100% rename from docs/specs/README.md rename to docs/src/specs/README.md diff --git a/docs/specs/blocks_batches.md b/docs/src/specs/blocks_batches.md similarity index 100% rename from docs/specs/blocks_batches.md rename to docs/src/specs/blocks_batches.md diff --git a/docs/specs/data_availability/README.md b/docs/src/specs/data_availability/README.md similarity index 100% rename from docs/specs/data_availability/README.md rename to docs/src/specs/data_availability/README.md diff --git a/docs/specs/data_availability/compression.md b/docs/src/specs/data_availability/compression.md similarity index 100% rename from docs/specs/data_availability/compression.md rename to docs/src/specs/data_availability/compression.md diff --git a/docs/specs/data_availability/overview.md b/docs/src/specs/data_availability/overview.md similarity index 100% rename from docs/specs/data_availability/overview.md rename to docs/src/specs/data_availability/overview.md diff --git a/docs/specs/data_availability/pubdata.md b/docs/src/specs/data_availability/pubdata.md similarity index 100% rename from docs/specs/data_availability/pubdata.md rename to docs/src/specs/data_availability/pubdata.md diff --git a/docs/specs/data_availability/reconstruction.md b/docs/src/specs/data_availability/reconstruction.md similarity index 100% rename from docs/specs/data_availability/reconstruction.md rename to docs/src/specs/data_availability/reconstruction.md diff --git a/docs/specs/data_availability/validium_zk_porter.md b/docs/src/specs/data_availability/validium_zk_porter.md similarity index 100% rename from docs/specs/data_availability/validium_zk_porter.md rename to docs/src/specs/data_availability/validium_zk_porter.md diff --git a/docs/specs/img/L2_Components.png b/docs/src/specs/img/L2_Components.png similarity index 100% rename from docs/specs/img/L2_Components.png rename to docs/src/specs/img/L2_Components.png diff --git a/docs/specs/img/diamondProxy.jpg b/docs/src/specs/img/diamondProxy.jpg similarity index 100% rename from docs/specs/img/diamondProxy.jpg rename to docs/src/specs/img/diamondProxy.jpg diff --git a/docs/specs/img/governance.jpg b/docs/src/specs/img/governance.jpg similarity index 100% rename from docs/specs/img/governance.jpg rename to docs/src/specs/img/governance.jpg diff --git a/docs/specs/img/zk-the-collective-action.jpeg b/docs/src/specs/img/zk-the-collective-action.jpeg similarity index 100% 
rename from docs/specs/img/zk-the-collective-action.jpeg rename to docs/src/specs/img/zk-the-collective-action.jpeg diff --git a/docs/specs/introduction.md b/docs/src/specs/introduction.md similarity index 100% rename from docs/specs/introduction.md rename to docs/src/specs/introduction.md diff --git a/docs/specs/l1_l2_communication/README.md b/docs/src/specs/l1_l2_communication/README.md similarity index 100% rename from docs/specs/l1_l2_communication/README.md rename to docs/src/specs/l1_l2_communication/README.md diff --git a/docs/specs/l1_l2_communication/l1_to_l2.md b/docs/src/specs/l1_l2_communication/l1_to_l2.md similarity index 100% rename from docs/specs/l1_l2_communication/l1_to_l2.md rename to docs/src/specs/l1_l2_communication/l1_to_l2.md diff --git a/docs/specs/l1_l2_communication/l2_to_l1.md b/docs/src/specs/l1_l2_communication/l2_to_l1.md similarity index 100% rename from docs/specs/l1_l2_communication/l2_to_l1.md rename to docs/src/specs/l1_l2_communication/l2_to_l1.md diff --git a/docs/specs/l1_l2_communication/overview_deposits_withdrawals.md b/docs/src/specs/l1_l2_communication/overview_deposits_withdrawals.md similarity index 100% rename from docs/specs/l1_l2_communication/overview_deposits_withdrawals.md rename to docs/src/specs/l1_l2_communication/overview_deposits_withdrawals.md diff --git a/docs/specs/l1_smart_contracts.md b/docs/src/specs/l1_smart_contracts.md similarity index 100% rename from docs/specs/l1_smart_contracts.md rename to docs/src/specs/l1_smart_contracts.md diff --git a/docs/specs/overview.md b/docs/src/specs/overview.md similarity index 100% rename from docs/specs/overview.md rename to docs/src/specs/overview.md diff --git a/docs/specs/prover/README.md b/docs/src/specs/prover/README.md similarity index 100% rename from docs/specs/prover/README.md rename to docs/src/specs/prover/README.md diff --git a/docs/specs/prover/boojum_function_check_if_satisfied.md b/docs/src/specs/prover/boojum_function_check_if_satisfied.md similarity index 100% rename from docs/specs/prover/boojum_function_check_if_satisfied.md rename to docs/src/specs/prover/boojum_function_check_if_satisfied.md diff --git a/docs/specs/prover/boojum_gadgets.md b/docs/src/specs/prover/boojum_gadgets.md similarity index 100% rename from docs/specs/prover/boojum_gadgets.md rename to docs/src/specs/prover/boojum_gadgets.md diff --git a/docs/specs/prover/circuit_testing.md b/docs/src/specs/prover/circuit_testing.md similarity index 100% rename from docs/specs/prover/circuit_testing.md rename to docs/src/specs/prover/circuit_testing.md diff --git a/docs/specs/prover/circuits/README.md b/docs/src/specs/prover/circuits/README.md similarity index 100% rename from docs/specs/prover/circuits/README.md rename to docs/src/specs/prover/circuits/README.md diff --git a/docs/specs/prover/circuits/code_decommitter.md b/docs/src/specs/prover/circuits/code_decommitter.md similarity index 100% rename from docs/specs/prover/circuits/code_decommitter.md rename to docs/src/specs/prover/circuits/code_decommitter.md diff --git a/docs/specs/prover/circuits/demux_log_queue.md b/docs/src/specs/prover/circuits/demux_log_queue.md similarity index 100% rename from docs/specs/prover/circuits/demux_log_queue.md rename to docs/src/specs/prover/circuits/demux_log_queue.md diff --git a/docs/specs/prover/circuits/ecrecover.md b/docs/src/specs/prover/circuits/ecrecover.md similarity index 100% rename from docs/specs/prover/circuits/ecrecover.md rename to docs/src/specs/prover/circuits/ecrecover.md diff --git 
a/docs/specs/prover/circuits/img/diagram.png b/docs/src/specs/prover/circuits/img/diagram.png similarity index 100% rename from docs/specs/prover/circuits/img/diagram.png rename to docs/src/specs/prover/circuits/img/diagram.png diff --git a/docs/specs/prover/circuits/img/flowchart.png b/docs/src/specs/prover/circuits/img/flowchart.png similarity index 100% rename from docs/specs/prover/circuits/img/flowchart.png rename to docs/src/specs/prover/circuits/img/flowchart.png diff --git a/docs/specs/prover/circuits/img/image.png b/docs/src/specs/prover/circuits/img/image.png similarity index 100% rename from docs/specs/prover/circuits/img/image.png rename to docs/src/specs/prover/circuits/img/image.png diff --git a/docs/specs/prover/circuits/keccak_round_function.md b/docs/src/specs/prover/circuits/keccak_round_function.md similarity index 100% rename from docs/specs/prover/circuits/keccak_round_function.md rename to docs/src/specs/prover/circuits/keccak_round_function.md diff --git a/docs/specs/prover/circuits/l1_messages_hasher.md b/docs/src/specs/prover/circuits/l1_messages_hasher.md similarity index 100% rename from docs/specs/prover/circuits/l1_messages_hasher.md rename to docs/src/specs/prover/circuits/l1_messages_hasher.md diff --git a/docs/specs/prover/circuits/log_sorter.md b/docs/src/specs/prover/circuits/log_sorter.md similarity index 100% rename from docs/specs/prover/circuits/log_sorter.md rename to docs/src/specs/prover/circuits/log_sorter.md diff --git a/docs/specs/prover/circuits/main_vm.md b/docs/src/specs/prover/circuits/main_vm.md similarity index 100% rename from docs/specs/prover/circuits/main_vm.md rename to docs/src/specs/prover/circuits/main_vm.md diff --git a/docs/specs/prover/circuits/overview.md b/docs/src/specs/prover/circuits/overview.md similarity index 100% rename from docs/specs/prover/circuits/overview.md rename to docs/src/specs/prover/circuits/overview.md diff --git a/docs/specs/prover/circuits/ram_permutation.md b/docs/src/specs/prover/circuits/ram_permutation.md similarity index 100% rename from docs/specs/prover/circuits/ram_permutation.md rename to docs/src/specs/prover/circuits/ram_permutation.md diff --git a/docs/specs/prover/circuits/sha256_round_function.md b/docs/src/specs/prover/circuits/sha256_round_function.md similarity index 100% rename from docs/specs/prover/circuits/sha256_round_function.md rename to docs/src/specs/prover/circuits/sha256_round_function.md diff --git a/docs/specs/prover/circuits/sort_decommitments.md b/docs/src/specs/prover/circuits/sort_decommitments.md similarity index 100% rename from docs/specs/prover/circuits/sort_decommitments.md rename to docs/src/specs/prover/circuits/sort_decommitments.md diff --git a/docs/specs/prover/circuits/sorting.md b/docs/src/specs/prover/circuits/sorting.md similarity index 100% rename from docs/specs/prover/circuits/sorting.md rename to docs/src/specs/prover/circuits/sorting.md diff --git a/docs/specs/prover/circuits/sorting_and_deduplicating.md b/docs/src/specs/prover/circuits/sorting_and_deduplicating.md similarity index 100% rename from docs/specs/prover/circuits/sorting_and_deduplicating.md rename to docs/src/specs/prover/circuits/sorting_and_deduplicating.md diff --git a/docs/specs/prover/circuits/storage_application.md b/docs/src/specs/prover/circuits/storage_application.md similarity index 100% rename from docs/specs/prover/circuits/storage_application.md rename to docs/src/specs/prover/circuits/storage_application.md diff --git a/docs/specs/prover/circuits/storage_sorter.md 
b/docs/src/specs/prover/circuits/storage_sorter.md similarity index 100% rename from docs/specs/prover/circuits/storage_sorter.md rename to docs/src/specs/prover/circuits/storage_sorter.md diff --git a/docs/specs/prover/getting_started.md b/docs/src/specs/prover/getting_started.md similarity index 100% rename from docs/specs/prover/getting_started.md rename to docs/src/specs/prover/getting_started.md diff --git a/docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(1).png b/docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(1).png similarity index 100% rename from docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(1).png rename to docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(1).png diff --git a/docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(11).png b/docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(11).png similarity index 100% rename from docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(11).png rename to docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(11).png diff --git a/docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(12).png b/docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(12).png similarity index 100% rename from docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(12).png rename to docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(12).png diff --git a/docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(13).png b/docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(13).png similarity index 100% rename from docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(13).png rename to docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(13).png diff --git a/docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(14).png b/docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(14).png similarity index 100% rename from docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(14).png rename to docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(14).png diff --git a/docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(16).png b/docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(16).png similarity index 100% rename from docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(16).png rename to docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(16).png diff --git a/docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(17).png b/docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(17).png similarity index 100% rename from docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(17).png rename to docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(17).png diff --git a/docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(2).png b/docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(2).png similarity index 100% rename from docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(2).png rename to 
docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(2).png diff --git a/docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(3).png b/docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(3).png similarity index 100% rename from docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(3).png rename to docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(3).png diff --git a/docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(4).png b/docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(4).png similarity index 100% rename from docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(4).png rename to docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(4).png diff --git a/docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(7).png b/docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(7).png similarity index 100% rename from docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(7).png rename to docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(7).png diff --git a/docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(8).png b/docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(8).png similarity index 100% rename from docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(8).png rename to docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(8).png diff --git a/docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(9).png b/docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(9).png similarity index 100% rename from docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(9).png rename to docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied(9).png diff --git a/docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied.png b/docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied.png similarity index 100% rename from docs/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied.png rename to docs/src/specs/prover/img/boojum_function_check_if_satisfied/Check_if_satisfied.png diff --git a/docs/specs/prover/img/circuit_testing/Contest(10).png b/docs/src/specs/prover/img/circuit_testing/Contest(10).png similarity index 100% rename from docs/specs/prover/img/circuit_testing/Contest(10).png rename to docs/src/specs/prover/img/circuit_testing/Contest(10).png diff --git a/docs/specs/prover/img/circuit_testing/Contest(11).png b/docs/src/specs/prover/img/circuit_testing/Contest(11).png similarity index 100% rename from docs/specs/prover/img/circuit_testing/Contest(11).png rename to docs/src/specs/prover/img/circuit_testing/Contest(11).png diff --git a/docs/specs/prover/img/circuit_testing/Contest(12).png b/docs/src/specs/prover/img/circuit_testing/Contest(12).png similarity index 100% rename from docs/specs/prover/img/circuit_testing/Contest(12).png rename to docs/src/specs/prover/img/circuit_testing/Contest(12).png diff --git a/docs/specs/prover/img/circuit_testing/Contest(4).png b/docs/src/specs/prover/img/circuit_testing/Contest(4).png similarity index 100% rename from docs/specs/prover/img/circuit_testing/Contest(4).png rename to 
docs/src/specs/prover/img/circuit_testing/Contest(4).png diff --git a/docs/specs/prover/img/circuit_testing/Contest(5).png b/docs/src/specs/prover/img/circuit_testing/Contest(5).png similarity index 100% rename from docs/specs/prover/img/circuit_testing/Contest(5).png rename to docs/src/specs/prover/img/circuit_testing/Contest(5).png diff --git a/docs/specs/prover/img/circuit_testing/Contest(6).png b/docs/src/specs/prover/img/circuit_testing/Contest(6).png similarity index 100% rename from docs/specs/prover/img/circuit_testing/Contest(6).png rename to docs/src/specs/prover/img/circuit_testing/Contest(6).png diff --git a/docs/specs/prover/img/circuit_testing/Contest(7).png b/docs/src/specs/prover/img/circuit_testing/Contest(7).png similarity index 100% rename from docs/specs/prover/img/circuit_testing/Contest(7).png rename to docs/src/specs/prover/img/circuit_testing/Contest(7).png diff --git a/docs/specs/prover/img/circuit_testing/Contest(8).png b/docs/src/specs/prover/img/circuit_testing/Contest(8).png similarity index 100% rename from docs/specs/prover/img/circuit_testing/Contest(8).png rename to docs/src/specs/prover/img/circuit_testing/Contest(8).png diff --git a/docs/specs/prover/img/circuit_testing/Contest(9).png b/docs/src/specs/prover/img/circuit_testing/Contest(9).png similarity index 100% rename from docs/specs/prover/img/circuit_testing/Contest(9).png rename to docs/src/specs/prover/img/circuit_testing/Contest(9).png diff --git "a/docs/specs/prover/img/intro_to_zkSync\342\200\231s_ZK/circuit.png" "b/docs/src/specs/prover/img/intro_to_zkSync\342\200\231s_ZK/circuit.png" similarity index 100% rename from "docs/specs/prover/img/intro_to_zkSync\342\200\231s_ZK/circuit.png" rename to "docs/src/specs/prover/img/intro_to_zkSync\342\200\231s_ZK/circuit.png" diff --git a/docs/specs/prover/overview.md b/docs/src/specs/prover/overview.md similarity index 100% rename from docs/specs/prover/overview.md rename to docs/src/specs/prover/overview.md diff --git a/docs/specs/prover/zk_terminology.md b/docs/src/specs/prover/zk_terminology.md similarity index 100% rename from docs/specs/prover/zk_terminology.md rename to docs/src/specs/prover/zk_terminology.md diff --git a/docs/specs/zk_chains/README.md b/docs/src/specs/zk_chains/README.md similarity index 100% rename from docs/specs/zk_chains/README.md rename to docs/src/specs/zk_chains/README.md diff --git a/docs/specs/zk_chains/gateway.md b/docs/src/specs/zk_chains/gateway.md similarity index 100% rename from docs/specs/zk_chains/gateway.md rename to docs/src/specs/zk_chains/gateway.md diff --git a/docs/specs/zk_chains/img/contractsExternal.png b/docs/src/specs/zk_chains/img/contractsExternal.png similarity index 100% rename from docs/specs/zk_chains/img/contractsExternal.png rename to docs/src/specs/zk_chains/img/contractsExternal.png diff --git a/docs/specs/zk_chains/img/deployWeth.png b/docs/src/specs/zk_chains/img/deployWeth.png similarity index 100% rename from docs/specs/zk_chains/img/deployWeth.png rename to docs/src/specs/zk_chains/img/deployWeth.png diff --git a/docs/specs/zk_chains/img/depositWeth.png b/docs/src/specs/zk_chains/img/depositWeth.png similarity index 100% rename from docs/specs/zk_chains/img/depositWeth.png rename to docs/src/specs/zk_chains/img/depositWeth.png diff --git a/docs/specs/zk_chains/img/hyperbridges.png b/docs/src/specs/zk_chains/img/hyperbridges.png similarity index 100% rename from docs/specs/zk_chains/img/hyperbridges.png rename to docs/src/specs/zk_chains/img/hyperbridges.png diff --git 
a/docs/specs/zk_chains/img/hyperbridging.png b/docs/src/specs/zk_chains/img/hyperbridging.png similarity index 100% rename from docs/specs/zk_chains/img/hyperbridging.png rename to docs/src/specs/zk_chains/img/hyperbridging.png diff --git a/docs/specs/zk_chains/img/newChain.png b/docs/src/specs/zk_chains/img/newChain.png similarity index 100% rename from docs/specs/zk_chains/img/newChain.png rename to docs/src/specs/zk_chains/img/newChain.png diff --git a/docs/specs/zk_chains/interop.md b/docs/src/specs/zk_chains/interop.md similarity index 100% rename from docs/specs/zk_chains/interop.md rename to docs/src/specs/zk_chains/interop.md diff --git a/docs/specs/zk_chains/overview.md b/docs/src/specs/zk_chains/overview.md similarity index 100% rename from docs/specs/zk_chains/overview.md rename to docs/src/specs/zk_chains/overview.md diff --git a/docs/specs/zk_chains/shared_bridge.md b/docs/src/specs/zk_chains/shared_bridge.md similarity index 100% rename from docs/specs/zk_chains/shared_bridge.md rename to docs/src/specs/zk_chains/shared_bridge.md diff --git a/docs/specs/zk_evm/README.md b/docs/src/specs/zk_evm/README.md similarity index 100% rename from docs/specs/zk_evm/README.md rename to docs/src/specs/zk_evm/README.md diff --git a/docs/specs/zk_evm/account_abstraction.md b/docs/src/specs/zk_evm/account_abstraction.md similarity index 100% rename from docs/specs/zk_evm/account_abstraction.md rename to docs/src/specs/zk_evm/account_abstraction.md diff --git a/docs/specs/zk_evm/bootloader.md b/docs/src/specs/zk_evm/bootloader.md similarity index 100% rename from docs/specs/zk_evm/bootloader.md rename to docs/src/specs/zk_evm/bootloader.md diff --git a/docs/specs/zk_evm/fee_model.md b/docs/src/specs/zk_evm/fee_model.md similarity index 100% rename from docs/specs/zk_evm/fee_model.md rename to docs/src/specs/zk_evm/fee_model.md diff --git a/docs/specs/zk_evm/precompiles.md b/docs/src/specs/zk_evm/precompiles.md similarity index 100% rename from docs/specs/zk_evm/precompiles.md rename to docs/src/specs/zk_evm/precompiles.md diff --git a/docs/specs/zk_evm/system_contracts.md b/docs/src/specs/zk_evm/system_contracts.md similarity index 100% rename from docs/specs/zk_evm/system_contracts.md rename to docs/src/specs/zk_evm/system_contracts.md diff --git a/docs/specs/zk_evm/vm_overview.md b/docs/src/specs/zk_evm/vm_overview.md similarity index 100% rename from docs/specs/zk_evm/vm_overview.md rename to docs/src/specs/zk_evm/vm_overview.md diff --git a/docs/specs/zk_evm/vm_specification/EraVM_formal_specification.pdf b/docs/src/specs/zk_evm/vm_specification/EraVM_formal_specification.pdf similarity index 100% rename from docs/specs/zk_evm/vm_specification/EraVM_formal_specification.pdf rename to docs/src/specs/zk_evm/vm_specification/EraVM_formal_specification.pdf diff --git a/docs/specs/zk_evm/vm_specification/README.md b/docs/src/specs/zk_evm/vm_specification/README.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/README.md rename to docs/src/specs/zk_evm/vm_specification/README.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/README.md b/docs/src/specs/zk_evm/vm_specification/compiler/README.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/README.md rename to docs/src/specs/zk_evm/vm_specification/compiler/README.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/code_separation.md b/docs/src/specs/zk_evm/vm_specification/compiler/code_separation.md similarity index 100% rename from 
docs/specs/zk_evm/vm_specification/compiler/code_separation.md rename to docs/src/specs/zk_evm/vm_specification/compiler/code_separation.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/evmla_translator.md b/docs/src/specs/zk_evm/vm_specification/compiler/evmla_translator.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/evmla_translator.md rename to docs/src/specs/zk_evm/vm_specification/compiler/evmla_translator.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/exception_handling.md b/docs/src/specs/zk_evm/vm_specification/compiler/exception_handling.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/exception_handling.md rename to docs/src/specs/zk_evm/vm_specification/compiler/exception_handling.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/README.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/README.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/README.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/README.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/evm/README.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/README.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/evm/README.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/README.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/evm/arithmetic.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/arithmetic.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/evm/arithmetic.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/arithmetic.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/evm/bitwise.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/bitwise.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/evm/bitwise.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/bitwise.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/evm/block.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/block.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/evm/block.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/block.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/evm/call.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/call.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/evm/call.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/call.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/evm/create.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/create.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/evm/create.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/create.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/evm/environment.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/environment.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/evm/environment.md rename to 
docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/environment.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/evm/logging.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/logging.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/evm/logging.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/logging.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/evm/logical.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/logical.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/evm/logical.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/logical.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/evm/memory.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/memory.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/evm/memory.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/memory.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/evm/overview.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/overview.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/evm/overview.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/overview.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/evm/return.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/return.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/evm/return.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/return.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/evm/sha3.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/sha3.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/evm/sha3.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/sha3.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/evm/stack.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/stack.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/evm/stack.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/evm/stack.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/evmla.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/evmla.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/evmla.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/evmla.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/extensions/README.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/extensions/README.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/extensions/README.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/extensions/README.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/extensions/call.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/extensions/call.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/extensions/call.md rename to 
docs/src/specs/zk_evm/vm_specification/compiler/instructions/extensions/call.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/extensions/overview.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/extensions/overview.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/extensions/overview.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/extensions/overview.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/extensions/verbatim.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/extensions/verbatim.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/extensions/verbatim.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/extensions/verbatim.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/overview.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/overview.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/overview.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/overview.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/instructions/yul.md b/docs/src/specs/zk_evm/vm_specification/compiler/instructions/yul.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/instructions/yul.md rename to docs/src/specs/zk_evm/vm_specification/compiler/instructions/yul.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/overview.md b/docs/src/specs/zk_evm/vm_specification/compiler/overview.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/overview.md rename to docs/src/specs/zk_evm/vm_specification/compiler/overview.md diff --git a/docs/specs/zk_evm/vm_specification/compiler/system_contracts.md b/docs/src/specs/zk_evm/vm_specification/compiler/system_contracts.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/compiler/system_contracts.md rename to docs/src/specs/zk_evm/vm_specification/compiler/system_contracts.md diff --git a/docs/specs/zk_evm/vm_specification/img/arch-overview.png b/docs/src/specs/zk_evm/vm_specification/img/arch-overview.png similarity index 100% rename from docs/specs/zk_evm/vm_specification/img/arch-overview.png rename to docs/src/specs/zk_evm/vm_specification/img/arch-overview.png diff --git a/docs/specs/zk_evm/vm_specification/img/arithmetic_opcode.png b/docs/src/specs/zk_evm/vm_specification/img/arithmetic_opcode.png similarity index 100% rename from docs/specs/zk_evm/vm_specification/img/arithmetic_opcode.png rename to docs/src/specs/zk_evm/vm_specification/img/arithmetic_opcode.png diff --git a/docs/specs/zk_evm/vm_specification/zkSync_era_virtual_machine_primer.md b/docs/src/specs/zk_evm/vm_specification/zkSync_era_virtual_machine_primer.md similarity index 100% rename from docs/specs/zk_evm/vm_specification/zkSync_era_virtual_machine_primer.md rename to docs/src/specs/zk_evm/vm_specification/zkSync_era_virtual_machine_primer.md diff --git a/docs/theme/head.hbs b/docs/theme/head.hbs new file mode 100644 index 000000000000..66ee37538adf --- /dev/null +++ b/docs/theme/head.hbs @@ -0,0 +1 @@ + diff --git a/etc/contracts-test-data/README.md b/etc/contracts-test-data/README.md deleted file mode 100644 index 532703ad210f..000000000000 --- a/etc/contracts-test-data/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Contracts test data - -This folder contains data for contracts that are being 
used for testing to check the correctness of the smart contract
-flow in ZKsync.
diff --git a/etc/contracts-test-data/contracts/basic-constructor/basic-constructor.sol b/etc/contracts-test-data/contracts/basic-constructor/basic-constructor.sol
deleted file mode 100644
index d2fe2d0eefb9..000000000000
--- a/etc/contracts-test-data/contracts/basic-constructor/basic-constructor.sol
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: MIT OR Apache-2.0
-
-pragma solidity ^0.8.0;
-
-contract SimpleConstructor {
-    uint256 c;
-
-    constructor(uint256 a, uint256 b, bool shouldRevert) {
-        c = a * b;
-        require(!shouldRevert, "reverted deploy");
-    }
-
-    function get() public view returns (uint256) {
-        return c;
-    }
-}
diff --git a/etc/contracts-test-data/contracts/create/Foo.sol b/etc/contracts-test-data/contracts/create/Foo.sol
deleted file mode 100644
index 1ae4868e5bf6..000000000000
--- a/etc/contracts-test-data/contracts/create/Foo.sol
+++ /dev/null
@@ -1,8 +0,0 @@
-// SPDX-License-Identifier: MIT
-
-pragma solidity >=0.8.1;
-pragma abicoder v2;
-
-contract Foo {
-    string public name = "Foo";
-}
diff --git a/etc/contracts-test-data/contracts/create/create.sol b/etc/contracts-test-data/contracts/create/create.sol
deleted file mode 100644
index ef03e7c457ce..000000000000
--- a/etc/contracts-test-data/contracts/create/create.sol
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: MIT
-
-pragma solidity >=0.8.1;
-pragma abicoder v2;
-
-// import Foo.sol from current directory
-import "./Foo.sol";
-
-contract Import {
-    // Initialize Foo.sol
-    Foo public foo = new Foo();
-
-    // Test Foo.sol by getting it's name.
-    function getFooName() public view returns (string memory) {
-        return foo.name();
-    }
-}
\ No newline at end of file
diff --git a/etc/contracts-test-data/contracts/custom-account/interfaces/IERC20.sol b/etc/contracts-test-data/contracts/custom-account/interfaces/IERC20.sol
deleted file mode 100644
index b816bfed0863..000000000000
--- a/etc/contracts-test-data/contracts/custom-account/interfaces/IERC20.sol
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: MIT
-// OpenZeppelin Contracts (last updated v4.6.0) (token/ERC20/IERC20.sol)
-
-pragma solidity ^0.8.0;
-
-/**
- * @dev Interface of the ERC20 standard as defined in the EIP.
- */
-interface IERC20 {
-    /**
-     * @dev Emitted when `value` tokens are moved from one account (`from`) to
-     * another (`to`).
-     *
-     * Note that `value` may be zero.
-     */
-    event Transfer(address indexed from, address indexed to, uint256 value);
-
-    /**
-     * @dev Emitted when the allowance of a `spender` for an `owner` is set by
-     * a call to {approve}. `value` is the new allowance.
-     */
-    event Approval(address indexed owner, address indexed spender, uint256 value);
-
-    /**
-     * @dev Returns the amount of tokens in existence.
-     */
-    function totalSupply() external view returns (uint256);
-
-    /**
-     * @dev Returns the amount of tokens owned by `account`.
-     */
-    function balanceOf(address account) external view returns (uint256);
-
-    /**
-     * @dev Moves `amount` tokens from the caller's account to `to`.
-     *
-     * Returns a boolean value indicating whether the operation succeeded.
-     *
-     * Emits a {Transfer} event.
-     */
-    function transfer(address to, uint256 amount) external returns (bool);
-
-    /**
-     * @dev Returns the remaining number of tokens that `spender` will be
-     * allowed to spend on behalf of `owner` through {transferFrom}. This is
-     * zero by default.
-     *
-     * This value changes when {approve} or {transferFrom} are called.
-     */
-    function allowance(address owner, address spender) external view returns (uint256);
-
-    /**
-     * @dev Sets `amount` as the allowance of `spender` over the caller's tokens.
-     *
-     * Returns a boolean value indicating whether the operation succeeded.
-     *
-     * IMPORTANT: Beware that changing an allowance with this method brings the risk
-     * that someone may use both the old and the new allowance by unfortunate
-     * transaction ordering. One possible solution to mitigate this race
-     * condition is to first reduce the spender's allowance to 0 and set the
-     * desired value afterwards:
-     * https://github.com/ethereum/EIPs/issues/20#issuecomment-263524729
-     *
-     * Emits an {Approval} event.
-     */
-    function approve(address spender, uint256 amount) external returns (bool);
-
-    /**
-     * @dev Moves `amount` tokens from `from` to `to` using the
-     * allowance mechanism. `amount` is then deducted from the caller's
-     * allowance.
-     *
-     * Returns a boolean value indicating whether the operation succeeded.
-     *
-     * Emits a {Transfer} event.
-     */
-    function transferFrom(
-        address from,
-        address to,
-        uint256 amount
-    ) external returns (bool);
-}
diff --git a/etc/contracts-test-data/contracts/estimator/estimator.sol b/etc/contracts-test-data/contracts/estimator/estimator.sol
deleted file mode 100644
index 7fc7dfffc64b..000000000000
--- a/etc/contracts-test-data/contracts/estimator/estimator.sol
+++ /dev/null
@@ -1,30 +0,0 @@
-// SPDX-License-Identifier: UNLICENSED
-
-// This contract is used to estimate the protocol properties
-// related to the fee calculation, such as block capacity
-// and different operations costs.
-
-pragma solidity ^0.8.0;
-
-// Copied from `contracts/zksync/contracts/L2ContractHelper.sol`.
-interface IL2Messenger {
-    function sendToL1(bytes memory _message) external returns (bytes32);
-}
-
-uint160 constant SYSTEM_CONTRACTS_OFFSET = 0x8000; // 2^15
-IL2Messenger constant L2_MESSENGER = IL2Messenger(address(SYSTEM_CONTRACTS_OFFSET + 0x08));
-
-// TODO: Should be set to the actual value (SMA-1185).
-// Represents the maximum amount of L2->L1 messages that can happen in one block.
-uint256 constant MAX_L2_L1_MESSAGES_IN_BLOCK = 256;
-
-contract Estimator {
-    function estimateBlockCapacity() public {
-        // Block capacity is defined by several parameters, but the "cheapest" way to seal the block
-        // is to send a limited amount of messages to the L1.
-        // Here we're going to do just it.
-        for (uint256 i = 0; i < MAX_L2_L1_MESSAGES_IN_BLOCK; i++) {
-            L2_MESSENGER.sendToL1(bytes(""));
-        }
-    }
-}
diff --git a/etc/contracts-test-data/contracts/events/events.sol b/etc/contracts-test-data/contracts/events/events.sol
deleted file mode 100644
index 93a451d54695..000000000000
--- a/etc/contracts-test-data/contracts/events/events.sol
+++ /dev/null
@@ -1,15 +0,0 @@
-// SPDX-License-Identifier: MIT
-
-pragma solidity ^0.8.0;
-
-contract Emitter {
-    event Trivial();
-    event Simple(uint256 Number, address Account);
-    event Indexed(uint256 indexed Number, address Account);
-
-    function test(uint256 number) public {
-        emit Trivial();
-        emit Simple(number, address(0xdeadbeef));
-        emit Indexed(number, address(0xc0ffee));
-    }
-}
diff --git a/etc/contracts-test-data/contracts/events/sample-calldata b/etc/contracts-test-data/contracts/events/sample-calldata
deleted file mode 100644
index c137101ba026..000000000000
Binary files a/etc/contracts-test-data/contracts/events/sample-calldata and /dev/null differ
diff --git a/etc/contracts-test-data/contracts/loadnext/loadnext_contract.sol b/etc/contracts-test-data/contracts/loadnext/loadnext_contract.sol
deleted file mode 100644
index b14286a45038..000000000000
--- a/etc/contracts-test-data/contracts/loadnext/loadnext_contract.sol
+++ /dev/null
@@ -1,56 +0,0 @@
-// SPDX-License-Identifier: MIT
-
-pragma solidity ^0.8.0;
-pragma abicoder v2;
-
-contract LoadnextContract {
-    event Event(uint val);
-    uint[] readArray;
-    uint[] writeArray;
-
-    constructor (uint reads) {
-        for (uint i = 0; i < reads; i++) {
-            readArray.push(i);
-        }
-    }
-
-    function execute(uint reads, uint writes, uint hashes, uint events, uint max_recursion, uint deploys) external returns(uint) {
-        if (max_recursion > 0) {
-            return this.execute(reads, writes, hashes, events, max_recursion - 1, deploys);
-        }
-
-        uint sum = 0;
-
-        // Somehow use result of storage read for compiler to not optimize this place.
-        for (uint i = 0; i < reads; i++) {
-            sum += readArray[i];
-        }
-
-        for (uint i = 0; i < writes; i++) {
-            writeArray.push(i);
-        }
-
-        for (uint i = 0; i < events; i++) {
-            emit Event(i);
-        }
-
-        // Somehow use result of keccak for compiler to not optimize this place.
-        for (uint i = 0; i < hashes; i++) {
-            sum += uint8(keccak256(abi.encodePacked("Message for encoding"))[0]);
-        }
-
-        for (uint i = 0; i < deploys; i++) {
-            Foo foo = new Foo();
-        }
-        return sum;
-    }
-
-    function burnGas(uint256 gasToBurn) external {
-        uint256 initialGas = gasleft();
-        while(initialGas - gasleft() < gasToBurn) {}
-    }
-}
-
-contract Foo {
-    string public name = "Foo";
-}
diff --git a/etc/contracts-test-data/contracts/long-return-data/long-return-data.sol b/etc/contracts-test-data/contracts/long-return-data/long-return-data.sol
deleted file mode 100644
index 793bf191cbd8..000000000000
--- a/etc/contracts-test-data/contracts/long-return-data/long-return-data.sol
+++ /dev/null
@@ -1,13 +0,0 @@
-// SPDX-License-Identifier: UNLICENSED
-pragma solidity ^0.8.0;
-
-contract LongReturnData{
-    function longReturnData() external returns (bool, bytes memory) {
-        // do some recursion, let's have more layers
-        (bool success, bytes memory _tmp) = this.longReturnData{gas: 79500000}();
-        require(success == false); // they should fail by design
-        assembly {
-            return(0, 0xffffffffffffffff)
-        }
-    }
-}
diff --git a/etc/contracts-test-data/counter/counter.sol b/etc/contracts-test-data/counter/counter.sol
deleted file mode 100644
index ec9219d7a199..000000000000
--- a/etc/contracts-test-data/counter/counter.sol
+++ /dev/null
@@ -1,27 +0,0 @@
-// SPDX-License-Identifier: UNLICENSED
-
-pragma solidity ^0.8.0;
-
-contract Counter {
-    uint256 value;
-
-    function increment(uint256 x) public {
-        value += x;
-    }
-
-    function incrementWithRevertPayable(uint256 x, bool shouldRevert) public payable returns (uint256) {
-        return incrementWithRevert(x, shouldRevert);
-    }
-
-    function incrementWithRevert(uint256 x, bool shouldRevert) public returns (uint256) {
-        value += x;
-        if (shouldRevert) {
-            revert("This method always reverts");
-        }
-        return value;
-    }
-
-    function get() public view returns (uint256) {
-        return value;
-    }
-}
diff --git a/etc/contracts-test-data/hardhat.config.ts b/etc/contracts-test-data/hardhat.config.ts
deleted file mode 100644
index 1883c1f6cd4e..000000000000
--- a/etc/contracts-test-data/hardhat.config.ts
+++ /dev/null
@@ -1,35 +0,0 @@
-import '@matterlabs/hardhat-zksync-solc';
-
-const COMPILER_VERSION = '1.5.0';
-const PRE_RELEASE_VERSION = 'prerelease-a167aa3-code4rena';
-function getZksolcUrl(): string {
-    // @ts-ignore
-    const platform = { darwin: 'macosx', linux: 'linux', win32: 'windows' }[process.platform];
-    // @ts-ignore
-    const toolchain = { linux: '-musl', win32: '-gnu', darwin: '' }[process.platform];
-    const arch = process.arch === 'x64' ? 'amd64' : process.arch;
-    const ext = process.platform === 'win32' ? '.exe' : '';
-
-    return `https://github.com/matter-labs/era-compiler-solidity/releases/download/${PRE_RELEASE_VERSION}/zksolc-${platform}-${arch}${toolchain}-v${COMPILER_VERSION}${ext}`;
-}
-
-export default {
-    zksolc: {
-        compilerSource: 'binary',
-        settings: {
-            compilerPath: getZksolcUrl(),
-            isSystem: true
-        }
-    },
-    networks: {
-        hardhat: {
-            zksync: true
-        }
-    },
-    solidity: {
-        version: '0.8.24',
-        settings: {
-            evmVersion: 'cancun'
-        }
-    }
-};
diff --git a/etc/contracts-test-data/package.json b/etc/contracts-test-data/package.json
deleted file mode 100644
index 543a982e4b77..000000000000
--- a/etc/contracts-test-data/package.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "name": "contracts-test-data",
-  "version": "0.1.0",
-  "license": "MIT",
-  "dependencies": {
-    "@openzeppelin/contracts": "^4.8.0",
-    "hardhat": "=2.22.2"
-  },
-  "devDependencies": {
-    "@matterlabs/hardhat-zksync-solc": "^0.3.15"
-  },
-  "scripts": {
-    "build": "hardhat compile",
-    "clean": "hardhat clean"
-  }
-}
diff --git a/etc/contracts-test-data/yarn.lock b/etc/contracts-test-data/yarn.lock
deleted file mode 100644
index 47c70d2d63eb..000000000000
--- a/etc/contracts-test-data/yarn.lock
+++ /dev/null
@@ -1,2757 +0,0 @@
-# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
-# yarn lockfile v1
-
-
-"@balena/dockerignore@^1.0.2":
-  version "1.0.2"
-  resolved "https://registry.yarnpkg.com/@balena/dockerignore/-/dockerignore-1.0.2.tgz#9ffe4726915251e8eb69f44ef3547e0da2c03e0d"
-  integrity sha512-wMue2Sy4GAVTk6Ic4tJVcnfdau+gx2EnG7S+uAEe+TWJFqE4YoWN4/H8MSLj4eYJKxGg26lZwboEniNiNwZQ6Q==
-
-"@chainsafe/as-sha256@^0.3.1":
-  version "0.3.1"
-  resolved "https://registry.yarnpkg.com/@chainsafe/as-sha256/-/as-sha256-0.3.1.tgz#3639df0e1435cab03f4d9870cc3ac079e57a6fc9"
-  integrity sha512-hldFFYuf49ed7DAakWVXSJODuq3pzJEguD8tQ7h+sGkM18vja+OFoJI9krnGmgzyuZC2ETX0NOIcCTy31v2Mtg==
-
-"@chainsafe/persistent-merkle-tree@^0.4.2":
-  version "0.4.2"
-  resolved "https://registry.yarnpkg.com/@chainsafe/persistent-merkle-tree/-/persistent-merkle-tree-0.4.2.tgz#4c9ee80cc57cd3be7208d98c40014ad38f36f7ff"
-  integrity sha512-lLO3ihKPngXLTus/L7WHKaw9PnNJWizlOF1H9NNzHP6Xvh82vzg9F2bzkXhYIFshMZ2gTCEz8tq6STe7r5NDfQ==
-  dependencies:
-    "@chainsafe/as-sha256" "^0.3.1"
-
-"@chainsafe/persistent-merkle-tree@^0.5.0":
-  version "0.5.0"
-  resolved "https://registry.yarnpkg.com/@chainsafe/persistent-merkle-tree/-/persistent-merkle-tree-0.5.0.tgz#2b4a62c9489a5739dedd197250d8d2f5427e9f63"
-  integrity sha512-l0V1b5clxA3iwQLXP40zYjyZYospQLZXzBVIhhr9kDg/1qHZfzzHw0jj4VPBijfYCArZDlPkRi1wZaV2POKeuw==
-  dependencies:
-    "@chainsafe/as-sha256" "^0.3.1"
-
-"@chainsafe/ssz@^0.10.0":
-  version "0.10.2"
-  resolved "https://registry.yarnpkg.com/@chainsafe/ssz/-/ssz-0.10.2.tgz#c782929e1bb25fec66ba72e75934b31fd087579e"
-  integrity sha512-/NL3Lh8K+0q7A3LsiFq09YXS9fPE+ead2rr7vM2QK8PLzrNsw3uqrif9bpRX5UxgeRjM+vYi+boCM3+GM4ovXg==
-  dependencies:
-    "@chainsafe/as-sha256" "^0.3.1"
-    "@chainsafe/persistent-merkle-tree" "^0.5.0"
-
-"@chainsafe/ssz@^0.9.2":
-  version "0.9.4"
-  resolved "https://registry.yarnpkg.com/@chainsafe/ssz/-/ssz-0.9.4.tgz#696a8db46d6975b600f8309ad3a12f7c0e310497"
-  integrity sha512-77Qtg2N1ayqs4Bg/wvnWfg5Bta7iy7IRh8XqXh7oNMeP2HBbBwx8m6yTpA8p0EHItWPEBkgZd5S5/LSlp3GXuQ==
-  dependencies:
-    "@chainsafe/as-sha256" "^0.3.1"
-    "@chainsafe/persistent-merkle-tree" "^0.4.2"
-    case "^1.6.3"
-
-"@ethersproject/abi@5.7.0", "@ethersproject/abi@^5.1.2", "@ethersproject/abi@^5.7.0":
-  version "5.7.0"
-  resolved
"https://registry.yarnpkg.com/@ethersproject/abi/-/abi-5.7.0.tgz#b3f3e045bbbeed1af3947335c247ad625a44e449" - integrity sha512-351ktp42TiRcYB3H1OP8yajPeAQstMW/yCFokj/AthP9bLHzQFPlOrxOcwYEDkUAICmOHljvN4K39OMTMUa9RA== - dependencies: - "@ethersproject/address" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/constants" "^5.7.0" - "@ethersproject/hash" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - -"@ethersproject/abstract-provider@5.7.0", "@ethersproject/abstract-provider@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/abstract-provider/-/abstract-provider-5.7.0.tgz#b0a8550f88b6bf9d51f90e4795d48294630cb9ef" - integrity sha512-R41c9UkchKCpAqStMYUpdunjo3pkEvZC3FAwZn5S5MGbXoMQOHIdHItezTETxAO5bevtMApSyEhn9+CHcDsWBw== - dependencies: - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/networks" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - "@ethersproject/web" "^5.7.0" - -"@ethersproject/abstract-signer@5.7.0", "@ethersproject/abstract-signer@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/abstract-signer/-/abstract-signer-5.7.0.tgz#13f4f32117868452191a4649723cb086d2b596b2" - integrity sha512-a16V8bq1/Cz+TGCkE2OPMTOUDLS3grCpdjoJCYNnVBbdYEMSgKrU0+B90s8b6H+ByYTBZN7a3g76jdIJi7UfKQ== - dependencies: - "@ethersproject/abstract-provider" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - -"@ethersproject/address@5.7.0", "@ethersproject/address@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/address/-/address-5.7.0.tgz#19b56c4d74a3b0a46bfdbb6cfcc0a153fc697f37" - integrity sha512-9wYhYt7aghVGo758POM5nqcOMaE168Q6aRLJZwUmiqSrAungkG74gSSeKEIR7ukixesdRZGPgVqme6vmxs1fkA== - dependencies: - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/rlp" "^5.7.0" - -"@ethersproject/base64@5.7.0", "@ethersproject/base64@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/base64/-/base64-5.7.0.tgz#ac4ee92aa36c1628173e221d0d01f53692059e1c" - integrity sha512-Dr8tcHt2mEbsZr/mwTPIQAf3Ai0Bks/7gTw9dSqk1mQvhW3XvRlmDJr/4n+wg1JmCl16NZue17CDh8xb/vZ0sQ== - dependencies: - "@ethersproject/bytes" "^5.7.0" - -"@ethersproject/basex@5.7.0", "@ethersproject/basex@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/basex/-/basex-5.7.0.tgz#97034dc7e8938a8ca943ab20f8a5e492ece4020b" - integrity sha512-ywlh43GwZLv2Voc2gQVTKBoVQ1mti3d8HK5aMxsfu/nRDnMmNqaSJ3r3n85HBByT8OpoY96SXM1FogC533T4zw== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - -"@ethersproject/bignumber@5.7.0", "@ethersproject/bignumber@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/bignumber/-/bignumber-5.7.0.tgz#e2f03837f268ba655ffba03a57853e18a18dc9c2" - integrity sha512-n1CAdIHRWjSucQO3MC1zPSVgV/6dy/fjL9pMrPP9peL+QxEg9wOsVqwD4+818B6LUEtaXzVHQiuivzRoxPxUGw== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - bn.js "^5.2.1" - -"@ethersproject/bytes@5.7.0", "@ethersproject/bytes@^5.7.0": - version "5.7.0" - resolved 
"https://registry.yarnpkg.com/@ethersproject/bytes/-/bytes-5.7.0.tgz#a00f6ea8d7e7534d6d87f47188af1148d71f155d" - integrity sha512-nsbxwgFXWh9NyYWo+U8atvmMsSdKJprTcICAkvbBffT75qDocbuggBU0SJiVK2MuTrp0q+xvLkTnGMPK1+uA9A== - dependencies: - "@ethersproject/logger" "^5.7.0" - -"@ethersproject/constants@5.7.0", "@ethersproject/constants@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/constants/-/constants-5.7.0.tgz#df80a9705a7e08984161f09014ea012d1c75295e" - integrity sha512-DHI+y5dBNvkpYUMiRQyxRBYBefZkJfo70VUkUAsRjcPs47muV9evftfZ0PJVCXYbAiCgght0DtcF9srFQmIgWA== - dependencies: - "@ethersproject/bignumber" "^5.7.0" - -"@ethersproject/contracts@5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/contracts/-/contracts-5.7.0.tgz#c305e775abd07e48aa590e1a877ed5c316f8bd1e" - integrity sha512-5GJbzEU3X+d33CdfPhcyS+z8MzsTrBGk/sc+G+59+tPa9yFkl6HQ9D6L0QMgNTA9q8dT0XKxxkyp883XsQvbbg== - dependencies: - "@ethersproject/abi" "^5.7.0" - "@ethersproject/abstract-provider" "^5.7.0" - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/address" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/constants" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - -"@ethersproject/hash@5.7.0", "@ethersproject/hash@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/hash/-/hash-5.7.0.tgz#eb7aca84a588508369562e16e514b539ba5240a7" - integrity sha512-qX5WrQfnah1EFnO5zJv1v46a8HW0+E5xuBBDTwMFZLuVTx0tbU2kkx15NqdjxecrLGatQN9FGQKpb1FKdHCt+g== - dependencies: - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/address" "^5.7.0" - "@ethersproject/base64" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - -"@ethersproject/hdnode@5.7.0", "@ethersproject/hdnode@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/hdnode/-/hdnode-5.7.0.tgz#e627ddc6b466bc77aebf1a6b9e47405ca5aef9cf" - integrity sha512-OmyYo9EENBPPf4ERhR7oj6uAtUAhYGqOnIS+jE5pTXvdKBS99ikzq1E7Iv0ZQZ5V36Lqx1qZLeak0Ra16qpeOg== - dependencies: - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/basex" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/pbkdf2" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/sha2" "^5.7.0" - "@ethersproject/signing-key" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - "@ethersproject/wordlists" "^5.7.0" - -"@ethersproject/json-wallets@5.7.0", "@ethersproject/json-wallets@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/json-wallets/-/json-wallets-5.7.0.tgz#5e3355287b548c32b368d91014919ebebddd5360" - integrity sha512-8oee5Xgu6+RKgJTkvEMl2wDgSPSAQ9MB/3JYjFV9jlKvcYHUXZC+cQp0njgmxdHkYWn8s6/IqIZYm0YWCjO/0g== - dependencies: - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/address" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/hdnode" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/pbkdf2" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/random" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - aes-js 
"3.0.0" - scrypt-js "3.0.1" - -"@ethersproject/keccak256@5.7.0", "@ethersproject/keccak256@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/keccak256/-/keccak256-5.7.0.tgz#3186350c6e1cd6aba7940384ec7d6d9db01f335a" - integrity sha512-2UcPboeL/iW+pSg6vZ6ydF8tCnv3Iu/8tUmLLzWWGzxWKFFqOBQFLo6uLUv6BDrLgCDfN28RJ/wtByx+jZ4KBg== - dependencies: - "@ethersproject/bytes" "^5.7.0" - js-sha3 "0.8.0" - -"@ethersproject/logger@5.7.0", "@ethersproject/logger@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/logger/-/logger-5.7.0.tgz#6ce9ae168e74fecf287be17062b590852c311892" - integrity sha512-0odtFdXu/XHtjQXJYA3u9G0G8btm0ND5Cu8M7i5vhEcE8/HmF4Lbdqanwyv4uQTr2tx6b7fQRmgLrsnpQlmnig== - -"@ethersproject/networks@5.7.1", "@ethersproject/networks@^5.7.0": - version "5.7.1" - resolved "https://registry.yarnpkg.com/@ethersproject/networks/-/networks-5.7.1.tgz#118e1a981d757d45ccea6bb58d9fd3d9db14ead6" - integrity sha512-n/MufjFYv3yFcUyfhnXotyDlNdFb7onmkSy8aQERi2PjNcnWQ66xXxa3XlS8nCcA8aJKJjIIMNJTC7tu80GwpQ== - dependencies: - "@ethersproject/logger" "^5.7.0" - -"@ethersproject/pbkdf2@5.7.0", "@ethersproject/pbkdf2@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/pbkdf2/-/pbkdf2-5.7.0.tgz#d2267d0a1f6e123f3771007338c47cccd83d3102" - integrity sha512-oR/dBRZR6GTyaofd86DehG72hY6NpAjhabkhxgr3X2FpJtJuodEl2auADWBZfhDHgVCbu3/H/Ocq2uC6dpNjjw== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/sha2" "^5.7.0" - -"@ethersproject/properties@5.7.0", "@ethersproject/properties@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/properties/-/properties-5.7.0.tgz#a6e12cb0439b878aaf470f1902a176033067ed30" - integrity sha512-J87jy8suntrAkIZtecpxEPxY//szqr1mlBaYlQ0r4RCaiD2hjheqF9s1LVE8vVuJCXisjIP+JgtK/Do54ej4Sw== - dependencies: - "@ethersproject/logger" "^5.7.0" - -"@ethersproject/providers@5.7.2", "@ethersproject/providers@^5.7.1", "@ethersproject/providers@^5.7.2": - version "5.7.2" - resolved "https://registry.yarnpkg.com/@ethersproject/providers/-/providers-5.7.2.tgz#f8b1a4f275d7ce58cf0a2eec222269a08beb18cb" - integrity sha512-g34EWZ1WWAVgr4aptGlVBF8mhl3VWjv+8hoAnzStu8Ah22VHBsuGzP17eb6xDVRzw895G4W7vvx60lFFur/1Rg== - dependencies: - "@ethersproject/abstract-provider" "^5.7.0" - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/address" "^5.7.0" - "@ethersproject/base64" "^5.7.0" - "@ethersproject/basex" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/constants" "^5.7.0" - "@ethersproject/hash" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/networks" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/random" "^5.7.0" - "@ethersproject/rlp" "^5.7.0" - "@ethersproject/sha2" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - "@ethersproject/web" "^5.7.0" - bech32 "1.1.4" - ws "7.4.6" - -"@ethersproject/random@5.7.0", "@ethersproject/random@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/random/-/random-5.7.0.tgz#af19dcbc2484aae078bb03656ec05df66253280c" - integrity sha512-19WjScqRA8IIeWclFme75VMXSBvi4e6InrUNuaR4s5pTF2qNhcGdCUwdxUVGtDDqC00sDLCO93jPQoDUH4HVmQ== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - -"@ethersproject/rlp@5.7.0", "@ethersproject/rlp@^5.7.0": - version "5.7.0" - resolved 
"https://registry.yarnpkg.com/@ethersproject/rlp/-/rlp-5.7.0.tgz#de39e4d5918b9d74d46de93af80b7685a9c21304" - integrity sha512-rBxzX2vK8mVF7b0Tol44t5Tb8gomOHkj5guL+HhzQ1yBh/ydjGnpw6at+X6Iw0Kp3OzzzkcKp8N9r0W4kYSs9w== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - -"@ethersproject/sha2@5.7.0", "@ethersproject/sha2@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/sha2/-/sha2-5.7.0.tgz#9a5f7a7824ef784f7f7680984e593a800480c9fb" - integrity sha512-gKlH42riwb3KYp0reLsFTokByAKoJdgFCwI+CCiX/k+Jm2mbNs6oOaCjYQSlI1+XBVejwH2KrmCbMAT/GnRDQw== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - hash.js "1.1.7" - -"@ethersproject/signing-key@5.7.0", "@ethersproject/signing-key@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/signing-key/-/signing-key-5.7.0.tgz#06b2df39411b00bc57c7c09b01d1e41cf1b16ab3" - integrity sha512-MZdy2nL3wO0u7gkB4nA/pEf8lu1TlFswPNmy8AiYkfKTdO6eXBJyUdmHO/ehm/htHw9K/qF8ujnTyUAD+Ry54Q== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - bn.js "^5.2.1" - elliptic "6.5.4" - hash.js "1.1.7" - -"@ethersproject/solidity@5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/solidity/-/solidity-5.7.0.tgz#5e9c911d8a2acce2a5ebb48a5e2e0af20b631cb8" - integrity sha512-HmabMd2Dt/raavyaGukF4XxizWKhKQ24DoLtdNbBmNKUOPqwjsKQSdV9GQtj9CBEea9DlzETlVER1gYeXXBGaA== - dependencies: - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/sha2" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - -"@ethersproject/strings@5.7.0", "@ethersproject/strings@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/strings/-/strings-5.7.0.tgz#54c9d2a7c57ae8f1205c88a9d3a56471e14d5ed2" - integrity sha512-/9nu+lj0YswRNSH0NXYqrh8775XNyEdUQAuf3f+SmOrnVewcJ5SBNAjF7lpgehKi4abvNNXyf+HX86czCdJ8Mg== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/constants" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - -"@ethersproject/transactions@5.7.0", "@ethersproject/transactions@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/transactions/-/transactions-5.7.0.tgz#91318fc24063e057885a6af13fdb703e1f993d3b" - integrity sha512-kmcNicCp1lp8qanMTC3RIikGgoJ80ztTyvtsFvCYpSCfkjhD0jZ2LOrnbcuxuToLIUYYf+4XwD1rP+B/erDIhQ== - dependencies: - "@ethersproject/address" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/constants" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/rlp" "^5.7.0" - "@ethersproject/signing-key" "^5.7.0" - -"@ethersproject/units@5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/units/-/units-5.7.0.tgz#637b563d7e14f42deeee39245275d477aae1d8b1" - integrity sha512-pD3xLMy3SJu9kG5xDGI7+xhTEmGXlEqXU4OfNapmfnxLVY4EMSSRp7j1k7eezutBPH7RBN/7QPnwR7hzNlEFeg== - dependencies: - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/constants" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - -"@ethersproject/wallet@5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/wallet/-/wallet-5.7.0.tgz#4e5d0790d96fe21d61d38fb40324e6c7ef350b2d" - integrity 
sha512-MhmXlJXEJFBFVKrDLB4ZdDzxcBxQ3rLyCkhNqVu3CDYvR97E+8r01UgrI+TI99Le+aYm/in/0vp86guJuM7FCA== - dependencies: - "@ethersproject/abstract-provider" "^5.7.0" - "@ethersproject/abstract-signer" "^5.7.0" - "@ethersproject/address" "^5.7.0" - "@ethersproject/bignumber" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/hash" "^5.7.0" - "@ethersproject/hdnode" "^5.7.0" - "@ethersproject/json-wallets" "^5.7.0" - "@ethersproject/keccak256" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/random" "^5.7.0" - "@ethersproject/signing-key" "^5.7.0" - "@ethersproject/transactions" "^5.7.0" - "@ethersproject/wordlists" "^5.7.0" - -"@ethersproject/web@5.7.1", "@ethersproject/web@^5.7.0": - version "5.7.1" - resolved "https://registry.yarnpkg.com/@ethersproject/web/-/web-5.7.1.tgz#de1f285b373149bee5928f4eb7bcb87ee5fbb4ae" - integrity sha512-Gueu8lSvyjBWL4cYsWsjh6MtMwM0+H4HvqFPZfB6dV8ctbP9zFAO73VG1cMWae0FLPCtz0peKPpZY8/ugJJX2w== - dependencies: - "@ethersproject/base64" "^5.7.0" - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - -"@ethersproject/wordlists@5.7.0", "@ethersproject/wordlists@^5.7.0": - version "5.7.0" - resolved "https://registry.yarnpkg.com/@ethersproject/wordlists/-/wordlists-5.7.0.tgz#8fb2c07185d68c3e09eb3bfd6e779ba2774627f5" - integrity sha512-S2TFNJNfHWVHNE6cNDjbVlZ6MgE17MIxMbMg2zv3wn+3XSJGosL1m9ZVv3GXCf/2ymSsQ+hRI5IzoMJTG6aoVA== - dependencies: - "@ethersproject/bytes" "^5.7.0" - "@ethersproject/hash" "^5.7.0" - "@ethersproject/logger" "^5.7.0" - "@ethersproject/properties" "^5.7.0" - "@ethersproject/strings" "^5.7.0" - -"@fastify/busboy@^2.0.0": - version "2.0.0" - resolved "https://registry.yarnpkg.com/@fastify/busboy/-/busboy-2.0.0.tgz#f22824caff3ae506b18207bad4126dbc6ccdb6b8" - integrity sha512-JUFJad5lv7jxj926GPgymrWQxxjPYuJNiNjNMzqT+HiuP6Vl3dk5xzG+8sTX96np0ZAluvaMzPsjhHZ5rNuNQQ== - -"@matterlabs/hardhat-zksync-solc@^0.3.15": - version "0.3.17" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-0.3.17.tgz#72f199544dc89b268d7bfc06d022a311042752fd" - integrity sha512-aZgQ0yfXW5xPkfuEH1d44ncWV4T2LzKZd0VVPo4PL5cUrYs2/II1FaEDp5zsf3FxOR1xT3mBsjuSrtJkk4AL8Q== - dependencies: - "@nomiclabs/hardhat-docker" "^2.0.0" - chalk "4.1.2" - dockerode "^3.3.4" - -"@metamask/eth-sig-util@^4.0.0": - version "4.0.1" - resolved "https://registry.yarnpkg.com/@metamask/eth-sig-util/-/eth-sig-util-4.0.1.tgz#3ad61f6ea9ad73ba5b19db780d40d9aae5157088" - integrity sha512-tghyZKLHZjcdlDqCA3gNZmLeR0XvOE9U1qoQO9ohyAZT6Pya+H9vkBPcsyXytmYLNgVoin7CKCmweo/R43V+tQ== - dependencies: - ethereumjs-abi "^0.6.8" - ethereumjs-util "^6.2.1" - ethjs-util "^0.1.6" - tweetnacl "^1.0.3" - tweetnacl-util "^0.15.1" - -"@noble/hashes@1.2.0", "@noble/hashes@~1.2.0": - version "1.2.0" - resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.2.0.tgz#a3150eeb09cc7ab207ebf6d7b9ad311a9bdbed12" - integrity sha512-FZfhjEDbT5GRswV3C6uvLPHMiVD6lQBmpoX5+eSiPaMTXte/IKqI5dykDxzZB/WBeK/CDuQRBWarPdi3FNY2zQ== - -"@noble/secp256k1@1.7.1", "@noble/secp256k1@~1.7.0": - version "1.7.1" - resolved "https://registry.yarnpkg.com/@noble/secp256k1/-/secp256k1-1.7.1.tgz#b251c70f824ce3ca7f8dc3df08d58f005cc0507c" - integrity sha512-hOUk6AyBFmqVrv7k5WAw/LpszxVbj9gGN4JRkIX52fdFAj1UA61KXmZDvqVEm+pOyec3+fIeZB02LYa/pWOArw== - -"@nomicfoundation/ethereumjs-block@5.0.1": - version "5.0.1" - resolved 
"https://registry.yarnpkg.com/@nomicfoundation/ethereumjs-block/-/ethereumjs-block-5.0.1.tgz#6f89664f55febbd723195b6d0974773d29ee133d" - integrity sha512-u1Yioemi6Ckj3xspygu/SfFvm8vZEO8/Yx5a1QLzi6nVU0jz3Pg2OmHKJ5w+D9Ogk1vhwRiqEBAqcb0GVhCyHw== - dependencies: - "@nomicfoundation/ethereumjs-common" "4.0.1" - "@nomicfoundation/ethereumjs-rlp" "5.0.1" - "@nomicfoundation/ethereumjs-trie" "6.0.1" - "@nomicfoundation/ethereumjs-tx" "5.0.1" - "@nomicfoundation/ethereumjs-util" "9.0.1" - ethereum-cryptography "0.1.3" - ethers "^5.7.1" - -"@nomicfoundation/ethereumjs-blockchain@7.0.1": - version "7.0.1" - resolved "https://registry.yarnpkg.com/@nomicfoundation/ethereumjs-blockchain/-/ethereumjs-blockchain-7.0.1.tgz#80e0bd3535bfeb9baa29836b6f25123dab06a726" - integrity sha512-NhzndlGg829XXbqJEYrF1VeZhAwSPgsK/OB7TVrdzft3y918hW5KNd7gIZ85sn6peDZOdjBsAXIpXZ38oBYE5A== - dependencies: - "@nomicfoundation/ethereumjs-block" "5.0.1" - "@nomicfoundation/ethereumjs-common" "4.0.1" - "@nomicfoundation/ethereumjs-ethash" "3.0.1" - "@nomicfoundation/ethereumjs-rlp" "5.0.1" - "@nomicfoundation/ethereumjs-trie" "6.0.1" - "@nomicfoundation/ethereumjs-tx" "5.0.1" - "@nomicfoundation/ethereumjs-util" "9.0.1" - abstract-level "^1.0.3" - debug "^4.3.3" - ethereum-cryptography "0.1.3" - level "^8.0.0" - lru-cache "^5.1.1" - memory-level "^1.0.0" - -"@nomicfoundation/ethereumjs-common@4.0.1": - version "4.0.1" - resolved "https://registry.yarnpkg.com/@nomicfoundation/ethereumjs-common/-/ethereumjs-common-4.0.1.tgz#4702d82df35b07b5407583b54a45bf728e46a2f0" - integrity sha512-OBErlkfp54GpeiE06brBW/TTbtbuBJV5YI5Nz/aB2evTDo+KawyEzPjBlSr84z/8MFfj8wS2wxzQX1o32cev5g== - dependencies: - "@nomicfoundation/ethereumjs-util" "9.0.1" - crc-32 "^1.2.0" - -"@nomicfoundation/ethereumjs-ethash@3.0.1": - version "3.0.1" - resolved "https://registry.yarnpkg.com/@nomicfoundation/ethereumjs-ethash/-/ethereumjs-ethash-3.0.1.tgz#65ca494d53e71e8415c9a49ef48bc921c538fc41" - integrity sha512-KDjGIB5igzWOp8Ik5I6QiRH5DH+XgILlplsHR7TEuWANZA759G6krQ6o8bvj+tRUz08YygMQu/sGd9mJ1DYT8w== - dependencies: - "@nomicfoundation/ethereumjs-block" "5.0.1" - "@nomicfoundation/ethereumjs-rlp" "5.0.1" - "@nomicfoundation/ethereumjs-util" "9.0.1" - abstract-level "^1.0.3" - bigint-crypto-utils "^3.0.23" - ethereum-cryptography "0.1.3" - -"@nomicfoundation/ethereumjs-evm@2.0.1": - version "2.0.1" - resolved "https://registry.yarnpkg.com/@nomicfoundation/ethereumjs-evm/-/ethereumjs-evm-2.0.1.tgz#f35681e203363f69ce2b3d3bf9f44d4e883ca1f1" - integrity sha512-oL8vJcnk0Bx/onl+TgQOQ1t/534GKFaEG17fZmwtPFeH8S5soiBYPCLUrvANOl4sCp9elYxIMzIiTtMtNNN8EQ== - dependencies: - "@ethersproject/providers" "^5.7.1" - "@nomicfoundation/ethereumjs-common" "4.0.1" - "@nomicfoundation/ethereumjs-tx" "5.0.1" - "@nomicfoundation/ethereumjs-util" "9.0.1" - debug "^4.3.3" - ethereum-cryptography "0.1.3" - mcl-wasm "^0.7.1" - rustbn.js "~0.2.0" - -"@nomicfoundation/ethereumjs-rlp@5.0.1": - version "5.0.1" - resolved "https://registry.yarnpkg.com/@nomicfoundation/ethereumjs-rlp/-/ethereumjs-rlp-5.0.1.tgz#0b30c1cf77d125d390408e391c4bb5291ef43c28" - integrity sha512-xtxrMGa8kP4zF5ApBQBtjlSbN5E2HI8m8FYgVSYAnO6ssUoY5pVPGy2H8+xdf/bmMa22Ce8nWMH3aEW8CcqMeQ== - -"@nomicfoundation/ethereumjs-statemanager@2.0.1": - version "2.0.1" - resolved "https://registry.yarnpkg.com/@nomicfoundation/ethereumjs-statemanager/-/ethereumjs-statemanager-2.0.1.tgz#8824a97938db4471911e2d2f140f79195def5935" - integrity sha512-B5ApMOnlruVOR7gisBaYwFX+L/AP7i/2oAahatssjPIBVDF6wTX1K7Qpa39E/nzsH8iYuL3krkYeUFIdO3EMUQ== - 
[yarn.lock deletion hunk elided: extraction flattened several hundred removed lockfile lines into unreadable run-on text. The hunk deletes the resolved entries for hardhat@2.16.0 and its transitive dependency tree, including the "@nomicfoundation/ethereumjs-*" packages (block, blockchain, common, evm, rlp, statemanager, trie, tx, util, vm), the per-platform "@nomicfoundation/solidity-analyzer-*" binaries, "@nomiclabs/hardhat-docker", "@openzeppelin/contracts", "@scure/base" / "@scure/bip32" / "@scure/bip39", the "@sentry/*" 5.30.0 packages, assorted "@types/*" entries, and the alphabetized transitive dependencies from JSONStream@1.3.2 through p-map@^4.0.0, where the hunk is cut off mid-entry.]
sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ== - dependencies: - aggregate-error "^3.0.0" - -p-try@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/p-try/-/p-try-1.0.0.tgz#cbc79cdbaf8fd4228e13f621f2b1a237c1b207b3" - integrity sha512-U1etNYuMJoIz3ZXSrrySFjsXQTWOx2/jdi86L+2pRvph/qMKL6sbcCYdH23fqsbm8TH2Gn0OybpT4eSFlCVHww== - -path-exists@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" - integrity sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ== - -path-exists@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" - integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== - -path-is-absolute@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" - integrity sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg== - -path-parse@^1.0.6: - version "1.0.7" - resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" - integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== - -pbkdf2@^3.0.17: - version "3.1.2" - resolved "https://registry.yarnpkg.com/pbkdf2/-/pbkdf2-3.1.2.tgz#dd822aa0887580e52f1a039dc3eda108efae3075" - integrity sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA== - dependencies: - create-hash "^1.1.2" - create-hmac "^1.1.4" - ripemd160 "^2.0.1" - safe-buffer "^5.0.1" - sha.js "^2.4.8" - -picomatch@^2.0.4, picomatch@^2.2.1: - version "2.3.1" - resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" - integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== - -process-nextick-args@~2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2" - integrity sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag== - -pump@^1.0.0: - version "1.0.3" - resolved "https://registry.yarnpkg.com/pump/-/pump-1.0.3.tgz#5dfe8311c33bbf6fc18261f9f34702c47c08a954" - integrity sha512-8k0JupWme55+9tCVE+FS5ULT3K6AbgqrGa58lTT49RpyfwwcGedHqaC5LlQNdEAumn/wFsu6aPwkuPMioy8kqw== - dependencies: - end-of-stream "^1.1.0" - once "^1.3.1" - -pump@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/pump/-/pump-3.0.0.tgz#b4a2116815bde2f4e1ea602354e8c75565107a64" - integrity sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww== - dependencies: - end-of-stream "^1.1.0" - once "^1.3.1" - -queue-microtask@^1.2.2, queue-microtask@^1.2.3: - version "1.2.3" - resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243" - integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A== - -randombytes@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/randombytes/-/randombytes-2.1.0.tgz#df6f84372f0270dc65cdf6291349ab7a473d4f2a" - integrity 
sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ== - dependencies: - safe-buffer "^5.1.0" - -raw-body@^2.4.1: - version "2.5.2" - resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.5.2.tgz#99febd83b90e08975087e8f1f9419a149366b68a" - integrity sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA== - dependencies: - bytes "3.1.2" - http-errors "2.0.0" - iconv-lite "0.4.24" - unpipe "1.0.0" - -readable-stream@^2.2.2, readable-stream@^2.3.0, readable-stream@^2.3.5: - version "2.3.8" - resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.8.tgz#91125e8042bba1b9887f49345f6277027ce8be9b" - integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA== - dependencies: - core-util-is "~1.0.0" - inherits "~2.0.3" - isarray "~1.0.0" - process-nextick-args "~2.0.0" - safe-buffer "~5.1.1" - string_decoder "~1.1.1" - util-deprecate "~1.0.1" - -readable-stream@^3.1.1, readable-stream@^3.4.0, readable-stream@^3.5.0, readable-stream@^3.6.0: - version "3.6.2" - resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.2.tgz#56a9b36ea965c00c5a93ef31eb111a0f11056967" - integrity sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA== - dependencies: - inherits "^2.0.3" - string_decoder "^1.1.1" - util-deprecate "^1.0.1" - -readable-stream@~1.0.26-4: - version "1.0.34" - resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c" - integrity sha512-ok1qVCJuRkNmvebYikljxJA/UEsKwLl2nI1OmaqAu4/UE+h0wKCHok4XkL/gvi39OacXvw59RJUOFUkDib2rHg== - dependencies: - core-util-is "~1.0.0" - inherits "~2.0.1" - isarray "0.0.1" - string_decoder "~0.10.x" - -readdirp@~3.6.0: - version "3.6.0" - resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.6.0.tgz#74a370bd857116e245b29cc97340cd431a02a6c7" - integrity sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA== - dependencies: - picomatch "^2.2.1" - -require-directory@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" - integrity sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q== - -require-from-string@^2.0.0: - version "2.0.2" - resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-2.0.2.tgz#89a7fdd938261267318eafe14f9c32e598c36909" - integrity sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw== - -resolve@1.17.0: - version "1.17.0" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.17.0.tgz#b25941b54968231cc2d1bb76a79cb7f2c0bf8444" - integrity sha512-ic+7JYiV8Vi2yzQGFWOkiZD5Z9z7O2Zhm9XMaTxdJExKasieFCr+yXZ/WmXsckHiKl12ar0y6XiXDx3m4RHn1w== - dependencies: - path-parse "^1.0.6" - -rimraf@^2.2.8: - version "2.7.1" - resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.7.1.tgz#35797f13a7fdadc566142c29d4f07ccad483e3ec" - integrity sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w== - dependencies: - glob "^7.1.3" - -ripemd160@^2.0.0, ripemd160@^2.0.1: - version "2.0.2" - resolved "https://registry.yarnpkg.com/ripemd160/-/ripemd160-2.0.2.tgz#a1c1a6f624751577ba5d07914cbc92850585890c" - integrity 
sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA== - dependencies: - hash-base "^3.0.0" - inherits "^2.0.1" - -rlp@^2.2.3: - version "2.2.7" - resolved "https://registry.yarnpkg.com/rlp/-/rlp-2.2.7.tgz#33f31c4afac81124ac4b283e2bd4d9720b30beaf" - integrity sha512-d5gdPmgQ0Z+AklL2NVXr/IoSjNZFfTVvQWzL/AM2AOcSzYP2xjlb0AC8YyCLc41MSNf6P6QVtjgPdmVtzb+4lQ== - dependencies: - bn.js "^5.2.0" - -run-parallel-limit@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/run-parallel-limit/-/run-parallel-limit-1.1.0.tgz#be80e936f5768623a38a963262d6bef8ff11e7ba" - integrity sha512-jJA7irRNM91jaKc3Hcl1npHsFLOXOoTkPCUL1JEa1R82O2miplXXRaGdjW/KM/98YQWDhJLiSs793CnXfblJUw== - dependencies: - queue-microtask "^1.2.2" - -rustbn.js@~0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/rustbn.js/-/rustbn.js-0.2.0.tgz#8082cb886e707155fd1cb6f23bd591ab8d55d0ca" - integrity sha512-4VlvkRUuCJvr2J6Y0ImW7NvTCriMi7ErOAqWk1y69vAdoNIzCF3yPmgeNzx+RQTLEDFq5sHfscn1MwHxP9hNfA== - -safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@^5.2.0, safe-buffer@~5.2.0: - version "5.2.1" - resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" - integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== - -safe-buffer@~5.1.0, safe-buffer@~5.1.1: - version "5.1.2" - resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" - integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== - -"safer-buffer@>= 2.1.2 < 3", safer-buffer@~2.1.0: - version "2.1.2" - resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" - integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== - -scrypt-js@3.0.1, scrypt-js@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/scrypt-js/-/scrypt-js-3.0.1.tgz#d314a57c2aef69d1ad98a138a21fe9eafa9ee312" - integrity sha512-cdwTTnqPu0Hyvf5in5asVdZocVDTNRmR7XEcJuIzMjJeSHybHl7vpB66AzwTaIg6CLSbtjcxc8fqcySfnTkccA== - -secp256k1@^4.0.1: - version "4.0.3" - resolved "https://registry.yarnpkg.com/secp256k1/-/secp256k1-4.0.3.tgz#c4559ecd1b8d3c1827ed2d1b94190d69ce267303" - integrity sha512-NLZVf+ROMxwtEj3Xa562qgv2BK5e2WNmXPiOdVIPLgs6lyTzMvBq0aWTYMI5XCP9jZMVKOcqZLw/Wc4vDkuxhA== - dependencies: - elliptic "^6.5.4" - node-addon-api "^2.0.0" - node-gyp-build "^4.2.0" - -semver@^5.5.0: - version "5.7.2" - resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.2.tgz#48d55db737c3287cd4835e17fa13feace1c41ef8" - integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g== - -semver@^6.3.0: - version "6.3.1" - resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" - integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== - -serialize-javascript@6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-6.0.0.tgz#efae5d88f45d7924141da8b5c3a7a7e663fefeb8" - integrity sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag== - dependencies: - randombytes "^2.1.0" - -setimmediate@^1.0.5: - version "1.0.5" - resolved 
"https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285" - integrity sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA== - -setprototypeof@1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.2.0.tgz#66c9a24a73f9fc28cbe66b09fed3d33dcaf1b424" - integrity sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw== - -sha.js@^2.4.0, sha.js@^2.4.8: - version "2.4.11" - resolved "https://registry.yarnpkg.com/sha.js/-/sha.js-2.4.11.tgz#37a5cf0b81ecbc6943de109ba2960d1b26584ae7" - integrity sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ== - dependencies: - inherits "^2.0.1" - safe-buffer "^5.0.1" - -solc@0.7.3: - version "0.7.3" - resolved "https://registry.yarnpkg.com/solc/-/solc-0.7.3.tgz#04646961bd867a744f63d2b4e3c0701ffdc7d78a" - integrity sha512-GAsWNAjGzIDg7VxzP6mPjdurby3IkGCjQcM8GFYZT6RyaoUZKmMU6Y7YwG+tFGhv7dwZ8rmR4iwFDrrD99JwqA== - dependencies: - command-exists "^1.2.8" - commander "3.0.2" - follow-redirects "^1.12.1" - fs-extra "^0.30.0" - js-sha3 "0.8.0" - memorystream "^0.3.1" - require-from-string "^2.0.0" - semver "^5.5.0" - tmp "0.0.33" - -source-map-support@^0.5.13: - version "0.5.21" - resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.21.tgz#04fe7c7f9e1ed2d662233c28cb2b35b9f63f6e4f" - integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w== - dependencies: - buffer-from "^1.0.0" - source-map "^0.6.0" - -source-map@^0.6.0: - version "0.6.1" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" - integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== - -split-ca@^1.0.0, split-ca@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/split-ca/-/split-ca-1.0.1.tgz#6c83aff3692fa61256e0cd197e05e9de157691a6" - integrity sha512-Q5thBSxp5t8WPTTJQS59LrGqOZqOsrhDGDVm8azCqIBjSBd7nd9o2PM+mDulQQkh8h//4U6hFZnc/mul8t5pWQ== - -ssh2@^1.11.0: - version "1.14.0" - resolved "https://registry.yarnpkg.com/ssh2/-/ssh2-1.14.0.tgz#8f68440e1b768b66942c9e4e4620b2725b3555bb" - integrity sha512-AqzD1UCqit8tbOKoj6ztDDi1ffJZ2rV2SwlgrVVrHPkV5vWqGJOVp5pmtj18PunkPJAuKQsnInyKV+/Nb2bUnA== - dependencies: - asn1 "^0.2.6" - bcrypt-pbkdf "^1.0.2" - optionalDependencies: - cpu-features "~0.0.8" - nan "^2.17.0" - -stacktrace-parser@^0.1.10: - version "0.1.10" - resolved "https://registry.yarnpkg.com/stacktrace-parser/-/stacktrace-parser-0.1.10.tgz#29fb0cae4e0d0b85155879402857a1639eb6051a" - integrity sha512-KJP1OCML99+8fhOHxwwzyWrlUuVX5GQ0ZpJTd1DFXhdkrvg1szxfHhawXUZ3g9TkXORQd4/WG68jMlQZ2p8wlg== - dependencies: - type-fest "^0.7.1" - -statuses@2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" - integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ== - -string-width@^4.1.0, string-width@^4.2.0: - version "4.2.3" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" - integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== - dependencies: - emoji-regex "^8.0.0" - is-fullwidth-code-point "^3.0.0" - strip-ansi "^6.0.1" - -string_decoder@^1.1.1: - 
version "1.3.0" - resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e" - integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== - dependencies: - safe-buffer "~5.2.0" - -string_decoder@~0.10.x: - version "0.10.31" - resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94" - integrity sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ== - -string_decoder@~1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" - integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg== - dependencies: - safe-buffer "~5.1.0" - -strip-ansi@^6.0.0, strip-ansi@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" - integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== - dependencies: - ansi-regex "^5.0.1" - -strip-hex-prefix@1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/strip-hex-prefix/-/strip-hex-prefix-1.0.0.tgz#0c5f155fef1151373377de9dbb588da05500e36f" - integrity sha512-q8d4ue7JGEiVcypji1bALTos+0pWtyGlivAWyPuTkHzuTCJqrK9sWxYQZUq6Nq3cuyv3bm734IhHvHtGGURU6A== - dependencies: - is-hex-prefixed "1.0.0" - -strip-json-comments@3.1.1: - version "3.1.1" - resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006" - integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== - -supports-color@8.1.1: - version "8.1.1" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c" - integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== - dependencies: - has-flag "^4.0.0" - -supports-color@^5.3.0: - version "5.5.0" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" - integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== - dependencies: - has-flag "^3.0.0" - -supports-color@^7.1.0: - version "7.2.0" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" - integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== - dependencies: - has-flag "^4.0.0" - -tar-fs@~1.16.3: - version "1.16.3" - resolved "https://registry.yarnpkg.com/tar-fs/-/tar-fs-1.16.3.tgz#966a628841da2c4010406a82167cbd5e0c72d509" - integrity sha512-NvCeXpYx7OsmOh8zIOP/ebG55zZmxLE0etfWRbWok+q2Qo8x/vOR/IJT1taADXPe+jsiu9axDb3X4B+iIgNlKw== - dependencies: - chownr "^1.0.1" - mkdirp "^0.5.1" - pump "^1.0.0" - tar-stream "^1.1.2" - -tar-fs@~2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/tar-fs/-/tar-fs-2.0.1.tgz#e44086c1c60d31a4f0cf893b1c4e155dabfae9e2" - integrity sha512-6tzWDMeroL87uF/+lin46k+Q+46rAJ0SyPGz7OW7wTgblI273hsBqk2C1j0/xNadNLKDTUL9BukSjB7cwgmlPA== - dependencies: - chownr "^1.1.1" - mkdirp-classic "^0.5.2" - pump "^3.0.0" - tar-stream "^2.0.0" - -tar-stream@^1.1.2: - version "1.6.2" - resolved 
"https://registry.yarnpkg.com/tar-stream/-/tar-stream-1.6.2.tgz#8ea55dab37972253d9a9af90fdcd559ae435c555" - integrity sha512-rzS0heiNf8Xn7/mpdSVVSMAWAoy9bfb1WOTYC78Z0UQKeKa/CWS8FOq0lKGNa8DWKAn9gxjCvMLYc5PGXYlK2A== - dependencies: - bl "^1.0.0" - buffer-alloc "^1.2.0" - end-of-stream "^1.0.0" - fs-constants "^1.0.0" - readable-stream "^2.3.0" - to-buffer "^1.1.1" - xtend "^4.0.0" - -tar-stream@^2.0.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/tar-stream/-/tar-stream-2.2.0.tgz#acad84c284136b060dc3faa64474aa9aebd77287" - integrity sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ== - dependencies: - bl "^4.0.3" - end-of-stream "^1.4.1" - fs-constants "^1.0.0" - inherits "^2.0.3" - readable-stream "^3.1.1" - -"through@>=2.2.7 <3": - version "2.3.8" - resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" - integrity sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg== - -tmp@0.0.33: - version "0.0.33" - resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" - integrity sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw== - dependencies: - os-tmpdir "~1.0.2" - -to-buffer@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/to-buffer/-/to-buffer-1.1.1.tgz#493bd48f62d7c43fcded313a03dcadb2e1213a80" - integrity sha512-lx9B5iv7msuFYE3dytT+KE5tap+rNYw+K4jVkb9R/asAb+pbBSM17jtunHplhBe6RRJdZx3Pn2Jph24O32mOVg== - -to-regex-range@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" - integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== - dependencies: - is-number "^7.0.0" - -toidentifier@1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.1.tgz#3be34321a88a820ed1bd80dfaa33e479fbb8dd35" - integrity sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA== - -tr46@~0.0.3: - version "0.0.3" - resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a" - integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw== - -tslib@^1.9.3: - version "1.14.1" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" - integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== - -tsort@0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/tsort/-/tsort-0.0.1.tgz#e2280f5e817f8bf4275657fd0f9aebd44f5a2786" - integrity sha512-Tyrf5mxF8Ofs1tNoxA13lFeZ2Zrbd6cKbuH3V+MQ5sb6DtBj5FjrXVsRWT8YvNAQTqNoz66dz1WsbigI22aEnw== - -tweetnacl-util@^0.15.1: - version "0.15.1" - resolved "https://registry.yarnpkg.com/tweetnacl-util/-/tweetnacl-util-0.15.1.tgz#b80fcdb5c97bcc508be18c44a4be50f022eea00b" - integrity sha512-RKJBIj8lySrShN4w6i/BonWp2Z/uxwC3h4y7xsRrpP59ZboCd0GpEVsOnMDYLMmKBpYhb5TgHzZXy7wTfYFBRw== - -tweetnacl@^0.14.3: - version "0.14.5" - resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" - integrity sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA== - -tweetnacl@^1.0.3: - version "1.0.3" - resolved 
"https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-1.0.3.tgz#ac0af71680458d8a6378d0d0d050ab1407d35596" - integrity sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw== - -type-fest@^0.21.3: - version "0.21.3" - resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.21.3.tgz#d260a24b0198436e133fa26a524a6d65fa3b2e37" - integrity sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w== - -type-fest@^0.7.1: - version "0.7.1" - resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.7.1.tgz#8dda65feaf03ed78f0a3f9678f1869147f7c5c48" - integrity sha512-Ne2YiiGN8bmrmJJEuTWTLJR32nh/JdL1+PSicowtNb0WFpn59GK8/lfD61bVtzguz7b3PBt74nxpv/Pw5po5Rg== - -typedarray@^0.0.6: - version "0.0.6" - resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777" - integrity sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA== - -undici-types@~5.26.4: - version "5.26.5" - resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617" - integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA== - -undici@^5.14.0: - version "5.27.2" - resolved "https://registry.yarnpkg.com/undici/-/undici-5.27.2.tgz#a270c563aea5b46cc0df2550523638c95c5d4411" - integrity sha512-iS857PdOEy/y3wlM3yRp+6SNQQ6xU0mmZcwRSriqk+et/cwWAtwmIGf6WkoDN2EK/AMdCO/dfXzIwi+rFMrjjQ== - dependencies: - "@fastify/busboy" "^2.0.0" - -universalify@^0.1.0: - version "0.1.2" - resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66" - integrity sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg== - -unpipe@1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec" - integrity sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ== - -util-deprecate@^1.0.1, util-deprecate@~1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" - integrity sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw== - -uuid@^8.3.2: - version "8.3.2" - resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.3.2.tgz#80d5b5ced271bb9af6c445f21a1a04c606cefbe2" - integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== - -webidl-conversions@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" - integrity sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ== - -whatwg-url@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d" - integrity sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw== - dependencies: - tr46 "~0.0.3" - webidl-conversions "^3.0.0" - -workerpool@6.2.1: - version "6.2.1" - resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.1.tgz#46fc150c17d826b86a008e5a4508656777e9c343" - integrity sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw== - -wrap-ansi@^7.0.0: - 
version "7.0.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" - integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== - dependencies: - ansi-styles "^4.0.0" - string-width "^4.1.0" - strip-ansi "^6.0.0" - -wrappy@1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" - integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ== - -ws@7.4.6: - version "7.4.6" - resolved "https://registry.yarnpkg.com/ws/-/ws-7.4.6.tgz#5654ca8ecdeee47c33a9a4bf6d28e2be2980377c" - integrity sha512-YmhHDO4MzaDLB+M9ym/mDA5z0naX8j7SIlT8f8z+I0VtzsRbekxEutHSme7NPS2qE8StCYQNUnfWdXta/Yu85A== - -ws@^7.4.6: - version "7.5.9" - resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.9.tgz#54fa7db29f4c7cec68b1ddd3a89de099942bb591" - integrity sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q== - -xtend@^4.0.0: - version "4.0.2" - resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54" - integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== - -y18n@^5.0.5: - version "5.0.8" - resolved "https://registry.yarnpkg.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55" - integrity sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA== - -yallist@^3.0.2: - version "3.1.1" - resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd" - integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g== - -yargs-parser@20.2.4: - version "20.2.4" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.4.tgz#b42890f14566796f85ae8e3a25290d205f154a54" - integrity sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA== - -yargs-parser@^20.2.2: - version "20.2.9" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.9.tgz#2eb7dc3b0289718fc295f362753845c41a0c94ee" - integrity sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w== - -yargs-unparser@2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/yargs-unparser/-/yargs-unparser-2.0.0.tgz#f131f9226911ae5d9ad38c432fe809366c2325eb" - integrity sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA== - dependencies: - camelcase "^6.0.0" - decamelize "^4.0.0" - flat "^5.0.2" - is-plain-obj "^2.1.0" - -yargs@16.2.0: - version "16.2.0" - resolved "https://registry.yarnpkg.com/yargs/-/yargs-16.2.0.tgz#1c82bf0f6b6a66eafce7ef30e376f49a12477f66" - integrity sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw== - dependencies: - cliui "^7.0.2" - escalade "^3.1.1" - get-caller-file "^2.0.5" - require-directory "^2.1.1" - string-width "^4.2.0" - y18n "^5.0.5" - yargs-parser "^20.2.2" - -yocto-queue@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" - integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== diff --git a/etc/env/base/chain.toml b/etc/env/base/chain.toml index 903696e3a819..6d1fdae53cee 100644 --- a/etc/env/base/chain.toml 
+++ b/etc/env/base/chain.toml @@ -90,8 +90,8 @@ fee_model_version = "V2" validation_computational_gas_limit = 300000 save_call_traces = true -bootloader_hash = "0x010008c37ecadea8b003884eb9d81fdfb7161b3b309504e5318f15da19c500d8" -default_aa_hash = "0x0100055da70d970f98ca4677a4b2fcecef5354f345cc5c6d13a78339e5fd87a9" +bootloader_hash = "0x010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b678" +default_aa_hash = "0x0100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe30" protective_reads_persistence_enabled = false diff --git a/etc/env/base/contract_verifier.toml b/etc/env/base/contract_verifier.toml index 223e6f166f8b..952b8c70823d 100644 --- a/etc/env/base/contract_verifier.toml +++ b/etc/env/base/contract_verifier.toml @@ -1,7 +1,4 @@ [contract_verifier] compilation_timeout = 30 -polling_interval = 1000 prometheus_port = 3314 port = 3070 -url = "http://127.0.0.1:3070" -threads_per_server = 128 diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index dbadbbc2c776..735da993058b 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -26,8 +26,8 @@ RECURSION_NODE_LEVEL_VK_HASH = "0x1186ec268d49f1905f8d9c1e9d39fc33e98c74f91d91a2 RECURSION_LEAF_LEVEL_VK_HASH = "0x101e08b00193e529145ee09823378ef51a3bc8966504064f1f6ba3f1ba863210" RECURSION_CIRCUITS_SET_VKS_HASH = "0x18c1639094f58177409186e8c48d9f577c9410901d2f1d486b3e7d6cf553ae4c" GENESIS_TX_HASH = "0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" -GENESIS_ROOT = "0x28a7e67393021f957572495f8fdadc2c477ae3f4f413ae18c16cff6ee65680e2" -GENESIS_BATCH_COMMITMENT = "0xc57085380434970021d87774b377ce1bb12f5b6064af11595e70011965747def" +GENESIS_ROOT = "0x7275936e5a0063b159d5d22734931fea07871e8d57e564d61ef56e4a6ee23e5c" +GENESIS_BATCH_COMMITMENT = "0xf5f9a5abe62e8a6e0cb2d34d27435c3e5a8fbd7e2e54ca1d108fc58cb86c708a" PRIORITY_TX_MAX_GAS_LIMIT = 72000000 DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT = 10000000 GENESIS_ROLLUP_LEAF_INDEX = "54" diff --git a/etc/env/base/external_price_api.toml b/etc/env/base/external_price_api.toml index bb22e86c432b..fe88e71e82a2 100644 --- a/etc/env/base/external_price_api.toml +++ b/etc/env/base/external_price_api.toml @@ -6,5 +6,5 @@ source = "forced" [external_price_api_client.forced] -numerator = 3 -denominator = 2 +numerator = 314 +denominator = 1000 diff --git a/etc/env/base/proof_data_handler.toml b/etc/env/base/proof_data_handler.toml index 7a1999a03c31..b56ac26fb177 100644 --- a/etc/env/base/proof_data_handler.toml +++ b/etc/env/base/proof_data_handler.toml @@ -1,4 +1,5 @@ [proof_data_handler] http_port = 3320 proof_generation_timeout_in_secs = 18000 +tee_proof_generation_timeout_in_secs = 600 tee_support = true diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index d8bef020c642..18107f0d4f93 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -1,6 +1,6 @@ # Environment configuration for the Rust code # We don't provide the group name like `[rust]` here, because we don't want -# these variables to be prefixed during the compiling. +# these variables to be prefixed during the compiling. # `RUST_LOG` environment variable for `env_logger` # Here we use TOML multiline strings: newlines will be trimmed. 
@@ -26,7 +26,6 @@ zksync_node_sync=info,\ zksync_node_consensus=info,\ zksync_contract_verification_server=info,\ zksync_node_api_server=info,\ -zksync_tee_verifier_input_producer=info,\ zksync_consensus_bft=info,\ zksync_consensus_network=info,\ zksync_consensus_storage=info,\ diff --git a/etc/env/ecosystems/mainnet.yaml b/etc/env/ecosystems/mainnet.yaml index 7d4266e8b761..f7b09150793b 100644 --- a/etc/env/ecosystems/mainnet.yaml +++ b/etc/env/ecosystems/mainnet.yaml @@ -1,3 +1,5 @@ +create2_factory_addr: 0xce0042b868300000d44a59004da54a005ffdcf9f +create2_factory_salt: '0x0000000000000000000000000000000000000000000000000000000000000000' ecosystem_contracts: bridgehub_proxy_addr: 0x303a465B659cBB0ab36eE643eA362c509EEb5213 state_transition_proxy_addr: 0xc2eE6b6af7d616f6e27ce7F4A451Aedc2b0F5f5C @@ -17,3 +19,6 @@ l1: verifier_addr: 0x70F3FBf8a427155185Ec90BED8a3434203de9604 validator_timelock_addr: 0x5D8ba173Dc6C3c90C8f7C04C9288BeF5FDbAd06E base_token_addr: '0x0000000000000000000000000000000000000000' +l2: + testnet_paymaster_addr: '0x0000000000000000000000000000000000000000' + default_l2_upgrader: '0x0000000000000000000000000000000000000000' diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 017d79dbe736..a4005e9477a8 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -79,11 +79,8 @@ operations_manager: delay_interval: 100 contract_verifier: compilation_timeout: 240 - polling_interval: 1000 prometheus_port: 3318 port: 3070 - url: http://127.0.0.1:3070 - threads_per_server: 128 circuit_breaker: sync_interval_ms: 120000 @@ -106,7 +103,7 @@ eth: max_eth_tx_data_size: 120000 aggregated_proof_sizes: [ 1 ] max_aggregated_tx_gas: 15000000 - max_acceptable_priority_fee_in_gwei: 100000000000 + max_acceptable_priority_fee_in_gwei: 100000000000 # typo: value is in wei (100 gwei) pubdata_sending_mode: BLOBS gas_adjuster: default_priority_fee_per_gas: 1000000000 @@ -169,6 +166,7 @@ witness_vector_generator: data_handler: http_port: 3320 proof_generation_timeout_in_secs: 18000 + tee_proof_generation_timeout_in_secs: 600 tee_support: true prover_gateway: api_url: http://127.0.0.1:3320 @@ -298,8 +296,8 @@ base_token_adjuster: external_price_api_client: source: "forced" client_timeout_ms: 10000 - forced_numerator: 3 - forced_denominator: 2 + forced_numerator: 314 + forced_denominator: 1000 house_keeper: @@ -312,7 +310,7 @@ prometheus: observability: log_format: plain - log_directives: 
"zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" + log_directives: "zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" # Uncomment only if needed # sentry: # url: unset @@ -376,6 +374,9 @@ da_dispatcher: external_proof_integration_api: http_port: 3073 +timestamp_asserter: + min_time_till_end_sec: 60 + consensus: port: 3054 server_addr: "127.0.0.1:3054" diff --git a/etc/env/file_based/genesis.yaml b/etc/env/file_based/genesis.yaml index f09a37fb6a45..912e901e2c02 100644 --- a/etc/env/file_based/genesis.yaml +++ b/etc/env/file_based/genesis.yaml @@ -12,5 +12,5 @@ prover: dummy_verifier: true genesis_protocol_semantic_version: 0.25.0 l1_batch_commit_data_generator_mode: Rollup -# Uncomment to enable EVM emulation (requires to run genesis) +# TODO: uncomment once EVM emulator is 
present in the `contracts` submodule # evm_emulator_hash: 0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91 diff --git a/etc/env/file_based/overrides/mainnet.yaml b/etc/env/file_based/overrides/mainnet.yaml index 0600abf694c2..847f9ae98aa6 100644 --- a/etc/env/file_based/overrides/mainnet.yaml +++ b/etc/env/file_based/overrides/mainnet.yaml @@ -1,5 +1,6 @@ state_keeper: - block_commit_deadline_ms: 3600000 + # Default batch seal time deadline: 8 hours + block_commit_deadline_ms: 28800000 minimal_l2_gas_price: 45250000 eth: sender: @@ -10,6 +11,7 @@ eth: aggregated_block_prove_deadline: 300 aggregated_block_execute_deadline: 300 timestamp_criteria_max_allowed_lag: 104000 # 29h + l1_batch_min_age_before_execute_seconds: 76000 # 21h wait_confirmations: null gas_adjuster: pricing_formula_parameter_a: 1.06 diff --git a/etc/env/file_based/overrides/testnet.yaml b/etc/env/file_based/overrides/testnet.yaml index e4da1ac96e26..4643a963ed7f 100644 --- a/etc/env/file_based/overrides/testnet.yaml +++ b/etc/env/file_based/overrides/testnet.yaml @@ -1,5 +1,6 @@ state_keeper: - block_commit_deadline_ms: 3600000 + # Default batch seal time deadline: 8 hours + block_commit_deadline_ms: 28800000 minimal_l2_gas_price: 25000000 eth: sender: @@ -10,6 +11,7 @@ eth: aggregated_block_prove_deadline: 300 aggregated_block_execute_deadline: 300 timestamp_criteria_max_allowed_lag: 104000 # 29h + l1_batch_min_age_before_execute_seconds: 1500 # 25m wait_confirmations: null gas_adjuster: pricing_formula_parameter_a: 1.1 diff --git a/etc/env/file_based/overrides/tests/integration.yaml b/etc/env/file_based/overrides/tests/integration.yaml new file mode 100644 index 000000000000..6ad031e29458 --- /dev/null +++ b/etc/env/file_based/overrides/tests/integration.yaml @@ -0,0 +1,4 @@ +experimental_vm: + # Use the shadow VM mode everywhere to catch divergences as early as possible + state_keeper_fast_vm_mode: SHADOW + api_fast_vm_mode: SHADOW diff --git a/etc/env/file_based/overrides/tests/loadtest-new.yaml b/etc/env/file_based/overrides/tests/loadtest-new.yaml index 2167f7347e09..e66625636b1f 100644 --- a/etc/env/file_based/overrides/tests/loadtest-new.yaml +++ b/etc/env/file_based/overrides/tests/loadtest-new.yaml @@ -1,7 +1,11 @@ db: merkle_tree: mode: LIGHTWEIGHT +api: + web3_json_rpc: + estimate_gas_optimize_search: true experimental_vm: state_keeper_fast_vm_mode: NEW + api_fast_vm_mode: NEW mempool: delay_interval: 50 diff --git a/etc/env/file_based/overrides/tests/loadtest-old.yaml b/etc/env/file_based/overrides/tests/loadtest-old.yaml index a2d66d1cf4a7..7b1a35870187 100644 --- a/etc/env/file_based/overrides/tests/loadtest-old.yaml +++ b/etc/env/file_based/overrides/tests/loadtest-old.yaml @@ -3,5 +3,6 @@ db: mode: LIGHTWEIGHT experimental_vm: state_keeper_fast_vm_mode: OLD + api_fast_vm_mode: OLD mempool: delay_interval: 50 diff --git a/etc/lint-config/ignore.yaml b/etc/lint-config/ignore.yaml index b4456a6c3fd4..009d0dbb0946 100644 --- a/etc/lint-config/ignore.yaml +++ b/etc/lint-config/ignore.yaml @@ -24,5 +24,7 @@ dirs: [ "artifacts-zk", "cache-zk", "contracts/", - "era-observability" + "era-observability", + "docs/js", + "prover/docs/js" ] diff --git a/etc/multivm_bootloaders/vm_gateway/commit b/etc/multivm_bootloaders/vm_gateway/commit new file mode 100644 index 000000000000..a3547f577034 --- /dev/null +++ b/etc/multivm_bootloaders/vm_gateway/commit @@ -0,0 +1 @@ +a8bf0ca28d43899882a2e123e2fdf1379f0fd656 diff --git a/etc/multivm_bootloaders/vm_gateway/fee_estimate.yul/fee_estimate.yul.zbin
b/etc/multivm_bootloaders/vm_gateway/fee_estimate.yul/fee_estimate.yul.zbin new file mode 100644 index 000000000000..fb6017f69cf0 Binary files /dev/null and b/etc/multivm_bootloaders/vm_gateway/fee_estimate.yul/fee_estimate.yul.zbin differ diff --git a/etc/multivm_bootloaders/vm_gateway/gas_test.yul/gas_test.yul.zbin b/etc/multivm_bootloaders/vm_gateway/gas_test.yul/gas_test.yul.zbin new file mode 100644 index 000000000000..c1726d8301ff Binary files /dev/null and b/etc/multivm_bootloaders/vm_gateway/gas_test.yul/gas_test.yul.zbin differ diff --git a/etc/multivm_bootloaders/vm_gateway/playground_batch.yul/playground_batch.yul.zbin b/etc/multivm_bootloaders/vm_gateway/playground_batch.yul/playground_batch.yul.zbin new file mode 100644 index 000000000000..b154276bd611 Binary files /dev/null and b/etc/multivm_bootloaders/vm_gateway/playground_batch.yul/playground_batch.yul.zbin differ diff --git a/etc/multivm_bootloaders/vm_gateway/proved_batch.yul/proved_batch.yul.zbin b/etc/multivm_bootloaders/vm_gateway/proved_batch.yul/proved_batch.yul.zbin new file mode 100644 index 000000000000..2506ce065d74 Binary files /dev/null and b/etc/multivm_bootloaders/vm_gateway/proved_batch.yul/proved_batch.yul.zbin differ diff --git a/etc/upgrades/1728066632-protocol-defense/mainnet/transactions.json b/etc/upgrades/1728066632-protocol-defense/mainnet/transactions.json index 694faf985f31..189447edfc18 100644 --- a/etc/upgrades/1728066632-protocol-defense/mainnet/transactions.json +++ b/etc/upgrades/1728066632-protocol-defense/mainnet/transactions.json @@ -249,5 +249,5 @@ "initAddress": "0xA7Cc1Bf4d8404d14caB00d2b2F0b2F4198CddDfF", "initCalldata": "0x08284e57000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000001ac0010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000006aa7a7b07108f7c5539645e32dd5c21cbf9eb660000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ae00000000000000000000000000000000000000000000000000000000000001b00000000000000000000000000000000000000000000000000000000006723e4d5000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa2000000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000018c000000000000000000000000000000000000
000000000000000000000000018e0000000000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000019200000000000000000000000000000000000000000000000000000000000001624e9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b
6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f0000000000000000000000000000000000000000000000000000000000008009000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a0000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" }, - "stmSetChainCreationCalldata": "0xf6370c7b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000006e2bc597f1e83f9fc7c1f69157f2c1247687397128a7e67393021f957572495f8fdadc2c477ae3f4f413ae18c16cff6ee65680e20000000000000000000000000000000000000000000000000000000000000036c57085380434970021d87774b377ce1bb12f5b6064af11595e70011965747def00000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000060000000000000000000000000d43b0b525e2cb6005f39ced7b69d3437f0fb840f0000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000a2000000000000000000000000090c0a0a63d7ff47bfaa1e9f8fa554dabc986504a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f0000000000000000000000000000000000000000000000000000000000000000000000000000000081754d2e48e3e553ba6dfd193fc72b3a0c6076
d900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d000000000000000000000000000000000000000000000000000000000000000000000000000000005575218cecd370e1d630d1adb03c254b0b376821000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f430000000000000000000000000
0000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de3500000000000000000000000000000000000000000000000000000000000000000000000000000000bb13642f795014e0eac2b0d52ecd5162ecb667120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001c000000000000000000000000006aa7a7b07108f7c5539645e32dd5c21cbf9eb66f520cd5b37e74e19fdb369c8d676a04dce8a19457497ac6686d2bb95d94109c8f9664f4324c1400fa5c3822d667f30e873f53f1b8033180cd15fe41c1e2355c60000000000000000000000000000000000000000000000000000000000000000010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f4240000000000000000000000000000000000000000000000000000000000001d4c000000000000000000000000000000000000000000000000000000000000182b80000000000000000000000000000000000000000000000000000000004c4b400000000000000000000000000000000000000000000000000000000000ee6b280000000000000000000000000273bdccdd979510adf4fb801d92f64b243c01fe2" + "stmSetChainCreationCalldata": 
"0xf6370c7b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000006e2bc597f1e83f9fc7c1f69157f2c124768739717275936e5a0063b159d5d22734931fea07871e8d57e564d61ef56e4a6ee23e5c0000000000000000000000000000000000000000000000000000000000000036f5f9a5abe62e8a6e0cb2d34d27435c3e5a8fbd7e2e54ca1d108fc58cb86c708a00000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000060000000000000000000000000d43b0b525e2cb6005f39ced7b69d3437f0fb840f0000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000a2000000000000000000000000090c0a0a63d7ff47bfaa1e9f8fa554dabc986504a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f0000000000000000000000000000000000000000000000000000000000000000000000000000000081754d2e48e3e553ba6dfd193fc72b3a0c6076d900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000
000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d000000000000000000000000000000000000000000000000000000000000000000000000000000005575218cecd370e1d630d1adb03c254b0b376821000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de3500000000000000000000000000000000000000000000000000000000000000000000000000000000bb13642f795014e0eac2b0d52ecd5162ecb667120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001c000000000000000000000000006aa7a7b07108f7c5539645e32dd5c21cbf9eb66f520cd5b37e74e19fdb369c8d676a04dce8a19457497ac6686d2bb95d94109c8f9664f4324c1400fa5c3822d667f30e873f53f1b8033180cd15fe41c1e2355c60000000000000000000000000000000000000000000000000000000000000000010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7c
abe3000000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f4240000000000000000000000000000000000000000000000000000000000001d4c000000000000000000000000000000000000000000000000000000000000182b80000000000000000000000000000000000000000000000000000000004c4b400000000000000000000000000000000000000000000000000000000000ee6b280000000000000000000000000273bdccdd979510adf4fb801d92f64b243c01fe2" } \ No newline at end of file diff --git a/etc/upgrades/1728066632-protocol-defense/testnet/transactions.json b/etc/upgrades/1728066632-protocol-defense/testnet/transactions.json index ffdfa48e5033..84c05680e046 100644 --- a/etc/upgrades/1728066632-protocol-defense/testnet/transactions.json +++ b/etc/upgrades/1728066632-protocol-defense/testnet/transactions.json @@ -249,5 +249,5 @@ "initAddress": "0xA7Cc1Bf4d8404d14caB00d2b2F0b2F4198CddDfF", "initCalldata": "0x08284e57000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000001ac0010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe300000000000000000000000009a2ebeb3676d4c593443fd0bb1bb9977c73b41180000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ae00000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000671a8ee8000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa2000000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000018c000000000000000000000000000000000000000000000000000000000000018e0000000000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000019200000000000000000000000000000000000000000000000000000000000001624e9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a0000000000000000000000000000000000000000000000000000000000000056000000000000000000000000000000000000000000000000000000000000
0062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000
0000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af50
00000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf5460000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" }, - "stmSetChainCreationCalldata": "0xf6370c7b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000006e2bc597f1e83f9fc7c1f69157f2c1247687397128a7e67393021f957572495f8fdadc2c477ae3f4f413ae18c16cff6ee65680e20000000000000000000000000000000000000000000000000000000000000036c57085380434970021d87774b377ce1bb12f5b6064af11595e70011965747def00000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000060000000000000000000000000d43b0b525e2cb6005f39ced7b69d3437f0fb840f0000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000a2000000000000000000000000090c0a0a63d7ff47bfaa1e9f8fa554dabc986504a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f0000000000000000000000000000000000000000000000000000000000000000000000000000000081754d2e48e3e553ba6dfd193fc72b3a0c6076d900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000
000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000b2d1ca55203e96b1d1e6f034805431b7ac983185000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de3500000000000000000000000000000000000000000000000000000000000000000000000000000000bb13642f795014e0eac2b0d52ecd5162ecb667120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006
f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001c00000000000000000000000009a2ebeb3676d4c593443fd0bb1bb9977c73b4118f520cd5b37e74e19fdb369c8d676a04dce8a19457497ac6686d2bb95d94109c8f9664f4324c1400fa5c3822d667f30e873f53f1b8033180cd15fe41c1e2355c60000000000000000000000000000000000000000000000000000000000000000010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f4240000000000000000000000000000000000000000000000000000000000001d4c000000000000000000000000000000000000000000000000000000000000182b80000000000000000000000000000000000000000000000000000000004c4b400000000000000000000000000000000000000000000000000000000000ee6b2800000000000000000000000009c30b772c02c1008efcf47cb282c5e4cdde4c2f1" + "stmSetChainCreationCalldata": "0xf6370c7b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000006e2bc597f1e83f9fc7c1f69157f2c124768739717275936e5a0063b159d5d22734931fea07871e8d57e564d61ef56e4a6ee23e5c0000000000000000000000000000000000000000000000000000000000000036f5f9a5abe62e8a6e0cb2d34d27435c3e5a8fbd7e2e54ca1d108fc58cb86c708a00000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000060000000000000000000000000d43b0b525e2cb6005f39ced7b69d3437f0fb840f0000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000a2000000000000000000000000090c0a0a63d7ff47bfaa1e9f8fa554dabc986504a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f0000000000000000000000000000000000000000000000000000000000000000000000000000000081754d2e48e3e553ba6dfd193fc72b3a0c6076d90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000b2d1ca55203e96b1d1e6f034805431b7ac983185000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de3500000000000000000
000000000000000000000000000000000000000000000000000000000000000bb13642f795014e0eac2b0d52ecd5162ecb667120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001c00000000000000000000000009a2ebeb3676d4c593443fd0bb1bb9977c73b4118f520cd5b37e74e19fdb369c8d676a04dce8a19457497ac6686d2bb95d94109c8f9664f4324c1400fa5c3822d667f30e873f53f1b8033180cd15fe41c1e2355c60000000000000000000000000000000000000000000000000000000000000000010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f4240000000000000000000000000000000000000000000000000000000000001d4c000000000000000000000000000000000000000000000000000000000000182b80000000000000000000000000000000000000000000000000000000004c4b400000000000000000000000000000000000000000000000000000000000ee6b2800000000000000000000000009c30b772c02c1008efcf47cb282c5e4cdde4c2f1" } \ No newline at end of file diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index bd7df8ab456b..71fe3ca3d2e2 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -204,6 +204,8 @@ export function prepareUpgradeCalldata( genesisBatchHash: string, genesisIndexRepeatedStorageChanges: number, genesisBatchCommitment: string, + chainInitAddress: string, + chainInitCalldata: string, prepareDirectOperation?: boolean, chainId?: string ) { @@ -215,8 +217,8 @@ export function prepareUpgradeCalldata( let chainCreationDiamondCut: DiamondCutData = { facetCuts: facetCuts.filter((cut) => cut.action == Action.Add), - initAddress: genesisUpgradeAddress, - initCalldata: '0x' + initAddress: chainInitAddress, + initCalldata: chainInitCalldata }; let chainCreationParams: ChainCreationParams = { @@ -284,6 +286,8 @@ export function buildDefaultUpgradeTx( genesisBatchHash, genesisIndexRepeatedStorageChanges, genesisBatchCommitment, + chainInitAddress, + chainInitCalldata, prepareDirectOperation?, chainId? 
 ) {
@@ -375,6 +379,8 @@ export function buildDefaultUpgradeTx(
         genesisBatchHash,
         genesisIndexRepeatedStorageChanges,
         genesisBatchCommitment,
+        chainInitAddress,
+        chainInitCalldata,
         prepareDirectOperation,
         chainId
     );
@@ -427,6 +433,8 @@ command
     .option('--genesis-batch-hash ')
     .option('--genesis-index-repeated-storage-changes ')
     .option('--genesis-batch-commitment ')
+    .option('--chain-init-address ')
+    .option('--chain-init-calldata ')
     .action(async (options) => {
         buildDefaultUpgradeTx(
             options.environment,
@@ -440,6 +448,8 @@ command
             options.genesisBatchHash,
             options.genesisIndexRepeatedStorageChanges,
             options.genesisBatchCommitment,
+            options.chainInitAddress,
+            options.chainInitCalldata,
             options.prepareDirectOperation,
             options.chainId
         );
diff --git a/infrastructure/zk/src/compiler.ts b/infrastructure/zk/src/compiler.ts
index 9a90154909ba..881908eeacea 100644
--- a/infrastructure/zk/src/compiler.ts
+++ b/infrastructure/zk/src/compiler.ts
@@ -2,7 +2,6 @@ import { Command } from 'commander';
 import * as utils from 'utils';
 
 export async function compileTestContracts() {
-    await utils.spawn('yarn workspace contracts-test-data build');
     await utils.spawn('yarn ts-integration build');
     await utils.spawn('yarn ts-integration build-yul');
 }
diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts
index 063777a671b5..dc716a0b257b 100644
--- a/infrastructure/zk/src/docker.ts
+++ b/infrastructure/zk/src/docker.ts
@@ -16,7 +16,8 @@ const IMAGES = [
     'prover-job-monitor',
     'proof-fri-gpu-compressor',
     'snapshots-creator',
-    'verified-sources-fetcher'
+    'verified-sources-fetcher',
+    'prover-autoscaler'
 ];
 
 const DOCKER_REGISTRIES = ['us-docker.pkg.dev/matterlabs-infra/matterlabs-docker', 'matterlabs'];
@@ -76,7 +77,8 @@ function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: strin
         'contract-verifier',
         'prover-fri-gateway',
         'prover-job-monitor',
-        'snapshots-creator'
+        'snapshots-creator',
+        'prover-autoscaler'
     ].includes(image)
         ? ['latest', 'latest2.0', `2.0-${imageTagSha}`, `${imageTagSha}`, `2.0-${imageTagShaTS}`, `${imageTagShaTS}`]
         : [`latest2.0`, 'latest'];
diff --git a/package.json b/package.json
index af745160c30d..9e3428e614cc 100644
--- a/package.json
+++ b/package.json
@@ -9,7 +9,6 @@
     "contracts/l1-contracts",
     "contracts/l2-contracts",
     "contracts/system-contracts",
-    "etc/contracts-test-data",
     "etc/ERC20",
     "etc/utils",
     "infrastructure/zk",
diff --git a/prover/CHANGELOG.md b/prover/CHANGELOG.md
index 0201ce4a920f..6687b1450ba0 100644
--- a/prover/CHANGELOG.md
+++ b/prover/CHANGELOG.md
@@ -1,5 +1,77 @@
 # Changelog
+## [17.0.0](https://github.com/matter-labs/zksync-era/compare/prover-v16.6.0...prover-v17.0.0) (2024-10-31)
+
+
+### ⚠ BREAKING CHANGES
+
+* force minor bump for provers release -- v25 ([#3208](https://github.com/matter-labs/zksync-era/issues/3208))
+
+### Features
+
+* force minor bump for provers release -- v25 ([#3208](https://github.com/matter-labs/zksync-era/issues/3208)) ([6851e35](https://github.com/matter-labs/zksync-era/commit/6851e353f01cde5d385403dedb68bcae76d06966))
+
+## [16.6.0](https://github.com/matter-labs/zksync-era/compare/prover-v16.5.0...prover-v16.6.0) (2024-10-31)
+
+
+### Features
+
+* (DB migration) Rename recursion_scheduler_level_vk_hash to snark_wrapper_vk_hash ([#2809](https://github.com/matter-labs/zksync-era/issues/2809)) ([64f9551](https://github.com/matter-labs/zksync-era/commit/64f95514c99f95da2a19a97ff064c29a97efc22f))
+* Add initial version prover_autoscaler ([#2993](https://github.com/matter-labs/zksync-era/issues/2993)) ([ebf9604](https://github.com/matter-labs/zksync-era/commit/ebf9604c5ab2a1cae1ffd2f9c922f35a1d0ad876))
+* added seed_peers to consensus global config ([#2920](https://github.com/matter-labs/zksync-era/issues/2920)) ([e9d1d90](https://github.com/matter-labs/zksync-era/commit/e9d1d905f1ce86f9de2cf39d79be4b5aada4a81d))
+* attester committees data extractor (BFT-434) ([#2684](https://github.com/matter-labs/zksync-era/issues/2684)) ([92dde03](https://github.com/matter-labs/zksync-era/commit/92dde039ee8a0bc08e2019b7fa6f243a34d9816f))
+* Bump crypto and protocol deps ([#2825](https://github.com/matter-labs/zksync-era/issues/2825)) ([a5ffaf1](https://github.com/matter-labs/zksync-era/commit/a5ffaf1b4e291d6f09ba8c1f224f5900665bffc4))
+* **circuit_prover:** Add circuit prover ([#2908](https://github.com/matter-labs/zksync-era/issues/2908)) ([48317e6](https://github.com/matter-labs/zksync-era/commit/48317e640a00b016bf7bf782cc94fccaf077ed6d))
+* **consensus:** Support for syncing blocks before consensus genesis over p2p network ([#3040](https://github.com/matter-labs/zksync-era/issues/3040)) ([d3edc3d](https://github.com/matter-labs/zksync-era/commit/d3edc3d817c151ed00d4fa822fdae0a746e33356))
+* **da-clients:** add secrets ([#2954](https://github.com/matter-labs/zksync-era/issues/2954)) ([f4631e4](https://github.com/matter-labs/zksync-era/commit/f4631e4466de620cc1401b326d864cdb8b48a05d))
+* gateway preparation ([#3006](https://github.com/matter-labs/zksync-era/issues/3006)) ([16f2757](https://github.com/matter-labs/zksync-era/commit/16f275756cd28024a6b11ac1ac327eb5b8b446e1))
+* Integrate tracers and implement circuits tracer in vm2 ([#2653](https://github.com/matter-labs/zksync-era/issues/2653)) ([87b02e3](https://github.com/matter-labs/zksync-era/commit/87b02e3ab5c1f61d59dd0f0eefa9ec33a7b55488))
+* Move prover data to /home/popzxc/workspace/current/zksync-era/prover/data ([#2778](https://github.com/matter-labs/zksync-era/issues/2778)) ([62e4d46](https://github.com/matter-labs/zksync-era/commit/62e4d4619dde9d6bd9102f1410eea75b0e2051c5))
+* Prover e2e test ([#2975](https://github.com/matter-labs/zksync-era/issues/2975)) ([0edd796](https://github.com/matter-labs/zksync-era/commit/0edd7962429b3530ae751bd7cc947c97193dd0ca))
+* **prover:** add CLI option to run prover with max allocation ([#2794](https://github.com/matter-labs/zksync-era/issues/2794)) ([35e4cae](https://github.com/matter-labs/zksync-era/commit/35e4cae29314fa98ce356a875e08b3e869a31036))
+* **prover:** Add endpoint to PJM to get queue reports ([#2918](https://github.com/matter-labs/zksync-era/issues/2918)) ([2cec83f](https://github.com/matter-labs/zksync-era/commit/2cec83f26e0b9309387135ca43718af4fcd6f6b1))
+* **prover:** Add error to panic message of prover ([#2807](https://github.com/matter-labs/zksync-era/issues/2807)) ([6e057eb](https://github.com/matter-labs/zksync-era/commit/6e057ebf277e0cbc7964079c01ef0348e006a53b))
+* **prover:** Add min_provers and dry_run features. Improve metrics and test. ([#3129](https://github.com/matter-labs/zksync-era/issues/3129)) ([7c28964](https://github.com/matter-labs/zksync-era/commit/7c289649b7b3c418c7193a35b51c264cf4970f3c))
+* **prover:** Add scale failure events watching and pods eviction. ([#3175](https://github.com/matter-labs/zksync-era/issues/3175)) ([dd166f8](https://github.com/matter-labs/zksync-era/commit/dd166f887b11a8dfb039a0030dda923c481f67af))
+* **prover:** Add sending scale requests for Scaler targets ([#3194](https://github.com/matter-labs/zksync-era/issues/3194)) ([767c5bc](https://github.com/matter-labs/zksync-era/commit/767c5bc6a62c402c099abe93b7dbecbb59e4acb7))
+* **prover:** Add support for scaling WGs and compressor ([#3179](https://github.com/matter-labs/zksync-era/issues/3179)) ([c41db9e](https://github.com/matter-labs/zksync-era/commit/c41db9ecec1c21b80969604f703ac6990f6f3434))
+* **prover:** Autoscaler sends scale request to appropriate agents. ([#3150](https://github.com/matter-labs/zksync-era/issues/3150)) ([bfedac0](https://github.com/matter-labs/zksync-era/commit/bfedac03b53055c6e2d5fa6bd6bdc78e2cb1724c))
+* **prover:** Extract keystore into a separate crate ([#2797](https://github.com/matter-labs/zksync-era/issues/2797)) ([e239260](https://github.com/matter-labs/zksync-era/commit/e239260d77b55fcce0b1f485029762a605cdb6d0))
+* **prover:** Optimize setup keys loading ([#2847](https://github.com/matter-labs/zksync-era/issues/2847)) ([19887ef](https://github.com/matter-labs/zksync-era/commit/19887ef21a8bbd26977353f8ee277b711850dfd2))
+* **prover:** Refactor WitnessGenerator ([#2845](https://github.com/matter-labs/zksync-era/issues/2845)) ([934634b](https://github.com/matter-labs/zksync-era/commit/934634b149377c730ec39e904508c40628ff4019))
+* **prover:** Update witness generator to zkevm_test_harness 0.150.6 ([#3029](https://github.com/matter-labs/zksync-era/issues/3029)) ([2151c28](https://github.com/matter-labs/zksync-era/commit/2151c2832498ca6e7ee1eee0bfdf6a0568345fee))
+* **prover:** Use query macro instead string literals for queries ([#2930](https://github.com/matter-labs/zksync-era/issues/2930)) ([1cf959d](https://github.com/matter-labs/zksync-era/commit/1cf959da12d2b6369f34a67ccc2575b4b173d75a))
+* **prover:** WG refactoring [#3](https://github.com/matter-labs/zksync-era/issues/3) ([#2942](https://github.com/matter-labs/zksync-era/issues/2942)) ([df68762](https://github.com/matter-labs/zksync-era/commit/df6876221936a44fa2fb8c80c01d043d229621fc))
+* **prover:** WitnessGenerator refactoring [#2](https://github.com/matter-labs/zksync-era/issues/2) ([#2899](https://github.com/matter-labs/zksync-era/issues/2899)) ([36e5340](https://github.com/matter-labs/zksync-era/commit/36e534091f73f4e3ce86e322fb20842cda6a6b61))
+* Refactor metrics/make API use binaries ([#2735](https://github.com/matter-labs/zksync-era/issues/2735)) ([8ed086a](https://github.com/matter-labs/zksync-era/commit/8ed086afecfcad30bfda44fc4d29a00beea71cca))
+* Remove prover db from house keeper ([#2795](https://github.com/matter-labs/zksync-era/issues/2795)) ([85b7346](https://github.com/matter-labs/zksync-era/commit/85b734664b4306e988da07005860a7ea0fb7d22d))
+* **tee:** use hex serialization for RPC responses ([#2887](https://github.com/matter-labs/zksync-era/issues/2887)) ([abe0440](https://github.com/matter-labs/zksync-era/commit/abe0440811ae4daf4a0f307922a282e9664308e0))
+* **utils:** Rework locate_workspace, introduce Workspace type ([#2830](https://github.com/matter-labs/zksync-era/issues/2830)) ([d256092](https://github.com/matter-labs/zksync-era/commit/d2560928cc67b40a97a5497ac8542915bf6f91a9))
+* vm2 tracers can access storage ([#3114](https://github.com/matter-labs/zksync-era/issues/3114)) ([e466b52](https://github.com/matter-labs/zksync-era/commit/e466b52948e3c4ed1cb5af4fd999a52028e4d216))
+* **vm:** Do not panic on VM divergence ([#2705](https://github.com/matter-labs/zksync-era/issues/2705)) ([7aa5721](https://github.com/matter-labs/zksync-era/commit/7aa5721d22e253d05d369a60d5bcacbf52021c48))
+* **vm:** EVM emulator support – base ([#2979](https://github.com/matter-labs/zksync-era/issues/2979)) ([deafa46](https://github.com/matter-labs/zksync-era/commit/deafa460715334a77edf9fe8aa76fa90029342c4))
+* **vm:** Extract batch executor to separate crate ([#2702](https://github.com/matter-labs/zksync-era/issues/2702)) ([b82dfa4](https://github.com/matter-labs/zksync-era/commit/b82dfa4d29fce107223c3638fe490b5cb0f28d8c))
+* **zk_toolbox:** `zk_supervisor prover`
subcommand ([#2820](https://github.com/matter-labs/zksync-era/issues/2820)) ([3506731](https://github.com/matter-labs/zksync-era/commit/3506731d1702bdec8c6b5b41cabca9a257f0269b)) +* **zk_toolbox:** Add external_node consensus support ([#2821](https://github.com/matter-labs/zksync-era/issues/2821)) ([4a10d7d](https://github.com/matter-labs/zksync-era/commit/4a10d7d9554d6c1aa2f4fc46557d40baaad8ff2f)) +* **zk_toolbox:** Add SQL format for zk supervisor ([#2950](https://github.com/matter-labs/zksync-era/issues/2950)) ([540e5d7](https://github.com/matter-labs/zksync-era/commit/540e5d7554f54e80d52f1bfae37e03ca8f787baf)) +* **zk_toolbox:** deploy legacy bridge ([#2837](https://github.com/matter-labs/zksync-era/issues/2837)) ([93b4e08](https://github.com/matter-labs/zksync-era/commit/93b4e08257802d11108870d867dd59fa35e52733)) +* **zk_toolbox:** Redesign zk_toolbox commands ([#3003](https://github.com/matter-labs/zksync-era/issues/3003)) ([114834f](https://github.com/matter-labs/zksync-era/commit/114834f357421c62d596a1954fac8ce615cfde49)) +* **zkstack_cli:** Build dependencies at zkstack build time ([#3157](https://github.com/matter-labs/zksync-era/issues/3157)) ([724d9a9](https://github.com/matter-labs/zksync-era/commit/724d9a9c7f2127263845b640c843e751fd3c21ae)) + + +### Bug Fixes + +* allow compilation under current toolchain ([#3176](https://github.com/matter-labs/zksync-era/issues/3176)) ([89eadd3](https://github.com/matter-labs/zksync-era/commit/89eadd353c4fb84bb815ae56b29f4ff3467b80f3)) +* **api:** Return correct flat call tracer ([#2917](https://github.com/matter-labs/zksync-era/issues/2917)) ([218646a](https://github.com/matter-labs/zksync-era/commit/218646aa1c56200f4ffee99b7f83366e2689354f)) +* count SECP256 precompile to account validation gas limit as well ([#2859](https://github.com/matter-labs/zksync-era/issues/2859)) ([fee0c2a](https://github.com/matter-labs/zksync-era/commit/fee0c2ad08a5ab4a04252765b367eb9fbb1f3db7)) +* Fix Doc lint. ([#3158](https://github.com/matter-labs/zksync-era/issues/3158)) ([c79949b](https://github.com/matter-labs/zksync-era/commit/c79949b8ffde9867b961192afa6c815b44865ae4)) +* ignore unknown fields in rpc json response ([#2962](https://github.com/matter-labs/zksync-era/issues/2962)) ([692ea73](https://github.com/matter-labs/zksync-era/commit/692ea73f75a5fb9db2b4ac33ad24d20568638742)) +* **prover:** Do not exit on missing watcher data. 
([#3119](https://github.com/matter-labs/zksync-era/issues/3119)) ([76ed6d9](https://github.com/matter-labs/zksync-era/commit/76ed6d966051c56f8e894c18461c5ea284b1a74b)) +* **prover:** fix setup_metadata_to_setup_data_key ([#2875](https://github.com/matter-labs/zksync-era/issues/2875)) ([4ae5a93](https://github.com/matter-labs/zksync-era/commit/4ae5a93e9e96cd0cd529baf9ffa78c1b21a9c4b1)) +* **prover:** Run for zero queue to allow scaling down to 0 ([#3115](https://github.com/matter-labs/zksync-era/issues/3115)) ([bbe1919](https://github.com/matter-labs/zksync-era/commit/bbe191937fa5c5711a7164fd4f0c2ae65cda0833)) +* **tee_verifier:** correctly initialize storage for re-execution ([#3017](https://github.com/matter-labs/zksync-era/issues/3017)) ([9d88373](https://github.com/matter-labs/zksync-era/commit/9d88373f1b745c489e98e5ef542644a70e815498)) +* **vm:** Prepare new VM for use in API server and fix divergences ([#2994](https://github.com/matter-labs/zksync-era/issues/2994)) ([741b77e](https://github.com/matter-labs/zksync-era/commit/741b77e080f75c6a93d3ee779b1c9ce4297618f9)) + ## [16.5.0](https://github.com/matter-labs/zksync-era/compare/prover-v16.4.0...prover-v16.5.0) (2024-08-28) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 1d584a473d96..af249b435a6b 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -304,7 +304,7 @@ version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f0e249228c6ad2d240c2dc94b714d711629d52bad946075d8e9b2f5391f0703" dependencies = [ - "bindgen 0.69.4", + "bindgen", "cc", "cmake", "dunce", @@ -326,7 +326,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "itoa", "matchit", @@ -341,7 +341,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -461,29 +461,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bindgen" -version = "0.59.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" -dependencies = [ - "bitflags 1.3.2", - "cexpr", - "clang-sys", - "clap 2.34.0", - "env_logger 0.9.3", - "lazy_static", - "lazycell", - "log", - "peeking_take_while", - "proc-macro2 1.0.85", - "quote 1.0.36", - "regex", - "rustc-hash", - "shlex", - "which", -] - [[package]] name = "bindgen" version = "0.69.4" @@ -674,9 +651,9 @@ dependencies = [ [[package]] name = "boojum-cuda" -version = "0.150.9" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f9a6d958dd58a0899737e5a1fc6597aefcf7980bf8be5be5329e701cbd45ca" +checksum = "4b63a717789f92f16fd566c78655d64017c690be59e473c3e769080c975a1f9e" dependencies = [ "boojum", "cmake", @@ -717,7 +694,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" dependencies = [ "memchr", - "regex-automata 0.4.6", + "regex-automata 0.4.8", "serde", ] @@ -822,11 +799,11 @@ dependencies = [ [[package]] name = "circuit_definitions" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b532214f063e5e0ee5c0fc1d3afd56dec541efa68b8985f14cc55cc324f4c48" +checksum = "76be9ee6e75f1f948d175ab9820ecc7189f72154c95ca503a1974012356f5363" dependencies = [ - "circuit_encodings 0.150.5", + "circuit_encodings 0.150.7", "crossbeam", "derivative", "seq-macro", @@ 
-872,14 +849,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67617688c66640c84f9b98ff26d48f7898dca4faeb45241a4f21ec333788e7b" +checksum = "2501cc688ef391013019495ae7035cfd54f86987e36d10f73976ce4c5d413c5a" dependencies = [ "derivative", "serde", - "zk_evm 0.150.5", - "zkevm_circuits 0.150.5", + "zk_evm 0.150.7", + "zkevm_circuits 0.150.7", ] [[package]] @@ -939,11 +916,11 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21017310971d4a051e4a52ad70eed11d1ae69defeca8314f73a3a4bad16705a9" +checksum = "917d27db531fdd98a51e42ea465bc097f48cc849e7fad68d7856087d15125be1" dependencies = [ - "circuit_encodings 0.150.5", + "circuit_encodings 0.150.7", "derivative", "rayon", "serde", @@ -1094,6 +1071,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5241cd7938b1b415942e943ea96f615953d500b50347b505b0b507080bad5a6f" +[[package]] +name = "const-decoder" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b381abde2cdc1bc3817e394b24e05667a2dc89f37570cbd34d9c397d99e56e3f" +dependencies = [ + "compile-fmt", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -1628,6 +1614,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "either" version = "1.12.0" @@ -1701,6 +1699,26 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "enum-ordinalize" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +dependencies = [ + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "enum_dispatch" version = "0.3.13" @@ -1764,9 +1782,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "era_cudart" -version = "0.150.9" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51f0d6e329b2c11d134c3140951209be968ef316ed64ddde75640eaed7f10264" +checksum = "ad950752eeb44f8938be405b95a1630f82e903f4a7adda355d92aad135fcd382" dependencies = [ "bitflags 2.6.0", "era_cudart_sys", @@ -1775,9 +1793,9 @@ dependencies = [ [[package]] name = "era_cudart_sys" -version = "0.150.9" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "060e8186234c7a281021fb95614e06e94e1fc7ab78938360a5c27af0f8fc6105" +checksum = "c38607d52509b5db97cc4447c8644d6c5ca84f22ff8a9254f984669b1eb82ed4" dependencies = [ "serde_json", ] @@ -2607,6 +2625,16 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +[[package]] +name = "humantime-serde" +version = "1.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" +dependencies = [ + "humantime", + "serde", +] + [[package]] name = "hyper" version = "0.14.29" @@ -2633,9 +2661,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.3.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", "futures-channel", @@ -2662,7 +2690,7 @@ dependencies = [ "futures-util", "headers", "http 1.1.0", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-rustls", "hyper-util", "pin-project-lite", @@ -2680,7 +2708,7 @@ checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "log", "rustls", @@ -2697,7 +2725,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "pin-project-lite", "tokio", @@ -2725,7 +2753,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-util", "native-tls", "tokio", @@ -2735,20 +2763,19 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.5" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", "http-body 1.0.0", - "hyper 1.3.1", + "hyper 1.5.0", "pin-project-lite", "socket2", "tokio", - "tower", "tower-service", "tracing", ] @@ -3098,7 +3125,7 @@ dependencies = [ "async-trait", "base64 0.22.1", "http-body 1.0.0", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-rustls", "hyper-util", "jsonrpsee-core", @@ -3109,7 +3136,7 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "tower", + "tower 0.4.13", "tracing", "url", ] @@ -3229,9 +3256,9 @@ dependencies = [ [[package]] name = "kube" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa21063c854820a77c5d7f8deeb7ffa55246d8304e4bcd8cce2956752c6604f8" +checksum = "efffeb3df0bd4ef3e5d65044573499c0e4889b988070b08c50b25b1329289a1f" dependencies = [ "k8s-openapi", "kube-client", @@ -3242,9 +3269,9 @@ dependencies = [ [[package]] name = "kube-client" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c2355f5c9d8a11900e71a6fe1e47abd5ec45bf971eb4b162ffe97b46db9bb7" +checksum = "8bf471ece8ff8d24735ce78dac4d091e9fcb8d74811aeb6b75de4d1c3f5de0f1" dependencies = [ "base64 0.22.1", "bytes", @@ -3255,7 +3282,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-http-proxy", "hyper-rustls", "hyper-timeout", @@ -3266,23 +3293,23 @@ dependencies = [ "pem", "rustls", "rustls-pemfile 2.1.2", - "secrecy", + "secrecy 0.10.3", "serde", "serde_json", "serde_yaml", "thiserror", "tokio", "tokio-util", - "tower", + "tower 0.5.1", "tower-http", "tracing", ] [[package]] name = "kube-core" -version = "0.95.0" +version = "0.96.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3030bd91c9db544a50247e7d48d7db9cf633c172732dce13351854526b1e666" +checksum = "f42346d30bb34d1d7adc5c549b691bce7aa3a1e60254e68fab7e2d7b26fe3d77" dependencies = [ "chrono", "form_urlencoded", @@ -3298,9 +3325,9 @@ dependencies = [ [[package]] name = "kube-derive" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa98be978eddd70a773aa8e86346075365bfb7eb48783410852dbf7cb57f0c27" +checksum = "f9364e04cc5e0482136c6ee8b7fb7551812da25802249f35b3def7aaa31e82ad" dependencies = [ "darling 0.20.10", "proc-macro2 1.0.85", @@ -3311,16 +3338,16 @@ dependencies = [ [[package]] name = "kube-runtime" -version = "0.95.0" +version = "0.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5895cb8aa641ac922408f128b935652b34c2995f16ad7db0984f6caa50217914" +checksum = "d3fbf1f6ffa98e65f1d2a9a69338bb60605d46be7edf00237784b89e62c9bd44" dependencies = [ "ahash 0.8.11", "async-broadcast", "async-stream", "async-trait", "backoff", - "derivative", + "educe", "futures 0.3.30", "hashbrown 0.14.5", "json-patch", @@ -4103,12 +4130,6 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem" version = "3.0.4" @@ -4452,7 +4473,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.3", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -4798,14 +4819,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.6", - "regex-syntax 0.8.3", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -4819,13 +4840,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.3", + "regex-syntax 0.8.5", ] [[package]] @@ -4836,9 +4857,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "rend" @@ -4905,7 +4926,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-rustls", "hyper-tls 0.6.0", "hyper-util", @@ -5350,7 +5371,15 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ - "serde", + "zeroize", +] + +[[package]] +name = 
"secrecy" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e891af845473308773346dc847b2c23ee78fe442e0472ac50e22a18a93d3ae5a" +dependencies = [ "zeroize", ] @@ -5711,9 +5740,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shivini" -version = "0.150.9" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebb6d928451f0779f14da02ee9d51d4bde560328edc6471f0d5c5c11954345c4" +checksum = "9d2ac4440b6c23005c43a81cf064b9aa123fbeb992ac91cd04c7d485abb1fbea" dependencies = [ "bincode", "blake2 0.10.6", @@ -6482,9 +6511,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -6571,7 +6600,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.3.1", + "hyper 1.5.0", "hyper-timeout", "hyper-util", "percent-encoding", @@ -6580,7 +6609,7 @@ dependencies = [ "socket2", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -6606,18 +6635,34 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-http" -version = "0.5.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "bitflags 2.6.0", "bytes", "http 1.1.0", "http-body 1.0.0", - "http-body-util", "mime", "pin-project-lite", "tower-layer", @@ -6627,15 +6672,15 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -6731,6 +6776,27 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "tracing-test" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "557b891436fe0d5e0e363427fc7f217abf9ccd510d5136549847bdcbcd011d68" +dependencies = [ + "tracing-core", + "tracing-subscriber", + "tracing-test-macro", +] + +[[package]] +name = "tracing-test-macro" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" +dependencies = [ + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = 
"try-lock" version = "0.2.5" @@ -7479,9 +7545,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6e69931f24db5cf333b714721e8d80ff88bfdb7da8c3dc7882612ffddb8d27" +checksum = "3cc74fbe2b45fd19e95c59ea792c795feebdb616ebaa463f0ac567f495f47387" dependencies = [ "anyhow", "lazy_static", @@ -7489,7 +7555,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.5", + "zk_evm_abstractions 0.150.7", ] [[package]] @@ -7520,22 +7586,22 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6b0720261ab55490fe3a96e96de30d5d7b277940b52ea7f52dbf564eb1748" +checksum = "37f333a3b059899df09e40deb041af881bc03e496fda5eec618ffb5e854ee7df" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.7", ] [[package]] name = "zkevm-assembly" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e99106038062537c05b4e6e7754d1bbba28ba16185a3e5ee5ad22e2f8be883bb" +checksum = "cf011a0c83cbfb175f1e60811f0e0cd56551c9e35df596a762556662c638deb9" dependencies = [ "env_logger 0.9.3", "hex", @@ -7548,7 +7614,7 @@ dependencies = [ "smallvec", "structopt", "thiserror", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.7", ] [[package]] @@ -7597,9 +7663,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784fa7cfb51e17c5ced112bca43da30b3468b2347b7af0427ad9638759fb140e" +checksum = "d06fb35b00699d25175a2ad447f86a9088af8b0bc698bb57086fb04c13e52eab" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -7611,7 +7677,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.7", "zksync_cs_derive", ] @@ -7659,9 +7725,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79055eae1b6c1ab80793ed9d77d2964c9c896afa4b5dfed278cf58cd10acfe8f" +checksum = "b83f3b279248af4ca86dec20a54127f02110b45570f3f6c1d13df49ba75c28a5" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -7676,13 +7742,13 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "550f82d3b7448c35168dc13bfadbccd5fd306097b6e1ea01793151c1c9137a36" +checksum = "d9c801aa17e9009699aacf654588d6adfaeeb8a490b2d9121847c201e2766803" dependencies = [ "bincode", "circuit_definitions", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.7", "codegen", "crossbeam", "derivative", @@ -7703,11 +7769,10 @@ dependencies = [ [[package]] name = "zksync-gpu-ffi" -version = "0.150.9" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86511b3957adfe415ecdbd1ee01c51aa3ca131a607e61ca024976312f613b0f9" +checksum = "5688dc060456f6c1e790d589f3abd6d9e9a11eb393d7383fbeb23b55961951e0" dependencies = [ - "bindgen 0.59.2", "cmake", "crossbeam", "derivative", @@ -7719,9 +7784,9 @@ dependencies = [ [[package]] name = "zksync-gpu-prover" -version = "0.150.9" +version = "0.151.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e4c00f2db603d1b696bc2e9d822bb4c087050de5b65559067fc2232786cbc93" +checksum = "5714848e6f8361820346483246dd68b4e7fb05ec41dd6610a8b53fb5c3ca7f3a" dependencies = [ "bit-vec", "cfg-if", @@ -7736,9 +7801,9 @@ dependencies = [ [[package]] name = "zksync-wrapper-prover" -version = "0.150.9" +version = "0.151.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d58df1ec10e0d5eb58563bb01abda5ed185c9b9621502e361848ca40eb7868ac" +checksum = "52a6a1863818d939d445c53af57e53c222f11c2c94b9a94c3612dd938a3d983c" dependencies = [ "circuit_definitions", "zkevm_test_harness", @@ -7751,13 +7816,15 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", + "const-decoder 0.4.0", "ethabi", "hex", "num_enum 0.7.2", - "secrecy", + "secrecy 0.8.0", "serde", "serde_json", "serde_with", + "sha2 0.10.8", "strum", "thiserror", "tiny-keccak 2.0.2", @@ -7801,6 +7868,7 @@ dependencies = [ "tracing", "vise", "zkevm_test_harness", + "zksync_circuit_prover_service", "zksync_config", "zksync_core_leftovers", "zksync_env_config", @@ -7808,12 +7876,33 @@ dependencies = [ "zksync_prover_dal", "zksync_prover_fri_types", "zksync_prover_fri_utils", + "zksync_prover_job_processor", "zksync_prover_keystore", "zksync_queued_job_processor", "zksync_types", "zksync_utils", ] +[[package]] +name = "zksync_circuit_prover_service" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "shivini", + "tokio", + "tokio-util", + "tracing", + "vise", + "zkevm_test_harness", + "zksync_object_store", + "zksync_prover_dal", + "zksync_prover_fri_types", + "zksync_prover_job_processor", + "zksync_prover_keystore", + "zksync_types", +] + [[package]] name = "zksync_concurrency" version = "0.5.0" @@ -7839,14 +7928,9 @@ version = "0.1.0" dependencies = [ "anyhow", "rand 0.8.5", - "secrecy", + "secrecy 0.8.0", "serde", - "strum", - "strum_macros", - "time", "tracing", - "url", - "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -7934,11 +8018,11 @@ name = "zksync_contracts" version = "0.1.0" dependencies = [ "envy", - "ethabi", "hex", "once_cell", "serde", "serde_json", + "zksync_basic_types", "zksync_utils", ] @@ -7970,7 +8054,6 @@ dependencies = [ "sha2 0.10.8", "thiserror", "zksync_basic_types", - "zksync_utils", ] [[package]] @@ -8017,7 +8100,6 @@ dependencies = [ "zksync_protobuf_build", "zksync_system_constants", "zksync_types", - "zksync_utils", "zksync_vm_interface", ] @@ -8106,9 +8188,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.5" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb8a9c76c172a6d639855ee342b9a670e3ba472f5ae302f771b1c3ee777dc88" +checksum = "dc58af8e4e4ad1a851ffd2275e6a44ead0f15a7eaac9dc9d60a56b3b9c9b08e8" dependencies = [ "boojum", "derivative", @@ -8118,7 +8200,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.5", + "zkevm_circuits 0.150.7", ] [[package]] @@ -8154,7 +8236,7 @@ dependencies = [ "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.7", "ethabi", "hex", "itertools 0.10.5", @@ -8166,11 +8248,11 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.5", + "zk_evm 0.150.7", "zksync_contracts", + "zksync_mini_merkle_tree", "zksync_system_constants", "zksync_types", - "zksync_utils", "zksync_vm2", "zksync_vm_interface", ] @@ 
-8218,7 +8300,7 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.7", "clap 4.5.4", "ctrlc", "futures 0.3.30", @@ -8291,10 +8373,9 @@ dependencies = [ "hex", "prost 0.12.6", "rand 0.8.5", - "secrecy", + "secrecy 0.8.0", "serde_json", "serde_yaml", - "time", "tracing", "zksync_basic_types", "zksync_config", @@ -8311,10 +8392,10 @@ dependencies = [ "async-trait", "axum", "chrono", - "clap 4.5.4", "ctrlc", "debug-map-sorted", "futures 0.3.30", + "humantime-serde", "k8s-openapi", "kube", "once_cell", @@ -8324,19 +8405,18 @@ dependencies = [ "rustls", "serde", "serde_json", + "serde_yaml", "structopt", "strum", - "time", + "strum_macros", "tokio", "tracing", "tracing-subscriber", + "tracing-test", "url", "vise", "zksync_config", - "zksync_core_leftovers", - "zksync_protobuf_config", "zksync_prover_job_monitor", - "zksync_types", "zksync_utils", "zksync_vlog", ] @@ -8444,13 +8524,13 @@ name = "zksync_prover_interface" version = "0.1.0" dependencies = [ "chrono", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.7", "serde", "serde_with", "strum", - "zksync_multivm", "zksync_object_store", "zksync_types", + "zksync_vm_interface", ] [[package]] @@ -8475,6 +8555,21 @@ dependencies = [ "zksync_vlog", ] +[[package]] +name = "zksync_prover_job_processor" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "futures 0.3.30", + "strum", + "tokio", + "tokio-stream", + "tokio-util", + "tracing", + "vise", +] + [[package]] name = "zksync_prover_keystore" version = "0.1.0" @@ -8533,7 +8628,6 @@ version = "0.1.0" dependencies = [ "once_cell", "zksync_basic_types", - "zksync_utils", ] [[package]] @@ -8552,7 +8646,6 @@ dependencies = [ "once_cell", "prost 0.12.6", "rlp", - "secp256k1", "serde", "serde_json", "serde_with", @@ -8560,14 +8653,12 @@ dependencies = [ "thiserror", "tracing", "zksync_basic_types", - "zksync_config", "zksync_contracts", "zksync_crypto_primitives", "zksync_mini_merkle_tree", "zksync_protobuf", "zksync_protobuf_build", "zksync_system_constants", - "zksync_utils", ] [[package]] @@ -8575,19 +8666,12 @@ name = "zksync_utils" version = "0.1.0" dependencies = [ "anyhow", - "bigdecimal", "futures 0.3.30", - "hex", - "num", "once_cell", "reqwest 0.12.5", - "serde", "serde_json", - "thiserror", "tokio", "tracing", - "zk_evm 0.133.0", - "zksync_basic_types", "zksync_vlog", ] @@ -8639,19 +8723,19 @@ dependencies = [ [[package]] name = "zksync_vm2" version = "0.2.1" -source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" +source = "git+https://github.com/matter-labs/vm2.git?rev=457d8a7eea9093af9440662e33e598c13ba41633#457d8a7eea9093af9440662e33e598c13ba41633" dependencies = [ "enum_dispatch", "primitive-types", - "zk_evm_abstractions 0.150.5", - "zkevm_opcode_defs 0.150.5", + "zk_evm_abstractions 0.150.7", + "zkevm_opcode_defs 0.150.7", "zksync_vm2_interface", ] [[package]] name = "zksync_vm2_interface" version = "0.2.1" -source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" +source = "git+https://github.com/matter-labs/vm2.git?rev=457d8a7eea9093af9440662e33e598c13ba41633#457d8a7eea9093af9440662e33e598c13ba41633" dependencies = [ "primitive-types", ] @@ -8701,7 +8785,7 @@ dependencies = [ "async-trait", "bincode", "circuit_definitions", - "const-decoder", + "const-decoder 0.3.0", "ctrlc", "futures 0.3.30", "jemallocator", diff 
--git a/prover/Cargo.toml b/prover/Cargo.toml index 742eee649de1..15e819d77f7d 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -28,11 +28,13 @@ debug-map-sorted = "0.1.1" dialoguer = "0.11" futures = "0.3" hex = "0.4" +humantime = "2.1" +humantime-serde = "1.1" indicatif = "0.16" itertools = "0.10.5" jemallocator = "0.5" k8s-openapi = { version = "0.23.0", features = ["v1_30"] } -kube = { version = "0.95.0", features = ["runtime", "derive"] } +kube = { version = "0.96.0", features = ["runtime", "derive"] } local-ip-address = "0.5.0" log = "0.4.20" md5 = "0.7.0" @@ -47,28 +49,31 @@ rustls = { version = "0.23.12", features = ["ring"] } serde = "1.0" serde_derive = "1.0" serde_json = "1.0" +serde_yaml = "0.9" sha3 = "0.10.8" sqlx = { version = "0.8.1", default-features = false } structopt = "0.3.26" strum = { version = "0.26" } +strum_macros = "0.26" tempfile = "3" -time = "0.3.36" tokio = "1" tokio-util = "0.7.11" +tokio-stream = "0.1.16" toml_edit = "0.14.4" tracing = "0.1" tracing-subscriber = "0.3" +tracing-test = "0.2.5" url = "2.5.2" vise = "0.2.0" # Proving dependencies -circuit_definitions = "=0.150.5" -circuit_sequencer_api = "=0.150.5" -zkevm_test_harness = "=0.150.5" +circuit_definitions = "=0.150.7" +circuit_sequencer_api = "=0.150.7" +zkevm_test_harness = "=0.150.7" # GPU proving dependencies -wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.9" } -shivini = "=0.150.9" +wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.151.1" } +shivini = "=0.151.1" # Core workspace dependencies zksync_multivm = { path = "../core/lib/multivm", version = "0.1.0" } @@ -96,6 +101,8 @@ zksync_prover_fri_types = { path = "crates/lib/prover_fri_types" } zksync_prover_fri_utils = { path = "crates/lib/prover_fri_utils" } zksync_prover_keystore = { path = "crates/lib/keystore" } zksync_vk_setup_data_generator_server_fri = { path = "crates/bin/vk_setup_data_generator_server_fri" } +zksync_prover_job_processor = { path = "crates/lib/prover_job_processor" } +zksync_circuit_prover_service = { path = "crates/lib/circuit_prover_service" } zksync_prover_job_monitor = { path = "crates/bin/prover_job_monitor" } # for `perf` profiling diff --git a/prover/crates/bin/circuit_prover/Cargo.toml b/prover/crates/bin/circuit_prover/Cargo.toml index a5751a4cd9a6..d7b7a8ca80fd 100644 --- a/prover/crates/bin/circuit_prover/Cargo.toml +++ b/prover/crates/bin/circuit_prover/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "zksync_circuit_prover" +description = "ZKsync circuit prover binary implementation" version.workspace = true edition.workspace = true authors.workspace = true @@ -8,6 +9,7 @@ repository.workspace = true license.workspace = true keywords.workspace = true categories.workspace = true +publish = false [dependencies] tokio = { workspace = true, features = ["macros", "time"] } @@ -29,6 +31,8 @@ zksync_prover_keystore = { workspace = true, features = ["gpu"] } zksync_env_config.workspace = true zksync_core_leftovers.workspace = true zksync_utils.workspace = true +zksync_circuit_prover_service.workspace = true +zksync_prover_job_processor.workspace = true vise.workspace = true shivini = { workspace = true, features = [ diff --git a/prover/crates/bin/circuit_prover/src/circuit_prover.rs b/prover/crates/bin/circuit_prover/src/circuit_prover.rs deleted file mode 100644 index 1a5f8aa0d974..000000000000 --- a/prover/crates/bin/circuit_prover/src/circuit_prover.rs +++ /dev/null @@ -1,397 +0,0 @@ -use std::{sync::Arc, time::Instant}; - -use anyhow::Context; -use shivini::{ - 
gpu_proof_config::GpuProofConfig, gpu_prove_from_external_witness_data, ProverContext, - ProverContextConfig, -}; -use tokio::{sync::mpsc::Receiver, task::JoinHandle}; -use tokio_util::sync::CancellationToken; -use zkevm_test_harness::prover_utils::{verify_base_layer_proof, verify_recursion_layer_proof}; -use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; -use zksync_prover_fri_types::{ - circuit_definitions::{ - base_layer_proof_config, - boojum::{ - cs::implementations::{pow::NoPow, witness::WitnessVec}, - field::goldilocks::GoldilocksField, - worker::Worker, - }, - circuit_definitions::{ - base_layer::ZkSyncBaseLayerProof, recursion_layer::ZkSyncRecursionLayerProof, - }, - recursion_layer_proof_config, - }, - CircuitWrapper, FriProofWrapper, ProverArtifacts, WitnessVectorArtifactsTemp, -}; -use zksync_prover_keystore::GoldilocksGpuProverSetupData; -use zksync_types::protocol_version::ProtocolSemanticVersion; -use zksync_utils::panic_extractor::try_extract_panic_message; - -use crate::{ - metrics::CIRCUIT_PROVER_METRICS, - types::{DefaultTranscript, DefaultTreeHasher, Proof, VerificationKey}, - SetupDataCache, -}; - -/// In charge of proving circuits, given a Witness Vector source. -/// Both job runner & job executor. -#[derive(Debug)] -pub struct CircuitProver { - connection_pool: ConnectionPool, - object_store: Arc, - protocol_version: ProtocolSemanticVersion, - /// Witness Vector source receiver - receiver: Receiver, - /// Setup Data used for proving & proof verification - setup_data_cache: SetupDataCache, -} - -impl CircuitProver { - pub fn new( - connection_pool: ConnectionPool, - object_store: Arc, - protocol_version: ProtocolSemanticVersion, - receiver: Receiver, - max_allocation: Option, - setup_data_cache: SetupDataCache, - ) -> anyhow::Result<(Self, ProverContext)> { - // VRAM allocation - let prover_context = match max_allocation { - Some(max_allocation) => ProverContext::create_with_config( - ProverContextConfig::default().with_maximum_device_allocation(max_allocation), - ) - .context("failed initializing fixed gpu prover context")?, - None => ProverContext::create().context("failed initializing gpu prover context")?, - }; - Ok(( - Self { - connection_pool, - object_store, - protocol_version, - receiver, - setup_data_cache, - }, - prover_context, - )) - } - - /// Continuously polls `receiver` for Witness Vectors and proves them. - /// All job executions are persisted. - pub async fn run(mut self, cancellation_token: CancellationToken) -> anyhow::Result<()> { - while !cancellation_token.is_cancelled() { - let time = Instant::now(); - - let artifact = self - .receiver - .recv() - .await - .context("no Witness Vector Generators are available")?; - tracing::info!( - "Circuit Prover received job {:?} after: {:?}", - artifact.prover_job.job_id, - time.elapsed() - ); - CIRCUIT_PROVER_METRICS.job_wait_time.observe(time.elapsed()); - - self.prove(artifact, cancellation_token.clone()) - .await - .context("failed to prove circuit proof")?; - } - tracing::info!("Circuit Prover shut down."); - Ok(()) - } - - /// Proves a job, with persistence of execution. 
- async fn prove( - &self, - artifact: WitnessVectorArtifactsTemp, - cancellation_token: CancellationToken, - ) -> anyhow::Result<()> { - let time = Instant::now(); - let block_number = artifact.prover_job.block_number; - let job_id = artifact.prover_job.job_id; - let job_start_time = artifact.time; - let setup_data_key = artifact.prover_job.setup_data_key.crypto_setup_key(); - let setup_data = self - .setup_data_cache - .get(&setup_data_key) - .context(format!( - "failed to get setup data for key {setup_data_key:?}" - ))? - .clone(); - let task = tokio::task::spawn_blocking(move || { - let _span = tracing::info_span!("prove_circuit_proof", %block_number).entered(); - Self::prove_circuit_proof(artifact, setup_data).context("failed to prove circuit") - }); - - self.finish_task( - job_id, - time, - job_start_time, - task, - cancellation_token.clone(), - ) - .await?; - tracing::info!( - "Circuit Prover finished job {:?} in: {:?}", - job_id, - time.elapsed() - ); - CIRCUIT_PROVER_METRICS - .job_finished_time - .observe(time.elapsed()); - CIRCUIT_PROVER_METRICS - .full_proving_time - .observe(job_start_time.elapsed()); - Ok(()) - } - - /// Proves a job using crypto primitives (proof generation & proof verification). - #[tracing::instrument( - name = "Prover::prove_circuit_proof", - skip_all, - fields(l1_batch = % witness_vector_artifacts.prover_job.block_number) - )] - pub fn prove_circuit_proof( - witness_vector_artifacts: WitnessVectorArtifactsTemp, - setup_data: Arc, - ) -> anyhow::Result { - let time = Instant::now(); - let WitnessVectorArtifactsTemp { - witness_vector, - prover_job, - .. - } = witness_vector_artifacts; - - let job_id = prover_job.job_id; - let circuit_wrapper = prover_job.circuit_wrapper; - let block_number = prover_job.block_number; - - let (proof, circuit_id) = - Self::generate_proof(&circuit_wrapper, witness_vector, &setup_data) - .context(format!("failed to generate proof for job id {job_id}"))?; - - Self::verify_proof(&circuit_wrapper, &proof, &setup_data.vk).context(format!( - "failed to verify proof with job_id {job_id}, circuit_id: {circuit_id}" - ))?; - - let proof_wrapper = match &circuit_wrapper { - CircuitWrapper::Base(_) => { - FriProofWrapper::Base(ZkSyncBaseLayerProof::from_inner(circuit_id, proof)) - } - CircuitWrapper::Recursive(_) => { - FriProofWrapper::Recursive(ZkSyncRecursionLayerProof::from_inner(circuit_id, proof)) - } - CircuitWrapper::BasePartial(_) => { - return Self::partial_proof_error(); - } - }; - CIRCUIT_PROVER_METRICS - .crypto_primitives_time - .observe(time.elapsed()); - Ok(ProverArtifacts::new(block_number, proof_wrapper)) - } - - /// Generates a proof from crypto primitives. 
- fn generate_proof( - circuit_wrapper: &CircuitWrapper, - witness_vector: WitnessVec, - setup_data: &Arc, - ) -> anyhow::Result<(Proof, u8)> { - let time = Instant::now(); - - let worker = Worker::new(); - - let (gpu_proof_config, proof_config, circuit_id) = match circuit_wrapper { - CircuitWrapper::Base(circuit) => ( - GpuProofConfig::from_base_layer_circuit(circuit), - base_layer_proof_config(), - circuit.numeric_circuit_type(), - ), - CircuitWrapper::Recursive(circuit) => ( - GpuProofConfig::from_recursive_layer_circuit(circuit), - recursion_layer_proof_config(), - circuit.numeric_circuit_type(), - ), - CircuitWrapper::BasePartial(_) => { - return Self::partial_proof_error(); - } - }; - - let proof = - gpu_prove_from_external_witness_data::( - &gpu_proof_config, - &witness_vector, - proof_config, - &setup_data.setup, - &setup_data.vk, - (), - &worker, - ) - .context("crypto primitive: failed to generate proof")?; - CIRCUIT_PROVER_METRICS - .generate_proof_time - .observe(time.elapsed()); - Ok((proof.into(), circuit_id)) - } - - /// Verifies a proof from crypto primitives - fn verify_proof( - circuit_wrapper: &CircuitWrapper, - proof: &Proof, - verification_key: &VerificationKey, - ) -> anyhow::Result<()> { - let time = Instant::now(); - - let is_valid = match circuit_wrapper { - CircuitWrapper::Base(base_circuit) => { - verify_base_layer_proof::(base_circuit, proof, verification_key) - } - CircuitWrapper::Recursive(recursive_circuit) => { - verify_recursion_layer_proof::(recursive_circuit, proof, verification_key) - } - CircuitWrapper::BasePartial(_) => { - return Self::partial_proof_error(); - } - }; - - CIRCUIT_PROVER_METRICS - .verify_proof_time - .observe(time.elapsed()); - - if !is_valid { - return Err(anyhow::anyhow!("crypto primitive: failed to verify proof")); - } - Ok(()) - } - - /// This code path should never trigger. All proofs are hydrated during Witness Vector Generator. - /// If this triggers, it means that proof hydration in Witness Vector Generator was not done -- logic bug. - fn partial_proof_error() -> anyhow::Result { - Err(anyhow::anyhow!("received unexpected dehydrated proof")) - } - - /// Runs task to completion and persists result. - /// NOTE: Task may be cancelled mid-flight. - async fn finish_task( - &self, - job_id: u32, - time: Instant, - job_start_time: Instant, - task: JoinHandle>, - cancellation_token: CancellationToken, - ) -> anyhow::Result<()> { - tokio::select! { - _ = cancellation_token.cancelled() => { - tracing::info!("Stop signal received, shutting down Circuit Prover..."); - return Ok(()) - } - result = task => { - let error_message = match result { - Ok(Ok(prover_artifact)) => { - tracing::info!("Circuit Prover executed job {:?} in: {:?}", job_id, time.elapsed()); - CIRCUIT_PROVER_METRICS.execution_time.observe(time.elapsed()); - self - .save_result(job_id, job_start_time, prover_artifact) - .await.context("failed to save result")?; - return Ok(()) - } - Ok(Err(error)) => error.to_string(), - Err(error) => try_extract_panic_message(error), - }; - tracing::error!( - "Circuit Prover failed on job {:?} with error {:?}", - job_id, - error_message - ); - - self.save_failure(job_id, error_message).await.context("failed to save failure")?; - } - } - - Ok(()) - } - - /// Persists proof generated. - /// Job metadata is saved to database, whilst artifacts go to object store. 
- async fn save_result( - &self, - job_id: u32, - job_start_time: Instant, - artifacts: ProverArtifacts, - ) -> anyhow::Result<()> { - let time = Instant::now(); - let mut connection = self - .connection_pool - .connection() - .await - .context("failed to get db connection")?; - let proof = artifacts.proof_wrapper; - - let (_circuit_type, is_scheduler_proof) = match &proof { - FriProofWrapper::Base(base) => (base.numeric_circuit_type(), false), - FriProofWrapper::Recursive(recursive_circuit) => match recursive_circuit { - ZkSyncRecursionLayerProof::SchedulerCircuit(_) => { - (recursive_circuit.numeric_circuit_type(), true) - } - _ => (recursive_circuit.numeric_circuit_type(), false), - }, - }; - - let upload_time = Instant::now(); - let blob_url = self - .object_store - .put(job_id, &proof) - .await - .context("failed to upload to object store")?; - CIRCUIT_PROVER_METRICS - .artifact_upload_time - .observe(upload_time.elapsed()); - - let mut transaction = connection - .start_transaction() - .await - .context("failed to start db transaction")?; - transaction - .fri_prover_jobs_dal() - .save_proof(job_id, job_start_time.elapsed(), &blob_url) - .await; - if is_scheduler_proof { - transaction - .fri_proof_compressor_dal() - .insert_proof_compression_job( - artifacts.block_number, - &blob_url, - self.protocol_version, - ) - .await; - } - transaction - .commit() - .await - .context("failed to commit db transaction")?; - - tracing::info!( - "Circuit Prover saved job {:?} after {:?}", - job_id, - time.elapsed() - ); - CIRCUIT_PROVER_METRICS.save_time.observe(time.elapsed()); - - Ok(()) - } - - /// Persists job execution error to database. - async fn save_failure(&self, job_id: u32, error: String) -> anyhow::Result<()> { - self.connection_pool - .connection() - .await - .context("failed to get db connection")? - .fri_prover_jobs_dal() - .save_proof_error(job_id, error) - .await; - Ok(()) - } -} diff --git a/prover/crates/bin/circuit_prover/src/lib.rs b/prover/crates/bin/circuit_prover/src/lib.rs index 7d7ce1d96686..c25afe6e9b3b 100644 --- a/prover/crates/bin/circuit_prover/src/lib.rs +++ b/prover/crates/bin/circuit_prover/src/lib.rs @@ -1,13 +1,5 @@ -#![allow(incomplete_features)] // We have to use generic const exprs. 
-#![feature(generic_const_exprs)] -pub use backoff::Backoff; -pub use circuit_prover::CircuitProver; pub use metrics::PROVER_BINARY_METRICS; pub use types::{FinalizationHintsCache, SetupDataCache}; -pub use witness_vector_generator::WitnessVectorGenerator; -mod backoff; -mod circuit_prover; mod metrics; mod types; -mod witness_vector_generator; diff --git a/prover/crates/bin/circuit_prover/src/main.rs b/prover/crates/bin/circuit_prover/src/main.rs index e26f29ca995d..e115d1510657 100644 --- a/prover/crates/bin/circuit_prover/src/main.rs +++ b/prover/crates/bin/circuit_prover/src/main.rs @@ -6,11 +6,10 @@ use std::{ use anyhow::Context as _; use clap::Parser; +use shivini::{ProverContext, ProverContextConfig}; use tokio_util::sync::CancellationToken; -use zksync_circuit_prover::{ - Backoff, CircuitProver, FinalizationHintsCache, SetupDataCache, WitnessVectorGenerator, - PROVER_BINARY_METRICS, -}; +use zksync_circuit_prover::{FinalizationHintsCache, SetupDataCache, PROVER_BINARY_METRICS}; +use zksync_circuit_prover_service::job_runner::{circuit_prover_runner, WvgRunnerBuilder}; use zksync_config::{ configs::{FriProverConfig, ObservabilityConfig}, ObjectStoreConfig, @@ -22,82 +21,105 @@ use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; use zksync_prover_keystore::keystore::Keystore; use zksync_utils::wait_for_tasks::ManagedTasks; +/// On most commodity hardware, a WVG job can take ~30 seconds to complete. +/// GPU processing is ~1 second. +/// A typical setup is ~25 WVGs & 1 GPU. +/// Worst case scenario: all 25 WVG jobs were just picked up (~30 seconds to finish them), +/// plus ~25 more seconds for the GPU to prove them one by one. +const GRACEFUL_SHUTDOWN_DURATION: Duration = Duration::from_secs(55); + +/// With the current setup, only a single job is expected to be in flight. +/// This guarantees that memory consumption is fixed (1 job in memory, no more). +/// Additionally, it helps with estimating graceful shutdown time. +/// As a free side effect, if the machine dies, only 1 job is left in "pending" state. +const CHANNEL_SIZE: usize = 1; + #[derive(Debug, Parser)] #[command(author = "Matter Labs", version)] struct Cli { - #[arg(long)] + /// Path to the configuration file + #[arg(short = 'c', long)] pub(crate) config_path: Option<PathBuf>, - #[arg(long)] + /// Path to the secrets file + #[arg(short = 's', long)] pub(crate) secrets_path: Option<PathBuf>, - /// Number of WVG jobs to run in parallel. - /// Default value is 1. - #[arg(long, default_value_t = 1)] - pub(crate) witness_vector_generator_count: usize, + /// Number of light witness vector generators to run in parallel. + /// Each corresponds to 1 CPU thread & ~2GB of RAM. + #[arg(short = 'l', long, default_value_t = 1)] + light_wvg_count: usize, + /// Number of heavy witness vector generators to run in parallel. + /// Each corresponds to 1 CPU thread & ~9GB of RAM. + #[arg(short = 'h', long, default_value_t = 1)] + heavy_wvg_count: usize, /// Max VRAM to allocate. Useful if you want to limit the size of VRAM used. /// None corresponds to allocating all available VRAM. 
- #[arg(long)] + #[arg(short = 'm', long)] pub(crate) max_allocation: Option<usize>, } #[tokio::main] async fn main() -> anyhow::Result<()> { - let time = Instant::now(); + let start_time = Instant::now(); let opt = Cli::parse(); let (observability_config, prover_config, object_store_config) = load_configs(opt.config_path)?; - let _observability_guard = observability_config .install() .context("failed to install observability")?; - let wvg_count = opt.witness_vector_generator_count as u32; - - let (connection_pool, object_store, setup_data_cache, hints) = load_resources( + let (connection_pool, object_store, prover_context, setup_data_cache, hints) = load_resources( opt.secrets_path, + opt.max_allocation, object_store_config, prover_config.setup_data_path.into(), - wvg_count, ) .await .context("failed to load configs")?; - PROVER_BINARY_METRICS.start_up.observe(time.elapsed()); + PROVER_BINARY_METRICS + .startup_time + .observe(start_time.elapsed()); let cancellation_token = CancellationToken::new(); - let backoff = Backoff::new(Duration::from_secs(5), Duration::from_secs(30)); let mut tasks = vec![]; - let (sender, receiver) = tokio::sync::mpsc::channel(5); - - tracing::info!("Starting {wvg_count} Witness Vector Generators."); - - for _ in 0..wvg_count { - let wvg = WitnessVectorGenerator::new( - object_store.clone(), - connection_pool.clone(), - PROVER_PROTOCOL_SEMANTIC_VERSION, - sender.clone(), - hints.clone(), - ); - tasks.push(tokio::spawn( - wvg.run(cancellation_token.clone(), backoff.clone()), - )); - } + let (witness_vector_sender, witness_vector_receiver) = tokio::sync::mpsc::channel(CHANNEL_SIZE); + + tracing::info!( + "Starting {} light WVGs and {} heavy WVGs.", + opt.light_wvg_count, + opt.heavy_wvg_count + ); + + let builder = WvgRunnerBuilder::new( + connection_pool.clone(), + object_store.clone(), + PROVER_PROTOCOL_SEMANTIC_VERSION, + hints.clone(), + witness_vector_sender, + cancellation_token.clone(), + ); + + let light_wvg_runner = builder.light_wvg_runner(opt.light_wvg_count); + let heavy_wvg_runner = builder.heavy_wvg_runner(opt.heavy_wvg_count); - // NOTE: Prover Context is the way VRAM is allocated. If it is dropped, the claim on VRAM allocation is dropped as well. - // It has to be kept until prover dies. Whilst it may be kept in prover struct, during cancellation, prover can `drop`, but the thread doing the processing can still be alive. - // This setup prevents segmentation faults and other nasty behavior during shutdown. - let (prover, _prover_context) = CircuitProver::new( + tasks.extend(light_wvg_runner.run()); + tasks.extend(heavy_wvg_runner.run()); + + // Dropping the builder is necessary: it holds a connection_pool which keeps 1 connection active by default. + drop(builder); + + let circuit_prover_runner = circuit_prover_runner( connection_pool, object_store, PROVER_PROTOCOL_SEMANTIC_VERSION, - receiver, - opt.max_allocation, setup_data_cache, - ) - .context("failed to create circuit prover")?; - tasks.push(tokio::spawn(prover.run(cancellation_token.clone()))); + witness_vector_receiver, + prover_context, + ); + + tasks.extend(circuit_prover_runner.run()); let mut tasks = ManagedTasks::new(tasks); tokio::select! 
{ @@ -114,12 +136,15 @@ async fn main() -> anyhow::Result<()> { } } } - PROVER_BINARY_METRICS.run_time.observe(time.elapsed()); - tasks.complete(Duration::from_secs(5)).await; + let shutdown_time = Instant::now(); + tasks.complete(GRACEFUL_SHUTDOWN_DURATION).await; + PROVER_BINARY_METRICS + .shutdown_time + .observe(shutdown_time.elapsed()); + PROVER_BINARY_METRICS.run_time.observe(start_time.elapsed()); Ok(()) } - /// Loads configs necessary for proving. /// - observability config - for observability setup /// - prover config - necessary for setup data @@ -143,20 +168,21 @@ fn load_configs( tracing::info!("Loaded configs."); Ok((observability_config, prover_config, object_store_config)) } - /// Loads resources necessary for proving. /// - connection pool - necessary to pick & store jobs from database /// - object store - necessary for loading and storing artifacts to object store +/// - prover context - necessary for circuit proving; VRAM allocation /// - setup data - necessary for circuit proving /// - finalization hints - necessary for generating witness vectors async fn load_resources( secrets_path: Option<PathBuf>, + max_gpu_vram_allocation: Option<usize>, object_store_config: ObjectStoreConfig, setup_data_path: PathBuf, - wvg_count: u32, ) -> anyhow::Result<( ConnectionPool<Prover>, Arc<dyn ObjectStore>, + ProverContext, SetupDataCache, FinalizationHintsCache, )> { @@ -165,9 +191,8 @@ async fn load_resources( let database_url = database_secrets .prover_url .context("no prover DB URl present")?; - - // 1 connection for the prover and one for each vector generator - let max_connections = 1 + wvg_count; + // 2 connections for the witness vector generator job pickers (1 each) and 1 for gpu circuit prover job saver + let max_connections = 3; let connection_pool = ConnectionPool::<Prover>::builder(database_url, max_connections) .build() .await @@ -178,23 +203,34 @@ async fn load_resources( .await .context("failed to create object store")?; - tracing::info!("Loading mappings from disk..."); + let prover_context = match max_gpu_vram_allocation { + Some(max_allocation) => ProverContext::create_with_config( + ProverContextConfig::default().with_maximum_device_allocation(max_allocation), + ) + .context("failed initializing fixed gpu prover context")?, + None => ProverContext::create().context("failed initializing gpu prover context")?, + }; + + tracing::info!("Loading setup data from disk..."); let keystore = Keystore::locate().with_setup_path(Some(setup_data_path)); let setup_data_cache = keystore .load_all_setup_key_mapping() .await .context("failed to load setup key mapping")?; + + tracing::info!("Loading finalization hints from disk..."); let finalization_hints = keystore .load_all_finalization_hints_mapping() .await .context("failed to load finalization hints mapping")?; - tracing::info!("Loaded mappings from disk."); + tracing::info!("Finished loading mappings from disk."); Ok(( connection_pool, object_store, + prover_context, setup_data_cache, finalization_hints, )) diff --git a/prover/crates/bin/circuit_prover/src/metrics.rs b/prover/crates/bin/circuit_prover/src/metrics.rs index e9f445914795..f9b8c38e3e34 100644 --- a/prover/crates/bin/circuit_prover/src/metrics.rs +++ b/prover/crates/bin/circuit_prover/src/metrics.rs @@ -2,79 +2,20 @@ use std::time::Duration; use vise::{Buckets, Histogram, Metrics}; +/// Instrument prover binary lifecycle #[derive(Debug, Metrics)] #[metrics(prefix = "prover_binary")] pub struct ProverBinaryMetrics { /// How long does it take for prover to load data before it can produce proofs? 
#[metrics(buckets = Buckets::LATENCIES)] - pub start_up: Histogram, - /// How long has the prover been running? + pub startup_time: Histogram, + /// How long did the prover binary run for? #[metrics(buckets = Buckets::LATENCIES)] pub run_time: Histogram, -} - -#[vise::register] -pub static PROVER_BINARY_METRICS: vise::Global = vise::Global::new(); - -#[derive(Debug, Metrics)] -#[metrics(prefix = "witness_vector_generator")] -pub struct WitnessVectorGeneratorMetrics { - /// How long does witness vector generator waits before a job is available? - #[metrics(buckets = Buckets::LATENCIES)] - pub job_wait_time: Histogram, - /// How long does it take to load object store artifacts for a witness vector job? - #[metrics(buckets = Buckets::LATENCIES)] - pub artifact_download_time: Histogram, - /// How long does the crypto witness generation primitive take? - #[metrics(buckets = Buckets::LATENCIES)] - pub crypto_primitive_time: Histogram, - /// How long does it take for a job to be executed, from the moment it's loaded? - #[metrics(buckets = Buckets::LATENCIES)] - pub execution_time: Histogram, - /// How long does it take to send a job to prover? - /// This is relevant because prover queue can apply back-pressure. - #[metrics(buckets = Buckets::LATENCIES)] - pub send_time: Histogram, - /// How long does it take for a job to be considered finished, from the moment it's been loaded? - #[metrics(buckets = Buckets::LATENCIES)] - pub job_finished_time: Histogram, -} - -#[vise::register] -pub static WITNESS_VECTOR_GENERATOR_METRICS: vise::Global = - vise::Global::new(); - -#[derive(Debug, Metrics)] -#[metrics(prefix = "circuit_prover")] -pub struct CircuitProverMetrics { - /// How long does circuit prover wait before a job is available? - #[metrics(buckets = Buckets::LATENCIES)] - pub job_wait_time: Histogram, - /// How long does the crypto primitives (proof generation & verification) take? - #[metrics(buckets = Buckets::LATENCIES)] - pub crypto_primitives_time: Histogram, - /// How long does proof generation (crypto primitive) take? - #[metrics(buckets = Buckets::LATENCIES)] - pub generate_proof_time: Histogram, - /// How long does verify proof (crypto primitive) take? + /// How long does it take prover to gracefully shutdown? #[metrics(buckets = Buckets::LATENCIES)] - pub verify_proof_time: Histogram, - /// How long does it take for a job to be executed, from the moment it's loaded? - #[metrics(buckets = Buckets::LATENCIES)] - pub execution_time: Histogram, - /// How long does it take to upload proof to object store? - #[metrics(buckets = Buckets::LATENCIES)] - pub artifact_upload_time: Histogram, - /// How long does it take to save a job? - #[metrics(buckets = Buckets::LATENCIES)] - pub save_time: Histogram, - /// How long does it take for a job to be considered finished, from the moment it's been loaded? - #[metrics(buckets = Buckets::LATENCIES)] - pub job_finished_time: Histogram, - /// How long does it take a job to go from witness generation to having the proof saved? 
- #[metrics(buckets = Buckets::LATENCIES)] - pub full_proving_time: Histogram, + pub shutdown_time: Histogram, } #[vise::register] -pub static CIRCUIT_PROVER_METRICS: vise::Global = vise::Global::new(); +pub static PROVER_BINARY_METRICS: vise::Global = vise::Global::new(); diff --git a/prover/crates/bin/circuit_prover/src/types.rs b/prover/crates/bin/circuit_prover/src/types.rs index 52cdd48b6b50..e4e1fdc13b8f 100644 --- a/prover/crates/bin/circuit_prover/src/types.rs +++ b/prover/crates/bin/circuit_prover/src/types.rs @@ -1,31 +1,12 @@ use std::{collections::HashMap, sync::Arc}; use zksync_prover_fri_types::{ - circuit_definitions::boojum::{ - algebraic_props::{ - round_function::AbsorptionModeOverwrite, sponge::GoldilocksPoseidon2Sponge, - }, - cs::implementations::{ - proof::Proof as CryptoProof, setup::FinalizationHintsForProver, - transcript::GoldilocksPoisedon2Transcript, - verifier::VerificationKey as CryptoVerificationKey, - }, - field::goldilocks::{GoldilocksExt2, GoldilocksField}, - }, + circuit_definitions::boojum::cs::implementations::setup::FinalizationHintsForProver, ProverServiceDataKey, }; use zksync_prover_keystore::GoldilocksGpuProverSetupData; -// prover types -pub type DefaultTranscript = GoldilocksPoisedon2Transcript; -pub type DefaultTreeHasher = GoldilocksPoseidon2Sponge; - -type F = GoldilocksField; -type H = GoldilocksPoseidon2Sponge; -type Ext = GoldilocksExt2; -pub type Proof = CryptoProof; -pub type VerificationKey = CryptoVerificationKey; - +// TODO: To be moved to circuit_prover_service lib & adjusted to new type idiom // cache types pub type SetupDataCache = HashMap>; pub type FinalizationHintsCache = HashMap>; diff --git a/prover/crates/bin/circuit_prover/src/witness_vector_generator.rs b/prover/crates/bin/circuit_prover/src/witness_vector_generator.rs deleted file mode 100644 index cb2d2a256df9..000000000000 --- a/prover/crates/bin/circuit_prover/src/witness_vector_generator.rs +++ /dev/null @@ -1,345 +0,0 @@ -use std::{collections::HashMap, sync::Arc, time::Instant}; - -use anyhow::Context; -use tokio::{sync::mpsc::Sender, task::JoinHandle}; -use tokio_util::sync::CancellationToken; -use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; -use zksync_prover_fri_types::{ - circuit_definitions::{ - boojum::{ - cs::implementations::setup::FinalizationHintsForProver, - field::goldilocks::GoldilocksField, - gadgets::queue::full_state_queue::FullStateCircuitQueueRawWitness, - }, - circuit_definitions::base_layer::ZkSyncBaseLayerCircuit, - }, - get_current_pod_name, - keys::RamPermutationQueueWitnessKey, - CircuitAuxData, CircuitWrapper, ProverJob, ProverServiceDataKey, RamPermutationQueueWitness, - WitnessVectorArtifactsTemp, -}; -use zksync_types::{protocol_version::ProtocolSemanticVersion, L1BatchNumber}; -use zksync_utils::panic_extractor::try_extract_panic_message; - -use crate::{metrics::WITNESS_VECTOR_GENERATOR_METRICS, Backoff, FinalizationHintsCache}; - -/// In charge of generating Witness Vectors and sending them to Circuit Prover. -/// Both job runner & job executor. 
-#[derive(Debug)] -pub struct WitnessVectorGenerator { - object_store: Arc, - connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, - /// Finalization Hints used for Witness Vector generation - finalization_hints_cache: FinalizationHintsCache, - /// Witness Vector sender for Circuit Prover - sender: Sender, - pod_name: String, -} - -impl WitnessVectorGenerator { - pub fn new( - object_store: Arc, - connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, - sender: Sender, - finalization_hints: HashMap>, - ) -> Self { - Self { - object_store, - connection_pool, - protocol_version, - finalization_hints_cache: finalization_hints, - sender, - pod_name: get_current_pod_name(), - } - } - - /// Continuously polls database for new prover jobs and generates witness vectors for them. - /// All job executions are persisted. - pub async fn run( - self, - cancellation_token: CancellationToken, - mut backoff: Backoff, - ) -> anyhow::Result<()> { - let mut get_job_timer = Instant::now(); - while !cancellation_token.is_cancelled() { - if let Some(prover_job) = self - .get_job() - .await - .context("failed to get next witness generation job")? - { - tracing::info!( - "Witness Vector Generator received job {:?} after: {:?}", - prover_job.job_id, - get_job_timer.elapsed() - ); - WITNESS_VECTOR_GENERATOR_METRICS - .job_wait_time - .observe(get_job_timer.elapsed()); - if let e @ Err(_) = self.generate(prover_job, cancellation_token.clone()).await { - // this means that the witness vector receiver is closed, no need to report the error, just return - if cancellation_token.is_cancelled() { - return Ok(()); - } - e.context("failed to generate witness")? - } - - // waiting for a job timer starts as soon as the other is finished - get_job_timer = Instant::now(); - backoff.reset(); - continue; - }; - self.backoff(&mut backoff, cancellation_token.clone()).await; - } - tracing::info!("Witness Vector Generator shut down."); - Ok(()) - } - - /// Retrieves a prover job from database, loads artifacts from object store and hydrates them. - async fn get_job(&self) -> anyhow::Result> { - let mut connection = self - .connection_pool - .connection() - .await - .context("failed to get db connection")?; - let prover_job_metadata = match connection - .fri_prover_jobs_dal() - .get_job(self.protocol_version, &self.pod_name) - .await - { - None => return Ok(None), - Some(job) => job, - }; - - let time = Instant::now(); - let circuit_wrapper = self - .object_store - .get(prover_job_metadata.into()) - .await - .context("failed to get circuit_wrapper from object store")?; - let artifact = match circuit_wrapper { - a @ CircuitWrapper::Base(_) => a, - a @ CircuitWrapper::Recursive(_) => a, - CircuitWrapper::BasePartial((circuit, aux_data)) => self - .fill_witness(circuit, aux_data, prover_job_metadata.block_number) - .await - .context("failed to fill witness")?, - }; - WITNESS_VECTOR_GENERATOR_METRICS - .artifact_download_time - .observe(time.elapsed()); - - let setup_data_key = ProverServiceDataKey { - circuit_id: prover_job_metadata.circuit_id, - round: prover_job_metadata.aggregation_round, - } - .crypto_setup_key(); - let prover_job = ProverJob::new( - prover_job_metadata.block_number, - prover_job_metadata.id, - artifact, - setup_data_key, - ); - Ok(Some(prover_job)) - } - - /// Prover artifact hydration. 
- async fn fill_witness( - &self, - circuit: ZkSyncBaseLayerCircuit, - aux_data: CircuitAuxData, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result { - if let ZkSyncBaseLayerCircuit::RAMPermutation(circuit_instance) = circuit { - let sorted_witness_key = RamPermutationQueueWitnessKey { - block_number: l1_batch_number, - circuit_subsequence_number: aux_data.circuit_subsequence_number as usize, - is_sorted: true, - }; - let sorted_witness: RamPermutationQueueWitness = self - .object_store - .get(sorted_witness_key) - .await - .context("failed to load sorted witness key")?; - - let unsorted_witness_key = RamPermutationQueueWitnessKey { - block_number: l1_batch_number, - circuit_subsequence_number: aux_data.circuit_subsequence_number as usize, - is_sorted: false, - }; - let unsorted_witness: RamPermutationQueueWitness = self - .object_store - .get(unsorted_witness_key) - .await - .context("failed to load unsorted witness key")?; - - let mut witness = circuit_instance.witness.take().unwrap(); - witness.unsorted_queue_witness = FullStateCircuitQueueRawWitness { - elements: unsorted_witness.witness.into(), - }; - witness.sorted_queue_witness = FullStateCircuitQueueRawWitness { - elements: sorted_witness.witness.into(), - }; - circuit_instance.witness.store(Some(witness)); - - return Ok(CircuitWrapper::Base( - ZkSyncBaseLayerCircuit::RAMPermutation(circuit_instance), - )); - } - Err(anyhow::anyhow!( - "unexpected circuit received with partial witness, expected RAM permutation, got {:?}", - circuit.short_description() - )) - } - - /// Generates witness vector, with persistence of execution. - async fn generate( - &self, - prover_job: ProverJob, - cancellation_token: CancellationToken, - ) -> anyhow::Result<()> { - let start_time = Instant::now(); - let finalization_hints = self - .finalization_hints_cache - .get(&prover_job.setup_data_key) - .context(format!( - "failed to get finalization hints for key {:?}", - &prover_job.setup_data_key - ))? - .clone(); - let job_id = prover_job.job_id; - let task = tokio::task::spawn_blocking(move || { - let block_number = prover_job.block_number; - let _span = tracing::info_span!("witness_vector_generator", %block_number).entered(); - Self::generate_witness_vector(prover_job, finalization_hints) - }); - - self.finish_task(job_id, start_time, task, cancellation_token.clone()) - .await?; - - tracing::info!( - "Witness Vector Generator finished job {:?} in: {:?}", - job_id, - start_time.elapsed() - ); - WITNESS_VECTOR_GENERATOR_METRICS - .job_finished_time - .observe(start_time.elapsed()); - Ok(()) - } - - /// Generates witness vector using crypto primitives. - #[tracing::instrument( - skip_all, - fields(l1_batch = % prover_job.block_number) - )] - pub fn generate_witness_vector( - prover_job: ProverJob, - finalization_hints: Arc, - ) -> anyhow::Result { - let time = Instant::now(); - let cs = match prover_job.circuit_wrapper.clone() { - CircuitWrapper::Base(base_circuit) => { - base_circuit.synthesis::(&finalization_hints) - } - CircuitWrapper::Recursive(recursive_circuit) => { - recursive_circuit.synthesis::(&finalization_hints) - } - // circuit must be hydrated during `get_job` - CircuitWrapper::BasePartial(_) => { - return Err(anyhow::anyhow!("received unexpected dehydrated proof")); - } - }; - WITNESS_VECTOR_GENERATOR_METRICS - .crypto_primitive_time - .observe(time.elapsed()); - Ok(WitnessVectorArtifactsTemp::new( - cs.witness.unwrap(), - prover_job, - time, - )) - } - - /// Runs task to completion and persists result. 
- /// NOTE: Task may be cancelled mid-flight. - async fn finish_task( - &self, - job_id: u32, - time: Instant, - task: JoinHandle>, - cancellation_token: CancellationToken, - ) -> anyhow::Result<()> { - tokio::select! { - _ = cancellation_token.cancelled() => { - tracing::info!("Stop signal received, shutting down Witness Vector Generator..."); - return Ok(()) - } - result = task => { - let error_message = match result { - Ok(Ok(witness_vector)) => { - tracing::info!("Witness Vector Generator executed job {:?} in: {:?}", job_id, time.elapsed()); - WITNESS_VECTOR_GENERATOR_METRICS.execution_time.observe(time.elapsed()); - self - .save_result(witness_vector, job_id) - .await - .context("failed to save result")?; - return Ok(()) - } - Ok(Err(error)) => error.to_string(), - Err(error) => try_extract_panic_message(error), - }; - tracing::error!("Witness Vector Generator failed on job {job_id:?} with error {error_message:?}"); - - self.save_failure(job_id, error_message).await.context("failed to save failure")?; - } - } - - Ok(()) - } - - /// Sends proof to Circuit Prover. - async fn save_result( - &self, - artifacts: WitnessVectorArtifactsTemp, - job_id: u32, - ) -> anyhow::Result<()> { - let time = Instant::now(); - self.sender - .send(artifacts) - .await - .context("failed to send witness vector to prover")?; - tracing::info!( - "Witness Vector Generator sent job {:?} after {:?}", - job_id, - time.elapsed() - ); - WITNESS_VECTOR_GENERATOR_METRICS - .send_time - .observe(time.elapsed()); - Ok(()) - } - - /// Persists job execution error to database - async fn save_failure(&self, job_id: u32, error: String) -> anyhow::Result<()> { - self.connection_pool - .connection() - .await - .context("failed to get db connection")? - .fri_prover_jobs_dal() - .save_proof_error(job_id, error) - .await; - Ok(()) - } - - /// Backs off, whilst being cancellation aware. - async fn backoff(&self, backoff: &mut Backoff, cancellation_token: CancellationToken) { - let backoff_duration = backoff.delay(); - tracing::info!("Backing off for {:?}...", backoff_duration); - // Error here corresponds to a timeout w/o receiving task cancel; we're OK with this. 
-        tokio::time::timeout(backoff_duration, cancellation_token.cancelled())
-            .await
-            .ok();
-    }
-}
diff --git a/prover/crates/bin/prover_autoscaler/Cargo.toml b/prover/crates/bin/prover_autoscaler/Cargo.toml
index 9743b45593e7..4e66ecc2b0e3 100644
--- a/prover/crates/bin/prover_autoscaler/Cargo.toml
+++ b/prover/crates/bin/prover_autoscaler/Cargo.toml
@@ -10,22 +10,19 @@ keywords.workspace = true
 categories.workspace = true
 
 [dependencies]
-zksync_core_leftovers.workspace = true
 zksync_vlog.workspace = true
 zksync_utils.workspace = true
-zksync_types.workspace = true
 zksync_config = { workspace = true, features = ["observability_ext"] }
 zksync_prover_job_monitor.workspace = true
-zksync_protobuf_config.workspace = true
 debug-map-sorted.workspace = true
 anyhow.workspace = true
 async-trait.workspace = true
 axum.workspace = true
 chrono.workspace = true
-clap = { workspace = true, features = ["derive"] }
 ctrlc = { workspace = true, features = ["termination"] }
 futures.workspace = true
+humantime-serde.workspace = true
 k8s-openapi = { workspace = true, features = ["v1_30"] }
 kube = { workspace = true, features = ["runtime", "derive"] }
 once_cell.workspace = true
@@ -35,11 +32,13 @@ ring.workspace = true
 rustls = { workspace = true, features = ["ring"] }
 serde = { workspace = true, features = ["derive"] }
 serde_json.workspace = true
+serde_yaml.workspace = true
 structopt.workspace = true
 strum.workspace = true
-time.workspace = true
+strum_macros.workspace = true
 tokio = { workspace = true, features = ["time", "macros"] }
 tracing-subscriber = { workspace = true, features = ["env-filter"] }
 tracing.workspace = true
 url.workspace = true
 vise.workspace = true
+tracing-test.workspace = true
diff --git a/prover/crates/bin/prover_autoscaler/README.md b/prover/crates/bin/prover_autoscaler/README.md
new file mode 100644
index 000000000000..3d1a9afe5a30
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/README.md
@@ -0,0 +1,237 @@
+# Prover Autoscaler
+
+Prover Autoscaler automatically scales prover-related Kubernetes Deployments according to the load, preferring the
+cluster with the best chance of getting Nodes to run. If a cluster runs out of resources, it moves the load to the
+next one.
+
+## Design
+
+Prover Autoscaler consists of a central Scaler and an Agent running in each cluster.
+
+### Agent
+
+Agents watch the status of Deployments, Pods, and out-of-resources Events via the Kubernetes API, and perform scaling
+on request from the Scaler. They watch only the namespaces specified in the config. An Agent listens on 2 ports:
+`prometheus_port`, which exports metrics (path is `/metrics`), and `http_port`, which serves 3 paths: `/healthz`,
+`/cluster` to get the cluster status, and `/scale` to scale Deployments up or down.
+
+### Scaler
+
+The Scaler collects cluster statuses from the Agents and job queues from prover-job-monitor, calculates the needed
+number of replicas, and sends scale requests to the Agents.
+
+Requests flow diagram:
+
+```mermaid
+sequenceDiagram
+    participant prover-job-monitor
+    participant Scaler
+    box cluster1
+    participant Agent1
+    participant K8s API1
+    end
+    box cluster2
+    participant Agent2
+    participant K8s API2
+    end
+    loop Watch
+        Agent1->>K8s API1: Watch namespaces
+    end
+    loop Watch
+        Agent2->>K8s API2: Watch namespaces
+    end
+    loop Recalculate
+        Scaler->>prover-job-monitor: /report
+        Scaler->>Agent1: /cluster
+        Scaler->>Agent2: /cluster
+        Scaler->>Agent1: /scale
+    end
+```
+
+The Scaler supports 2 types of scaling algorithms: GPU and Simple. The GPU algorithm usually applies to the prover
+itself, and all other Deployments use the Simple algorithm.
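+
+Both algorithms, described in more detail below, share the same core step: divide the queue size by the per-replica
+speed, round up, and cap the result by the configured per-cluster maximum. A minimal illustrative sketch of that step
+(the function and names here are hypothetical, not the actual implementation, which additionally accounts for Pod
+statuses and cluster ordering):
+
+```rust
+/// Illustrative only: map a queue size to a replica count.
+/// `speed` is the number of queued jobs one replica is expected to cover.
+fn replicas_for_queue(queue: u64, speed: u64, max_replicas: u64) -> u64 {
+    // Round up: any remainder still needs one more replica.
+    let needed = (queue + speed - 1) / speed;
+    // Capacity per cluster is capped by config (`max_provers` / `max_replicas`).
+    needed.min(max_replicas)
+}
+
+fn main() {
+    // 1499 queued jobs at speed 500 need 3 replicas.
+    assert_eq!(replicas_for_queue(1499, 500, 100), 3);
+    // The per-cluster cap wins if the queue needs more.
+    assert_eq!(replicas_for_queue(1499, 500, 2), 2);
+}
+```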
+
+The Simple algorithm tries to scale the Deployment up to `queue / speed` replicas (rounded up) in the best cluster. If
+there is not enough capacity, it continues in the next best cluster, and so on. On each run it selects the "best
+cluster" using priority, number of capacity issues, and cluster size. The capacity is limited by config (`max_provers`
+or `max_replicas`) and also by the availability of machines in the cluster. Autoscaler detects that a cluster is
+running out of particular machines by watching for `FailedScaleUp` events and by checking whether a Pod has been stuck
+in Pending for longer than `long_pending_duration`. If not enough capacity is detected, Pods which are not running are
+moved to another cluster.
+
+The GPU algorithm works similarly to the Simple one, but it also recognises different GPU types and distributes the
+load across L4 GPUs first, then T4, V100, P100, and A100, if available.
+
+Different namespaces run different protocol versions and are completely independent. Normally only one namespace is
+active; both are active only during a protocol upgrade. Each namespace has to have the correct version of the binaries
+installed, see the `protocol_versions` config option.
+
+## Dependencies
+
+- [prover-job-monitor](.../prover_job_monitor/)
+- Kubernetes API
+- GCP API (optional)
+
+## Permissions
+
+Agents need the following Kubernetes permissions:
+
+```yaml
+- apiGroups:
+    - ''
+  resources:
+    - pods
+    - events
+    - namespaces
+    - nodes
+  verbs:
+    - get
+    - watch
+    - list
+- apiGroups:
+    - apps
+  resources:
+    - deployments
+    - replicasets
+  verbs:
+    - get
+    - list
+    - watch
+    - patch
+    - update
+```
+
+## Configuration
+
+Prover Autoscaler requires a config file provided via the `--config-path` flag; the supported format is YAML. You also
+need to specify which job to run, Scaler or Agent, using the `--job=scaler` or `--job=agent` flag respectively.
+
+### Common configuration
+
+- `graceful_shutdown_timeout` is the time to wait for all tasks to finish before force shutdown. Default: 5s.
+- `observability` section configures the type of `log_format` (`plain` or `json`) and log levels per module with
+  `log_directives`.
+
+Example:
+
+```yaml
+graceful_shutdown_timeout: 5s
+observability:
+  log_format: plain
+  log_directives: 'zksync_prover_autoscaler=debug'
+```
+
+### Agent configuration
+
+`agent_config` section configures the Agent parameters:
+
+- `prometheus_port` is the port for Prometheus metrics to be served on (path is `/metrics`).
+- `http_port` is the main port for the Scaler to connect to.
+- `namespaces` is the list of namespaces to watch.
+- `dry_run` if enabled, the Agent will not change the number of replicas, just report success. Default: true.
+
+Example:
+
+```yaml
+agent_config:
+  prometheus_port: 8080
+  http_port: 8081
+  namespaces:
+    - prover-old
+    - prover-new
+  dry_run: true
+```
+
+### Scaler configuration
+
+`scaler_config` section configures the Scaler parameters:
+
+- `dry_run` if enabled, the Scaler will not send any scale requests. Default: false.
+- `prometheus_port` is the port for Prometheus metrics to be served on (path is `/metrics`).
+- `prover_job_monitor_url` is the full URL to get the queue report from prover-job-monitor.
+- `agents` is the list of Agents to send requests to.
+- `scaler_run_interval` is the interval between re-calculations. Default: 10s.
+- `protocol_versions` is a map from namespace to the protocol version it processes. It should correspond to the
+  binary versions running there!
+- `cluster_priorities` is a map from cluster name to priority; the lower value is used first.
+- `apply_min_to_namespace` specifies the current primary namespace; the minimum numbers of provers/replicas are
+  applied to it.
+- `min_provers` is the minimum number of provers to run even if the queue is empty. Default: 0.
+- `max_provers` is a map from cluster name to a map from GPU type to the maximum number of provers.
+- `prover_speed` is a map from GPU type to its speed divider. Default: 500.
+- `long_pending_duration` is the time after which a pending Pod is considered long-pending and will be relocated to a
+  different cluster. Default: 10m.
+- `scaler_targets` subsection is a list of Simple targets:
+  - `queue_report_field` is the name of the corresponding queue report section. See the example for possible options.
+  - `deployment` is the name of the Deployment to scale.
+  - `min_replicas` is the minimum number of replicas to run even if the queue is empty. Default: 0.
+  - `max_replicas` is a map from cluster name to the maximum number of replicas.
+  - `speed` is the divider for the corresponding queue.
+
+Example:
+
+```yaml
+scaler_config:
+  dry_run: true
+  prometheus_port: 8082
+  prover_job_monitor_url: http://prover-job-monitor.default.svc.cluster.local:3074/queue_report
+  agents:
+    - http://prover-autoscaler-agent.cluster1.com
+    - http://prover-autoscaler-agent.cluster2.com
+    - http://prover-autoscaler-agent.cluster3.com
+  scaler_run_interval: 30s
+  protocol_versions:
+    prover-old: 0.24.2
+    prover-new: 0.25.0
+  cluster_priorities:
+    cluster1: 0
+    cluster2: 100
+    cluster3: 200
+  apply_min_to_namespace: prover-new
+  min_provers: 1
+  max_provers:
+    cluster1:
+      L4: 1
+      T4: 200
+    cluster2:
+      L4: 100
+      T4: 200
+    cluster3:
+      L4: 100
+      T4: 100
+  prover_speed:
+    L4: 500
+    T4: 400
+  long_pending_duration: 10m
+  scaler_targets:
+    - queue_report_field: basic_witness_jobs
+      deployment: witness-generator-basic-fri
+      min_replicas: 1
+      max_replicas:
+        cluster1: 10
+        cluster2: 20
+      speed: 10
+    - queue_report_field: leaf_witness_jobs
+      deployment: witness-generator-leaf-fri
+      max_replicas:
+        cluster1: 10
+      speed: 10
+    - queue_report_field: node_witness_jobs
+      deployment: witness-generator-node-fri
+      max_replicas:
+        cluster1: 10
+      speed: 10
+    - queue_report_field: recursion_tip_witness_jobs
+      deployment: witness-generator-recursion-tip-fri
+      max_replicas:
+        cluster1: 10
+      speed: 10
+    - queue_report_field: scheduler_witness_jobs
+      deployment: witness-generator-scheduler-fri
+      max_replicas:
+        cluster1: 10
+      speed: 10
+    - queue_report_field: proof_compressor_jobs
+      deployment: proof-fri-gpu-compressor
+      max_replicas:
+        cluster1: 10
+        cluster2: 10
+      speed: 5
+```
diff --git a/prover/crates/bin/prover_autoscaler/src/agent.rs b/prover/crates/bin/prover_autoscaler/src/agent.rs
index 3269a43815c9..030636ad6592 100644
--- a/prover/crates/bin/prover_autoscaler/src/agent.rs
+++ b/prover/crates/bin/prover_autoscaler/src/agent.rs
@@ -84,19 +84,19 @@ async fn get_cluster(State(app): State) -> Result, AppError>
     Ok(Json(cluster))
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, Default, Serialize, Deserialize)]
 pub struct ScaleDeploymentRequest {
     pub namespace: String,
     pub name: String,
     pub size: i32,
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, Default, Serialize, Deserialize)]
 pub struct ScaleRequest {
     pub deployments: Vec,
 }
 
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Default, Serialize, Deserialize)]
 pub struct ScaleResponse {
     pub scale_result: Vec,
 }
diff --git a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs
index b074e0774c97..db215e570ef8 100644
--- a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs
+++ b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs
@@ -29,30 +29,32 @@ where
     ordered.serialize(serializer)
 }
 
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct ScaleEvent {
+    pub name: String,
+    pub time: DateTime,
+}
+
 #[derive(Debug, Default, Clone, Serialize, Deserialize)]
 pub struct Namespace {
     #[serde(serialize_with = "ordered_map")]
     pub deployments: HashMap,
     pub pods: HashMap,
+    #[serde(default)]
+    pub scale_errors: Vec,
 }
 
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
 pub struct Cluster {
     pub name: String,
     pub namespaces: HashMap,
 }
-impl Default for Cluster {
-    fn default() -> Self {
-        Self {
-            name: "".to_string(),
-            namespaces: HashMap::new(),
-        }
-    }
-}
 
 #[derive(Debug, Default, Clone, Serialize, Deserialize)]
 pub struct Clusters {
     pub clusters: HashMap,
+    /// Map from cluster name to its index in the agent URLs Vec.
+    pub agent_ids: HashMap,
 }
 
 #[derive(Default, Debug, EnumString, Display, Hash, PartialEq, Eq, Clone, Copy)]
@@ -63,4 +65,5 @@ pub enum PodStatus {
     Pending,
     LongPending,
     NeedToMove,
+    Failed,
 }
diff --git a/prover/crates/bin/prover_autoscaler/src/config.rs b/prover/crates/bin/prover_autoscaler/src/config.rs
new file mode 100644
index 000000000000..ff3bccf79c83
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/config.rs
@@ -0,0 +1,192 @@
+use std::{collections::HashMap, path::PathBuf, time::Duration};
+
+use anyhow::Context;
+use serde::Deserialize;
+use strum::Display;
+use strum_macros::EnumString;
+use vise::EncodeLabelValue;
+use zksync_config::configs::ObservabilityConfig;
+
+/// Config used for running ProverAutoscaler (both Scaler and Agent).
+#[derive(Debug, Clone, PartialEq, Deserialize)]
+pub struct ProverAutoscalerConfig {
+    /// Amount of time ProverAutoscaler will wait for all its tasks to finish.
+    #[serde(
+        with = "humantime_serde",
+        default = "ProverAutoscalerConfig::default_graceful_shutdown_timeout"
+    )]
+    pub graceful_shutdown_timeout: Duration,
+    pub agent_config: Option,
+    pub scaler_config: Option,
+    pub observability: Option,
+}
+
+#[derive(Debug, Clone, PartialEq, Deserialize)]
+pub struct ProverAutoscalerAgentConfig {
+    /// Port for Prometheus metrics connection.
+    pub prometheus_port: u16,
+    /// HTTP port for the global Scaler to connect to the Agent running in a cluster.
+    pub http_port: u16,
+    /// List of namespaces to watch.
+    #[serde(default = "ProverAutoscalerAgentConfig::default_namespaces")]
+    pub namespaces: Vec,
+    /// If dry-run is enabled, don't do any k8s updates, just report success.
+    #[serde(default = "ProverAutoscalerAgentConfig::default_dry_run")]
+    pub dry_run: bool,
+}
+
+#[derive(Debug, Clone, PartialEq, Deserialize, Default)]
+pub struct ProverAutoscalerScalerConfig {
+    /// Port for Prometheus metrics connection.
+    pub prometheus_port: u16,
+    /// The interval between runs for the global Scaler.
+    #[serde(
+        with = "humantime_serde",
+        default = "ProverAutoscalerScalerConfig::default_scaler_run_interval"
+    )]
+    pub scaler_run_interval: Duration,
+    /// URL to get queue reports from.
+    /// In production it should be "http://prover-job-monitor.stage2.svc.cluster.local:3074/queue_report".
+    #[serde(default = "ProverAutoscalerScalerConfig::default_prover_job_monitor_url")]
+    pub prover_job_monitor_url: String,
+    /// List of ProverAutoscaler Agents to get cluster data from.
+    pub agents: Vec,
+    /// Mapping of namespaces to protocol versions.
+    pub protocol_versions: HashMap,
+    /// Default priorities: which cluster to prefer when there is no other information.
+    pub cluster_priorities: HashMap,
+    /// Prover speed per GPU. Used to calculate the desired number of provers for a queue size.
+    pub prover_speed: HashMap,
+    /// Maximum number of provers which can be run per cluster/GPU.
+    pub max_provers: HashMap,
+    /// Minimum number of provers globally.
+    #[serde(default)]
+    pub min_provers: u32,
+    /// Name of the primary namespace; all min numbers are applied to it.
+    pub apply_min_to_namespace: Option,
+    /// Duration after which a pending pod is considered long-pending.
+    #[serde(
+        with = "humantime_serde",
+        default = "ProverAutoscalerScalerConfig::default_long_pending_duration"
+    )]
+    pub long_pending_duration: Duration,
+    /// List of simple autoscaler targets.
+    pub scaler_targets: Vec,
+    /// If dry-run is enabled, don't send any scale requests.
+    #[serde(default)]
+    pub dry_run: bool,
+}
+
+#[derive(
+    Default,
+    Debug,
+    Display,
+    Hash,
+    PartialEq,
+    Eq,
+    Clone,
+    Copy,
+    Ord,
+    PartialOrd,
+    EnumString,
+    EncodeLabelValue,
+    Deserialize,
+)]
+pub enum Gpu {
+    #[default]
+    Unknown,
+    #[strum(ascii_case_insensitive)]
+    L4,
+    #[strum(ascii_case_insensitive)]
+    T4,
+    #[strum(ascii_case_insensitive)]
+    V100,
+    #[strum(ascii_case_insensitive)]
+    P100,
+    #[strum(ascii_case_insensitive)]
+    A100,
+}
+
+// TODO: generate this enum from QueueReport at https://github.com/matter-labs/zksync-era/blob/main/prover/crates/bin/prover_job_monitor/src/autoscaler_queue_reporter.rs#L23
+// and remove the allowance of non_camel_case_types by generating a field name parser.
+#[derive(Debug, Display, PartialEq, Eq, Hash, Clone, Copy, Deserialize, EnumString, Default)]
+#[allow(non_camel_case_types)]
+pub enum QueueReportFields {
+    #[strum(ascii_case_insensitive)]
+    basic_witness_jobs,
+    #[strum(ascii_case_insensitive)]
+    leaf_witness_jobs,
+    #[strum(ascii_case_insensitive)]
+    node_witness_jobs,
+    #[strum(ascii_case_insensitive)]
+    recursion_tip_witness_jobs,
+    #[strum(ascii_case_insensitive)]
+    scheduler_witness_jobs,
+    #[strum(ascii_case_insensitive)]
+    proof_compressor_jobs,
+    #[default]
+    #[strum(ascii_case_insensitive)]
+    prover_jobs,
+}
+
+/// ScalerTarget can be configured to autoscale any of the services for which a queue is reported by
+/// prover-job-monitor, except provers. Provers need special treatment due to the GPU requirement.
+#[derive(Debug, Clone, PartialEq, Deserialize, Default)]
+pub struct ScalerTarget {
+    pub queue_report_field: QueueReportFields,
+    pub deployment: String,
+    /// Min replicas globally.
+    #[serde(default)]
+    pub min_replicas: usize,
+    /// Max replicas per cluster.
+    pub max_replicas: HashMap,
+    /// The queue will be divided by the speed and rounded up to get the number of replicas.
+ #[serde(default = "ScalerTarget::default_speed")] + pub speed: usize, +} + +impl ProverAutoscalerConfig { + /// Default graceful shutdown timeout -- 5 seconds + pub fn default_graceful_shutdown_timeout() -> Duration { + Duration::from_secs(5) + } +} + +impl ProverAutoscalerAgentConfig { + pub fn default_namespaces() -> Vec { + vec!["prover-blue".to_string(), "prover-red".to_string()] + } + + pub fn default_dry_run() -> bool { + true + } +} + +impl ProverAutoscalerScalerConfig { + /// Default scaler_run_interval -- 10s + pub fn default_scaler_run_interval() -> Duration { + Duration::from_secs(10) + } + + /// Default prover_job_monitor_url -- cluster local URL + pub fn default_prover_job_monitor_url() -> String { + "http://localhost:3074/queue_report".to_string() + } + + /// Default long_pending_duration -- 10m + pub fn default_long_pending_duration() -> Duration { + Duration::from_secs(600) + } +} + +impl ScalerTarget { + pub fn default_speed() -> usize { + 1 + } +} + +pub fn config_from_yaml(path: &PathBuf) -> anyhow::Result { + let yaml = std::fs::read_to_string(path) + .with_context(|| format!("failed to read {}", path.display()))?; + Ok(serde_yaml::from_str(&yaml)?) +} diff --git a/prover/crates/bin/prover_autoscaler/src/global/queuer.rs b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs index 1ef5d96386b5..baeb5b70a4ef 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/queuer.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs @@ -1,13 +1,24 @@ -use std::collections::HashMap; +use std::{collections::HashMap, ops::Deref}; use anyhow::{Context, Ok}; use reqwest::Method; -use zksync_prover_job_monitor::autoscaler_queue_reporter::VersionedQueueReport; +use zksync_prover_job_monitor::autoscaler_queue_reporter::{QueueReport, VersionedQueueReport}; use zksync_utils::http_with_retries::send_request_with_retries; -#[derive(Debug)] -pub struct Queue { - pub queue: HashMap, +use crate::{ + config::QueueReportFields, + metrics::{AUTOSCALER_METRICS, DEFAULT_ERROR_CODE}, +}; + +const MAX_RETRIES: usize = 5; + +pub struct Queue(HashMap<(String, QueueReportFields), u64>); + +impl Deref for Queue { + type Target = HashMap<(String, QueueReportFields), u64>; + fn deref(&self) -> &Self::Target { + &self.0 + } } #[derive(Default)] @@ -15,6 +26,19 @@ pub struct Queuer { pub prover_job_monitor_url: String, } +fn target_to_queue(target: QueueReportFields, report: &QueueReport) -> u64 { + let res = match target { + QueueReportFields::basic_witness_jobs => report.basic_witness_jobs.all(), + QueueReportFields::leaf_witness_jobs => report.leaf_witness_jobs.all(), + QueueReportFields::node_witness_jobs => report.node_witness_jobs.all(), + QueueReportFields::recursion_tip_witness_jobs => report.recursion_tip_witness_jobs.all(), + QueueReportFields::scheduler_witness_jobs => report.scheduler_witness_jobs.all(), + QueueReportFields::proof_compressor_jobs => report.proof_compressor_jobs.all(), + QueueReportFields::prover_jobs => report.prover_jobs.all(), + }; + res as u64 +} + impl Queuer { pub fn new(pjm_url: String) -> Self { Self { @@ -22,20 +46,33 @@ impl Queuer { } } - pub async fn get_queue(&self) -> anyhow::Result { + /// Requests queue report from prover-job-monitor and parse it into Queue HashMap for provided + /// list of jobs. 
+ pub async fn get_queue(&self, jobs: &[QueueReportFields]) -> anyhow::Result { let url = &self.prover_job_monitor_url; - let response = send_request_with_retries(url, 5, Method::GET, None, None).await; - let res = response - .map_err(|err| anyhow::anyhow!("Failed fetching queue from url: {url}: {err:?}"))? + let response = send_request_with_retries(url, MAX_RETRIES, Method::GET, None, None).await; + let response = response.map_err(|err| { + AUTOSCALER_METRICS.calls[&(url.clone(), DEFAULT_ERROR_CODE)].inc(); + anyhow::anyhow!("Failed fetching queue from URL: {url}: {err:?}") + })?; + + AUTOSCALER_METRICS.calls[&(url.clone(), response.status().as_u16())].inc(); + let response = response .json::>() .await .context("Failed to read response as json")?; - - Ok(Queue { - queue: res + Ok(Queue( + response .iter() - .map(|x| (x.version.to_string(), x.report.prover_jobs.queued as u64)) + .flat_map(|versioned_report| { + jobs.iter().map(move |j| { + ( + (versioned_report.version.to_string(), *j), + target_to_queue(*j, &versioned_report.report), + ) + }) + }) .collect::>(), - }) + )) } } diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs index 9f37c4d11675..074da383b740 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs @@ -4,11 +4,12 @@ use chrono::Utc; use debug_map_sorted::SortedOutputExt; use once_cell::sync::Lazy; use regex::Regex; -use zksync_config::configs::prover_autoscaler::{Gpu, ProverAutoscalerScalerConfig}; use super::{queuer, watcher}; use crate::{ + agent::{ScaleDeploymentRequest, ScaleRequest}, cluster_types::{Cluster, Clusters, Pod, PodStatus}, + config::{Gpu, ProverAutoscalerScalerConfig, QueueReportFields, ScalerTarget}, metrics::AUTOSCALER_METRICS, task_wiring::Task, }; @@ -20,7 +21,7 @@ struct GPUPool { name: String, gpu: Gpu, provers: HashMap, // TODO: consider using i64 everywhere to avoid type casts. - preemtions: u64, + scale_errors: usize, max_pool_size: u32, } @@ -44,9 +45,19 @@ struct GPUPoolKey { } static PROVER_DEPLOYMENT_RE: Lazy = - Lazy::new(|| Regex::new(r"^prover-gpu-fri-spec-(\d{1,2})?(-(?[ltvpa]\d+))?$").unwrap()); + Lazy::new(|| Regex::new(r"^circuit-prover-gpu(-(?[ltvpa]\d+))?$").unwrap()); static PROVER_POD_RE: Lazy = - Lazy::new(|| Regex::new(r"^prover-gpu-fri-spec-(\d{1,2})?(-(?[ltvpa]\d+))?").unwrap()); + Lazy::new(|| Regex::new(r"^circuit-prover-gpu(-(?[ltvpa]\d+))?").unwrap()); + +/// gpu_to_prover converts Gpu type to corresponding deployment name. +fn gpu_to_prover(gpu: Gpu) -> String { + let s = "circuit-prover-gpu"; + match gpu { + Gpu::Unknown => "".into(), + Gpu::L4 => s.into(), + _ => format!("{}-{}", s, gpu.to_string().to_lowercase()), + } +} pub struct Scaler { /// namespace to Protocol Version configuration. @@ -54,12 +65,33 @@ pub struct Scaler { watcher: watcher::Watcher, queuer: queuer::Queuer, + jobs: Vec, + prover_scaler: GpuScaler, + simple_scalers: Vec, +} + +pub struct GpuScaler { /// Which cluster to use first. cluster_priorities: HashMap, + apply_min_to_namespace: Option, + min_provers: u32, + max_provers: HashMap>, prover_speed: HashMap, long_pending_duration: chrono::Duration, } +pub struct SimpleScaler { + queue_report_field: QueueReportFields, + deployment: String, + /// Which cluster to use first. 
+ cluster_priorities: HashMap, + apply_min_to_namespace: Option, + min_replicas: usize, + max_replicas: HashMap, + speed: usize, + long_pending_duration: chrono::Duration, +} + struct ProverPodGpu<'a> { name: &'a str, pod: &'a Pod, @@ -82,18 +114,51 @@ impl Scaler { queuer: queuer::Queuer, config: ProverAutoscalerScalerConfig, ) -> Self { + config + .protocol_versions + .iter() + .for_each(|(namespace, version)| { + AUTOSCALER_METRICS.prover_protocol_version[&(namespace.clone(), version.clone())] + .set(1); + }); + + let mut simple_scalers = Vec::default(); + let mut jobs = vec![QueueReportFields::prover_jobs]; + for c in &config.scaler_targets { + jobs.push(c.queue_report_field); + simple_scalers.push(SimpleScaler::new( + c, + config.cluster_priorities.clone(), + config.apply_min_to_namespace.clone(), + chrono::Duration::seconds(config.long_pending_duration.as_secs() as i64), + )) + } Self { - namespaces: config.protocol_versions, + namespaces: config.protocol_versions.clone(), watcher, queuer, + jobs, + prover_scaler: GpuScaler::new(config), + simple_scalers, + } + } +} + +impl GpuScaler { + pub fn new(config: ProverAutoscalerScalerConfig) -> Self { + Self { cluster_priorities: config.cluster_priorities, + apply_min_to_namespace: config.apply_min_to_namespace, + min_provers: config.min_provers, + max_provers: config.max_provers, prover_speed: config.prover_speed, long_pending_duration: chrono::Duration::seconds( - config.long_pending_duration.whole_seconds(), + config.long_pending_duration.as_secs() as i64, ), } } + /// Converts a single cluster into vec of GPUPools, one for each GPU. fn convert_to_gpu_pool(&self, namespace: &String, cluster: &Cluster) -> Vec { let mut gp_map = HashMap::new(); // let Some(namespace_value) = &cluster.namespaces.get(namespace) else { @@ -112,7 +177,17 @@ impl Scaler { let e = gp_map.entry(gpu).or_insert(GPUPool { name: cluster.name.clone(), gpu, - max_pool_size: 100, // TODO: get from the agent. + max_pool_size: self + .max_provers + .get(&cluster.name) + .and_then(|inner_map| inner_map.get(&gpu)) + .copied() + .unwrap_or(0), + scale_errors: namespace_value + .scale_errors + .iter() + .filter(|v| v.time < Utc::now() - chrono::Duration::hours(1)) // TODO Move the duration into config. + .count(), ..Default::default() }); @@ -120,6 +195,12 @@ impl Scaler { e.provers.insert(PodStatus::Running, 0); } + let recent_scale_errors = namespace_value + .scale_errors + .iter() + .filter(|v| v.time < Utc::now() - chrono::Duration::minutes(4)) // TODO Move the duration into config. This should be at least x2 or run interval. 
+ .count(); + for ppg in namespace_value .pods .iter() @@ -131,10 +212,12 @@ impl Scaler { ..Default::default() }); let mut status = PodStatus::from_str(&ppg.pod.status).unwrap_or_default(); - if status == PodStatus::Pending - && ppg.pod.changed < Utc::now() - self.long_pending_duration - { - status = PodStatus::LongPending; + if status == PodStatus::Pending { + if ppg.pod.changed < Utc::now() - self.long_pending_duration { + status = PodStatus::LongPending; + } else if recent_scale_errors > 0 { + status = PodStatus::NeedToMove; + } } tracing::info!( "pod {}: status: {}, real status: {}", @@ -145,7 +228,7 @@ impl Scaler { e.provers.entry(status).and_modify(|n| *n += 1).or_insert(1); } - tracing::info!("From pods {:?}", gp_map.sorted_debug()); + tracing::debug!("From pods {:?}", gp_map.sorted_debug()); gp_map.into_values().collect() } @@ -168,7 +251,7 @@ impl Scaler { a.sum_by_pod_status(PodStatus::LongPending) .cmp(&b.sum_by_pod_status(PodStatus::LongPending)), ) // Sort by long Pending pods. - .then(a.preemtions.cmp(&b.preemtions)) // Sort by preemtions in the cluster. + .then(a.scale_errors.cmp(&b.scale_errors)) // Sort by scale_errors in the cluster. .then( self.cluster_priorities .get(&a.name) @@ -178,6 +261,10 @@ impl Scaler { .then(b.max_pool_size.cmp(&a.max_pool_size)) // Reverse sort by cluster size. }); + gpu_pools.iter().for_each(|p| { + AUTOSCALER_METRICS.scale_errors[&p.name.clone()].set(p.scale_errors as u64); + }); + gpu_pools } @@ -193,16 +280,25 @@ impl Scaler { self.speed(gpu) * n as u64 } - fn normalize_queue(&self, gpu: Gpu, q: u64) -> u64 { + fn normalize_queue(&self, gpu: Gpu, queue: u64) -> u64 { let speed = self.speed(gpu); // Divide and round up if there's any remainder. - (q + speed - 1) / speed * speed + (queue + speed - 1) / speed * speed } - fn run(&self, namespace: &String, q: u64, clusters: &Clusters) -> HashMap { + fn run(&self, namespace: &String, queue: u64, clusters: &Clusters) -> HashMap { let sc = self.sorted_clusters(namespace, clusters); tracing::debug!("Sorted clusters for namespace {}: {:?}", namespace, &sc); + // Increase queue size, if it's too small, to make sure that required min_provers are + // running. + let queue: u64 = if self.apply_min_to_namespace.as_deref() == Some(namespace.as_str()) { + self.normalize_queue(Gpu::L4, queue) + .max(self.provers_to_speed(Gpu::L4, self.min_provers)) + } else { + queue + }; + let mut total: i64 = 0; let mut provers: HashMap = HashMap::new(); for c in &sc { @@ -221,9 +317,9 @@ impl Scaler { } // Remove unneeded pods. - if (total as u64) > self.normalize_queue(Gpu::L4, q) { + if (total as u64) > self.normalize_queue(Gpu::L4, queue) { for c in sc.iter().rev() { - let mut excess_queue = total as u64 - self.normalize_queue(c.gpu, q); + let mut excess_queue = total as u64 - self.normalize_queue(c.gpu, queue); let mut excess_provers = (excess_queue / self.speed(c.gpu)) as u32; let p = provers.entry(c.to_key()).or_default(); if *p < excess_provers { @@ -248,11 +344,11 @@ impl Scaler { } } - tracing::debug!("Queue coverd with provers: {}", total); + tracing::debug!("Queue covered with provers: {}", total); // Add required provers. 
- if (total as u64) < q { + if (total as u64) < queue { for c in &sc { - let mut required_queue = q - total as u64; + let mut required_queue = queue - total as u64; let mut required_provers = (self.normalize_queue(c.gpu, required_queue) / self.speed(c.gpu)) as u32; let p = provers.entry(c.to_key()).or_default(); @@ -265,29 +361,367 @@ impl Scaler { } } - tracing::debug!("run result: provers {:?}, total: {}", &provers, total); + tracing::debug!( + "run result for namespace {}: provers {:?}, total: {}", + namespace, + &provers, + total + ); + + provers + } + fn diff( + namespace: &str, + provers: HashMap, + clusters: &Clusters, + requests: &mut HashMap, + ) { provers + .into_iter() + .for_each(|(GPUPoolKey { cluster, gpu }, replicas)| { + let prover = gpu_to_prover(gpu); + clusters + .clusters + .get(&cluster) + .and_then(|c| c.namespaces.get(namespace)) + .and_then(|ns| ns.deployments.get(&prover)) + .map_or_else( + || { + tracing::error!( + "Wasn't able to find deployment {} in cluster {}, namespace {}", + prover, + cluster, + namespace + ) + }, + |deployment| { + if deployment.desired != replicas as i32 { + requests + .entry(cluster.clone()) + .or_default() + .deployments + .push(ScaleDeploymentRequest { + namespace: namespace.into(), + name: prover.clone(), + size: replicas as i32, + }); + } + }, + ); + }) + } +} + +#[derive(Default, Debug, PartialEq, Eq)] +struct Pool { + name: String, + pods: HashMap, + scale_errors: usize, + max_pool_size: usize, +} + +impl Pool { + fn sum_by_pod_status(&self, ps: PodStatus) -> usize { + self.pods.get(&ps).cloned().unwrap_or(0) + } +} + +impl SimpleScaler { + pub fn new( + config: &ScalerTarget, + cluster_priorities: HashMap, + apply_min_to_namespace: Option, + long_pending_duration: chrono::Duration, + ) -> Self { + Self { + queue_report_field: config.queue_report_field, + deployment: config.deployment.clone(), + cluster_priorities, + apply_min_to_namespace, + min_replicas: config.min_replicas, + max_replicas: config.max_replicas.clone(), + speed: config.speed, + long_pending_duration, + } + } + + fn convert_to_pool(&self, namespace: &String, cluster: &Cluster) -> Option { + let Some(namespace_value) = &cluster.namespaces.get(namespace) else { + // No namespace in config, ignoring. + return None; + }; + + // TODO: Check if related deployment exists. + let mut pool = Pool { + name: cluster.name.clone(), + max_pool_size: self.max_replicas.get(&cluster.name).copied().unwrap_or(0), + scale_errors: namespace_value + .scale_errors + .iter() + .filter(|v| v.time < Utc::now() - chrono::Duration::hours(1)) // TODO Move the duration into config. + .count(), + ..Default::default() + }; + + // Initialize pool only if we have ready deployments. 
+ pool.pods.insert(PodStatus::Running, 0); + + let pod_re = Regex::new(&format!("^{}-", self.deployment)).unwrap(); + for (_, pod) in namespace_value + .pods + .iter() + .filter(|(name, _)| pod_re.is_match(name)) + { + let mut status = PodStatus::from_str(&pod.status).unwrap_or_default(); + if status == PodStatus::Pending && pod.changed < Utc::now() - self.long_pending_duration + { + status = PodStatus::LongPending; + } + pool.pods.entry(status).and_modify(|n| *n += 1).or_insert(1); + } + + tracing::debug!("Pool pods {:?}", pool); + + Some(pool) + } + + fn sorted_clusters(&self, namespace: &String, clusters: &Clusters) -> Vec { + let mut pools: Vec = clusters + .clusters + .values() + .flat_map(|c| self.convert_to_pool(namespace, c)) + .collect(); + + pools.sort_by(|a, b| { + a.sum_by_pod_status(PodStatus::NeedToMove) + .cmp(&b.sum_by_pod_status(PodStatus::NeedToMove)) // Sort by need to evict. + .then( + a.sum_by_pod_status(PodStatus::LongPending) + .cmp(&b.sum_by_pod_status(PodStatus::LongPending)), + ) // Sort by long Pending pods. + .then(a.scale_errors.cmp(&b.scale_errors)) // Sort by scale_errors in the cluster. + .then( + self.cluster_priorities + .get(&a.name) + .unwrap_or(&1000) + .cmp(self.cluster_priorities.get(&b.name).unwrap_or(&1000)), + ) // Sort by priority. + .then(b.max_pool_size.cmp(&a.max_pool_size)) // Reverse sort by cluster size. + }); + + pools + } + + fn pods_to_speed(&self, n: usize) -> u64 { + (self.speed * n) as u64 + } + + fn normalize_queue(&self, queue: u64) -> u64 { + let speed = self.speed as u64; + // Divide and round up if there's any remainder. + (queue + speed - 1) / speed * speed + } + + fn run(&self, namespace: &String, queue: u64, clusters: &Clusters) -> HashMap { + let sorted_clusters = self.sorted_clusters(namespace, clusters); + tracing::debug!( + "Sorted clusters for namespace {}: {:?}", + namespace, + &sorted_clusters + ); + + // Increase queue size, if it's too small, to make sure that required min_provers are + // running. + let queue: u64 = if self.apply_min_to_namespace.as_deref() == Some(namespace.as_str()) { + self.normalize_queue(queue) + .max(self.pods_to_speed(self.min_replicas)) + } else { + queue + }; + + let mut total: i64 = 0; + let mut pods: HashMap = HashMap::new(); + for cluster in &sorted_clusters { + for (status, replicas) in &cluster.pods { + match status { + PodStatus::Running | PodStatus::Pending => { + total += self.pods_to_speed(*replicas) as i64; + pods.entry(cluster.name.clone()) + .and_modify(|x| *x += replicas) + .or_insert(*replicas); + } + _ => (), // Ignore LongPending as not running here. + } + } + } + + // Remove unneeded pods. + if (total as u64) > self.normalize_queue(queue) { + for cluster in sorted_clusters.iter().rev() { + let mut excess_queue = total as u64 - self.normalize_queue(queue); + let mut excess_pods = excess_queue as usize / self.speed; + let replicas = pods.entry(cluster.name.clone()).or_default(); + if *replicas < excess_pods { + excess_pods = *replicas; + excess_queue = *replicas as u64 * self.speed as u64; + } + *replicas -= excess_pods; + total -= excess_queue as i64; + if total <= 0 { + break; + }; + } + } + + // Reduce load in over capacity pools. + for cluster in &sorted_clusters { + let replicas = pods.entry(cluster.name.clone()).or_default(); + if cluster.max_pool_size < *replicas { + let excess = *replicas - cluster.max_pool_size; + total -= (excess * self.speed) as i64; + *replicas -= excess; + } + } + + tracing::debug!("Queue covered with provers: {}", total); + // Add required pods. 
+ if (total as u64) < queue { + for cluster in &sorted_clusters { + let mut required_queue = queue - total as u64; + let mut required_pods = self.normalize_queue(required_queue) as usize / self.speed; + let replicas = pods.entry(cluster.name.clone()).or_default(); + if *replicas + required_pods > cluster.max_pool_size { + required_pods = cluster.max_pool_size - *replicas; + required_queue = (required_pods * self.speed) as u64; + } + *replicas += required_pods; + total += required_queue as i64; + } + } + + tracing::debug!( + "run result for namespace {}: provers {:?}, total: {}", + namespace, + &pods, + total + ); + + pods + } + + fn diff( + &self, + namespace: &str, + replicas: HashMap, + clusters: &Clusters, + requests: &mut HashMap, + ) { + let deployment_name = self.deployment.clone(); + replicas.into_iter().for_each(|(cluster, replicas)| { + clusters + .clusters + .get(&cluster) + .and_then(|c| c.namespaces.get(namespace)) + .and_then(|ns| ns.deployments.get(&deployment_name)) + .map_or_else( + || { + tracing::error!( + "Wasn't able to find deployment {} in cluster {}, namespace {}", + deployment_name, + cluster, + namespace + ) + }, + |deployment| { + if deployment.desired != replicas as i32 { + requests + .entry(cluster.clone()) + .or_default() + .deployments + .push(ScaleDeploymentRequest { + namespace: namespace.into(), + name: deployment_name.clone(), + size: replicas as i32, + }); + } + }, + ); + }) } } +/// is_namespace_running returns true if there are some pods running in it. +fn is_namespace_running(namespace: &str, clusters: &Clusters) -> bool { + clusters + .clusters + .values() + .flat_map(|v| v.namespaces.iter()) + .filter_map(|(k, v)| if k == namespace { Some(v) } else { None }) + .flat_map(|v| v.deployments.values()) + .map( + |d| d.running + d.desired, // If there is something running or expected to run, we + // should re-evaluate the namespace. + ) + .sum::() + > 0 +} + #[async_trait::async_trait] impl Task for Scaler { async fn invoke(&self) -> anyhow::Result<()> { - let queue = self.queuer.get_queue().await.unwrap(); - - // TODO: Check that clusters data is ready. - let clusters = self.watcher.clusters.lock().await; - for (ns, ppv) in &self.namespaces { - let q = queue.queue.get(ppv).cloned().unwrap_or(0); - if q > 0 { - let provers = self.run(ns, q, &clusters); - for (k, num) in &provers { - AUTOSCALER_METRICS.provers[&(k.cluster.clone(), ns.clone(), k.gpu)] - .set(*num as u64); + let queue = self.queuer.get_queue(&self.jobs).await.unwrap(); + + let mut scale_requests: HashMap = HashMap::new(); + { + let guard = self.watcher.data.lock().await; // Keeping the lock during all calls of run() for + // consitency. + if let Err(err) = watcher::check_is_ready(&guard.is_ready) { + AUTOSCALER_METRICS.clusters_not_ready.inc(); + tracing::warn!("Skipping Scaler run: {}", err); + return Ok(()); + } + + for (ns, ppv) in &self.namespaces { + // Prover + let q = queue + .get(&(ppv.to_string(), QueueReportFields::prover_jobs)) + .cloned() + .unwrap_or(0); + AUTOSCALER_METRICS.queue[&(ns.clone(), "prover".into())].set(q); + tracing::debug!("Running eval for namespace {ns} and PPV {ppv} found queue {q}"); + if q > 0 || is_namespace_running(ns, &guard.clusters) { + let provers = self.prover_scaler.run(ns, q, &guard.clusters); + for (k, num) in &provers { + AUTOSCALER_METRICS.provers[&(k.cluster.clone(), ns.clone(), k.gpu)] + .set(*num as u64); + } + GpuScaler::diff(ns, provers, &guard.clusters, &mut scale_requests); + } + + // Simple Scalers. 
+ for scaler in &self.simple_scalers { + let q = queue + .get(&(ppv.to_string(), scaler.queue_report_field)) + .cloned() + .unwrap_or(0); + AUTOSCALER_METRICS.queue[&(ns.clone(), scaler.deployment.clone())].set(q); + tracing::debug!("Running eval for namespace {ns}, PPV {ppv}, simple scaler {} found queue {q}", scaler.deployment); + if q > 0 || is_namespace_running(ns, &guard.clusters) { + let replicas = scaler.run(ns, q, &guard.clusters); + for (k, num) in &replicas { + AUTOSCALER_METRICS.jobs + [&(scaler.deployment.clone(), k.clone(), ns.clone())] + .set(*num as u64); + } + scaler.diff(ns, replicas, &guard.clusters, &mut scale_requests); + } } - // TODO: compare before and desired, send commands [cluster,namespace,deployment] -> provers } + } // Unlock self.watcher.data. + + if let Err(err) = self.watcher.send_scale(scale_requests).await { + tracing::error!("Failed scale request: {}", err); } Ok(()) @@ -296,65 +730,459 @@ impl Task for Scaler { #[cfg(test)] mod tests { - use std::sync::Arc; - - use tokio::sync::Mutex; - use super::*; - use crate::{ - cluster_types::{self, Deployment, Namespace, Pod}, - global::{queuer, watcher}, - }; + use crate::cluster_types::{Deployment, Namespace, Pod, ScaleEvent}; + #[tracing_test::traced_test] #[test] fn test_run() { - let watcher = watcher::Watcher { - cluster_agents: vec![], - clusters: Arc::new(Mutex::new(cluster_types::Clusters { - ..Default::default() - })), - }; - let queuer = queuer::Queuer { - prover_job_monitor_url: "".to_string(), - }; - let scaler = Scaler::new(watcher, queuer, ProverAutoscalerScalerConfig::default()); - let got = scaler.run( - &"prover".to_string(), - 1499, - &Clusters { - clusters: HashMap::from([( - "foo".to_string(), - Cluster { - name: "foo".to_string(), - namespaces: HashMap::from([( - "prover".to_string(), - Namespace { - deployments: HashMap::from([( - "prover-gpu-fri-spec-1".to_string(), - Deployment { + let scaler = GpuScaler::new(ProverAutoscalerScalerConfig { + cluster_priorities: [("foo".into(), 0), ("bar".into(), 10)].into(), + apply_min_to_namespace: Some("prover-other".into()), + min_provers: 2, + max_provers: [ + ("foo".into(), [(Gpu::L4, 100)].into()), + ("bar".into(), [(Gpu::L4, 100)].into()), + ] + .into(), + ..Default::default() + }); + + assert_eq!( + scaler.run( + &"prover".into(), + 1499, + &Clusters { + clusters: [( + "foo".into(), + Cluster { + name: "foo".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment::default(), + )] + .into(), + pods: [( + "circuit-prover-gpu-7c5f8fc747-gmtcr".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + )] + .into(), + ..Default::default() + }, + )] + .into(), + }, + )] + .into(), + ..Default::default() + }, + ), + [( + GPUPoolKey { + cluster: "foo".into(), + gpu: Gpu::L4, + }, + 3, + )] + .into(), + "3 new provers" + ); + assert_eq!( + scaler.run( + &"prover".into(), + 499, + &Clusters { + clusters: [ + ( + "foo".into(), + Cluster { + name: "foo".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment::default(), + )] + .into(), + ..Default::default() + }, + )] + .into(), + }, + ), + ( + "bar".into(), + Cluster { + name: "bar".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment { + running: 1, + desired: 1, + }, + )] + .into(), + pods: [( + "circuit-prover-gpu-7c5f8fc747-gmtcr".into(), + Pod { + status: "Running".into(), + ..Default::default() 
+ }, + )] + .into(), + ..Default::default() + }, + )] + .into(), + }, + ) + ] + .into(), + ..Default::default() + }, + ), + [ + ( + GPUPoolKey { + cluster: "foo".into(), + gpu: Gpu::L4, + }, + 0, + ), + ( + GPUPoolKey { + cluster: "bar".into(), + gpu: Gpu::L4, + }, + 1, + ) + ] + .into(), + "Preserve running" + ); + } + + #[tracing_test::traced_test] + #[test] + fn test_run_min_provers() { + let scaler = GpuScaler::new(ProverAutoscalerScalerConfig { + cluster_priorities: [("foo".into(), 0), ("bar".into(), 10)].into(), + apply_min_to_namespace: Some("prover".into()), + min_provers: 2, + max_provers: [ + ("foo".into(), [(Gpu::L4, 100)].into()), + ("bar".into(), [(Gpu::L4, 100)].into()), + ] + .into(), + ..Default::default() + }); + + assert_eq!( + scaler.run( + &"prover".into(), + 10, + &Clusters { + clusters: [ + ( + "foo".into(), + Cluster { + name: "foo".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment::default(), + )] + .into(), ..Default::default() }, - )]), - pods: HashMap::from([( - "prover-gpu-fri-spec-1-c47644679-x9xqp".to_string(), - Pod { - status: "Running".to_string(), + )] + .into(), + }, + ), + ( + "bar".into(), + Cluster { + name: "bar".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment::default(), + )] + .into(), ..Default::default() }, - )]), + )] + .into(), }, - )]), + ) + ] + .into(), + ..Default::default() + }, + ), + [ + ( + GPUPoolKey { + cluster: "foo".into(), + gpu: Gpu::L4, + }, + 2, + ), + ( + GPUPoolKey { + cluster: "bar".into(), + gpu: Gpu::L4, + }, + 0, + ) + ] + .into(), + "Min 2 provers, non running" + ); + assert_eq!( + scaler.run( + &"prover".into(), + 0, + &Clusters { + clusters: [ + ( + "foo".into(), + Cluster { + name: "foo".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment { + running: 3, + desired: 3, + }, + )] + .into(), + pods: [ + ( + "circuit-prover-gpu-7c5f8fc747-gmtcr".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + ), + ( + "circuit-prover-gpu-7c5f8fc747-gmtc2".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + ), + ( + "circuit-prover-gpu-7c5f8fc747-gmtc3".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + ) + ] + .into(), + ..Default::default() + }, + )] + .into(), + }, + ), + ( + "bar".into(), + Cluster { + name: "bar".into(), + namespaces: [( + "prover".into(), + Namespace { + deployments: [( + "circuit-prover-gpu".into(), + Deployment { + running: 2, + desired: 2, + }, + )] + .into(), + pods: [ + ( + "circuit-prover-gpu-7c5f8fc747-gmtcr".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + ), + ( + "circuit-prover-gpu-7c5f8fc747-gmtc2".into(), + Pod { + status: "Running".into(), + ..Default::default() + }, + ) + ] + .into(), + ..Default::default() + }, + )] + .into(), + }, + ) + ] + .into(), + ..Default::default() + }, + ), + [ + ( + GPUPoolKey { + cluster: "foo".into(), + gpu: Gpu::L4, + }, + 2, + ), + ( + GPUPoolKey { + cluster: "bar".into(), + gpu: Gpu::L4, + }, + 0, + ) + ] + .into(), + "Min 2 provers, 5 running" + ); + } + + #[tracing_test::traced_test] + #[test] + fn test_run_need_move() { + let scaler = GpuScaler::new(ProverAutoscalerScalerConfig { + cluster_priorities: [("foo".into(), 0), ("bar".into(), 10)].into(), + apply_min_to_namespace: Some("prover".into()), + min_provers: 2, + max_provers: [ + ("foo".into(), [(Gpu::L4, 
100)].into()),
+                ("bar".into(), [(Gpu::L4, 100)].into()),
+            ]
+            .into(),
+            long_pending_duration: ProverAutoscalerScalerConfig::default_long_pending_duration(),
+            ..Default::default()
+        });
+
+        assert_eq!(
+            scaler.run(
+                &"prover".into(),
+                1400,
+                &Clusters {
+                    clusters: [
+                        (
+                            "foo".into(),
+                            Cluster {
+                                name: "foo".into(),
+                                namespaces: [(
+                                    "prover".into(),
+                                    Namespace {
+                                        deployments: [(
+                                            "circuit-prover-gpu".into(),
+                                            Deployment {
+                                                running: 3,
+                                                desired: 3,
+                                            },
+                                        )]
+                                        .into(),
+                                        pods: [
+                                            (
+                                                "circuit-prover-gpu-7c5f8fc747-gmtcr".into(),
+                                                Pod {
+                                                    status: "Running".into(),
+                                                    changed: Utc::now(),
+                                                    ..Default::default()
+                                                },
+                                            ),
+                                            (
+                                                "circuit-prover-gpu-7c5f8fc747-gmtc2".into(),
+                                                Pod {
+                                                    status: "Pending".into(),
+                                                    changed: Utc::now(),
+                                                    ..Default::default()
+                                                },
+                                            ),
+                                            (
+                                                "circuit-prover-gpu-7c5f8fc747-gmtc3".into(),
+                                                Pod {
+                                                    status: "Running".into(),
+                                                    changed: Utc::now(),
+                                                    ..Default::default()
+                                                },
+                                            )
+                                        ]
+                                        .into(),
+                                        scale_errors: vec![ScaleEvent {
+                                            name: "circuit-prover-gpu-7c5f8fc747-gmtc2.123456"
+                                                .into(),
+                                            time: Utc::now() - chrono::Duration::hours(1)
+                                        }],
+                                    },
+                                )]
+                                .into(),
+                            },
+                        ),
+                        (
+                            "bar".into(),
+                            Cluster {
+                                name: "bar".into(),
+                                namespaces: [(
+                                    "prover".into(),
+                                    Namespace {
+                                        deployments: [(
+                                            "circuit-prover-gpu".into(),
+                                            Deployment::default(),
+                                        )]
+                                        .into(),
+                                        ..Default::default()
+                                    },
+                                )]
+                                .into(),
+                            },
+                        )
+                    ]
+                    .into(),
+                    ..Default::default()
+                },
+            ),
+            [
+                (
+                    GPUPoolKey {
+                        cluster: "foo".into(),
+                        gpu: Gpu::L4,
+                    },
+                    2,
+                ),
+                (
+                    GPUPoolKey {
+                        cluster: "bar".into(),
+                        gpu: Gpu::L4,
                     },
-                )]),
-            },
+                    1,
+                )
+            ]
+            .into(),
+            "Move 1 prover to bar"
         );
-        let want = HashMap::from([(
-            GPUPoolKey {
-                cluster: "foo".to_string(),
-                gpu: Gpu::L4,
-            },
-            3,
-        )]);
-        assert!(got == want);
     }
 }
diff --git a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs
index ef3ebd3b8193..95b9e32cac5b 100644
--- a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs
+++ b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs
@@ -1,26 +1,50 @@
 use std::{collections::HashMap, sync::Arc};
 
-use anyhow::{Context, Ok};
+use anyhow::{anyhow, Context, Ok, Result};
 use futures::future;
-use reqwest::Method;
+use reqwest::{
+    header::{HeaderMap, HeaderValue, CONTENT_TYPE},
+    Method,
+};
 use tokio::sync::Mutex;
 use url::Url;
 use zksync_utils::http_with_retries::send_request_with_retries;
 
 use crate::{
+    agent::{ScaleRequest, ScaleResponse},
     cluster_types::{Cluster, Clusters},
+    metrics::{AUTOSCALER_METRICS, DEFAULT_ERROR_CODE},
     task_wiring::Task,
 };
 
-#[derive(Clone)]
+const MAX_RETRIES: usize = 5;
+
+#[derive(Default)]
+pub struct WatchedData {
+    pub clusters: Clusters,
+    pub is_ready: Vec<bool>,
+}
+
+pub fn check_is_ready(v: &Vec<bool>) -> Result<()> {
+    for b in v {
+        if !b {
+            return Err(anyhow!("Clusters data is not ready"));
+        }
+    }
+    Ok(())
+}
+
+#[derive(Default, Clone)]
 pub struct Watcher {
     /// List of base URLs of all agents.
     pub cluster_agents: Vec<Arc<Url>>,
-    pub clusters: Arc<Mutex<Clusters>>,
+    pub dry_run: bool,
+    pub data: Arc<Mutex<WatchedData>>,
 }
 
 impl Watcher {
-    pub fn new(agent_urls: Vec<String>) -> Self {
+    pub fn new(agent_urls: Vec<String>, dry_run: bool) -> Self {
+        let size = agent_urls.len();
         Self {
             cluster_agents: agent_urls
                 .into_iter()
@@ -31,11 +55,103 @@ impl Watcher {
                 )
             })
             .collect(),
-            clusters: Arc::new(Mutex::new(Clusters {
-                clusters: HashMap::new(),
+            dry_run,
+            data: Arc::new(Mutex::new(WatchedData {
+                clusters: Clusters::default(),
+                is_ready: vec![false; size],
             })),
         }
     }
+
+    pub async fn send_scale(&self, requests: HashMap<String, ScaleRequest>) -> anyhow::Result<()> {
+        let id_requests: HashMap<usize, ScaleRequest>;
+        {
+            // Convert cluster names into ids. Holding the data lock.
+            let guard = self.data.lock().await;
+            id_requests = requests
+                .into_iter()
+                .filter_map(|(cluster, scale_request)| {
+                    guard.clusters.agent_ids.get(&cluster).map_or_else(
+                        || {
+                            tracing::error!("Failed to find id for cluster {}", cluster);
+                            None
+                        },
+                        |id| Some((*id, scale_request)),
+                    )
+                })
+                .collect();
+        }
+
+        let dry_run = self.dry_run;
+        let handles: Vec<_> = id_requests
+            .into_iter()
+            .map(|(id, sr)| {
+                let url: String = self.cluster_agents[id]
+                    .clone()
+                    .join("/scale")
+                    .unwrap()
+                    .to_string();
+                tracing::debug!("Sending scale request to {}, data: {:?}.", url, sr);
+                tokio::spawn(async move {
+                    let mut headers = HeaderMap::new();
+                    headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
+                    if dry_run {
+                        tracing::info!("Dry-run mode, not sending the request.");
+                        return Ok((id, Ok(ScaleResponse::default())));
+                    }
+                    let response = send_request_with_retries(
+                        &url,
+                        MAX_RETRIES,
+                        Method::POST,
+                        Some(headers),
+                        Some(serde_json::to_vec(&sr)?),
+                    )
+                    .await;
+                    let response = response.map_err(|err| {
+                        AUTOSCALER_METRICS.calls[&(url.clone(), DEFAULT_ERROR_CODE)].inc();
+                        anyhow::anyhow!("Failed fetching cluster from url: {url}: {err:?}")
+                    })?;
+                    AUTOSCALER_METRICS.calls[&(url, response.status().as_u16())].inc();
+                    let response = response
+                        .json::<ScaleResponse>()
+                        .await
+                        .context("Failed to read response as json");
+                    Ok((id, response))
+                })
+            })
+            .collect();
+
+        future::try_join_all(
+            future::join_all(handles)
+                .await
+                .into_iter()
+                .map(|h| async move {
+                    let (id, res) = h??;
+
+                    let errors: Vec<_> = res
+                        .expect("failed to do request to Agent")
+                        .scale_result
+                        .iter()
+                        .filter_map(|e| {
+                            if !e.is_empty() {
+                                Some(format!("Agent {} failed to scale: {}", id, e))
+                            } else {
+                                None
+                            }
+                        })
+                        .collect();
+
+                    if !errors.is_empty() {
+                        return Err(anyhow!(errors.join(";")));
+                    }
+                    Ok(())
+                })
+                .collect::<Vec<_>>(),
+        )
+        .await?;
+
+        Ok(())
+    }
 }
 
 #[async_trait::async_trait]
@@ -45,7 +161,8 @@ impl Task for Watcher {
             .cluster_agents
             .clone()
             .into_iter()
-            .map(|a| {
+            .enumerate()
+            .map(|(i, a)| {
                 tracing::debug!("Getting cluster data from agent {}.", a);
                 tokio::spawn(async move {
                     let url: String = a
@@ -54,14 +171,19 @@ impl Task for Watcher {
                         .context("Failed to join URL with /cluster")?
                         .to_string();
                     let response =
-                        send_request_with_retries(&url, 5, Method::GET, None, None).await;
-                    response
-                        .map_err(|err| {
-                            anyhow::anyhow!("Failed fetching cluster from url: {url}: {err:?}")
-                        })?
+                        send_request_with_retries(&url, MAX_RETRIES, Method::GET, None, None).await;
+
+                    let response = response.map_err(|err| {
+                        // TODO: refactor send_request_with_retries to return status.
+                        AUTOSCALER_METRICS.calls[&(url.clone(), DEFAULT_ERROR_CODE)].inc();
+                        anyhow::anyhow!("Failed fetching cluster from url: {url}: {err:?}")
+                    })?;
+                    AUTOSCALER_METRICS.calls[&(url, response.status().as_u16())].inc();
+                    let response = response
                         .json::<Cluster>()
                         .await
-                        .context("Failed to read response as json")
+                        .context("Failed to read response as json");
+                    Ok((i, response))
                 })
             })
             .collect();
@@ -71,18 +193,17 @@ impl Task for Watcher {
                 .await
                 .into_iter()
                 .map(|h| async move {
-                    let c = h.unwrap().unwrap();
-                    self.clusters
-                        .lock()
-                        .await
-                        .clusters
-                        .insert(c.name.clone(), c);
+                    let (i, res) = h??;
+                    let c = res?;
+                    let mut guard = self.data.lock().await;
+                    guard.clusters.agent_ids.insert(c.name.clone(), i);
+                    guard.clusters.clusters.insert(c.name.clone(), c);
+                    guard.is_ready[i] = true;
                     Ok(())
                 })
                 .collect::<Vec<_>>(),
        )
-        .await
-        .unwrap();
+        .await?;
 
         Ok(())
     }
diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs b/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs
index 170b0b106507..5e6f56aacc93 100644
--- a/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs
+++ b/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs
@@ -4,9 +4,14 @@ use kube::api::{Api, Patch, PatchParams};
 #[derive(Clone)]
 pub struct Scaler {
     pub client: kube::Client,
+    dry_run: bool,
 }
 
 impl Scaler {
+    pub fn new(client: kube::Client, dry_run: bool) -> Self {
+        Self { client, dry_run }
+    }
+
     pub async fn scale(&self, namespace: &str, name: &str, size: i32) -> anyhow::Result<()> {
         let deployments: Api<Deployment> = Api::namespaced(self.client.clone(), namespace);
 
@@ -18,6 +23,16 @@ impl Scaler {
                 "replicas": size
             }
         });
+
+        if self.dry_run {
+            tracing::info!(
+                "Dry run of scaled deployment/{} to {} replica(s).",
+                name,
+                size
+            );
+            return Ok(());
+        }
+
         let pp = PatchParams::default();
         deployments.patch(name, &pp, &Patch::Merge(patch)).await?;
         tracing::info!("Scaled deployment/{} to {} replica(s).", name, size);
diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs
index 8746d17663be..b8476ab475ab 100644
--- a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs
+++ b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs
@@ -1,18 +1,21 @@
 use std::{collections::HashMap, sync::Arc};
 
-use chrono::Utc;
+use anyhow::Context;
+use chrono::{DateTime, Utc};
 use futures::{stream, StreamExt, TryStreamExt};
 use k8s_openapi::api;
 use kube::{
     api::{Api, ResourceExt},
     runtime::{watcher, WatchStreamExt},
 };
+use reqwest::{
+    header::{HeaderMap, HeaderValue},
+    Method,
+};
 use tokio::sync::Mutex;
+use zksync_utils::http_with_retries::send_request_with_retries;
 
-use crate::{
-    cluster_types::{Cluster, Deployment, Namespace, Pod},
-    metrics::AUTOSCALER_METRICS,
-};
+use crate::cluster_types::{Cluster, Deployment, Namespace, Pod, ScaleEvent};
 
 #[derive(Clone)]
 pub struct Watcher {
@@ -20,13 +23,37 @@ pub struct Watcher {
     pub cluster: Arc<Mutex<Cluster>>,
 }
 
+async fn get_cluster_name() -> anyhow::Result<String> {
+    let mut headers = HeaderMap::new();
+    headers.insert("Metadata-Flavor", HeaderValue::from_static("Google"));
+    let url = "http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name";
+    let response = send_request_with_retries(url, 5, Method::GET, Some(headers), None).await;
+    response
+        .map_err(|err| anyhow::anyhow!("Failed fetching response from url: {url}: {err:?}"))?
+        .text()
+        .await
+        .context("Failed to read response as text")
+}
+
 impl Watcher {
-    pub fn new(client: kube::Client, cluster_name: String, namespaces: Vec<String>) -> Self {
+    pub async fn new(
+        client: kube::Client,
+        cluster_name: Option<String>,
+        namespaces: Vec<String>,
+    ) -> Self {
         let mut ns = HashMap::new();
         namespaces.into_iter().for_each(|n| {
             ns.insert(n, Namespace::default());
         });
+
+        let cluster_name = match cluster_name {
+            Some(c) => c,
+            None => get_cluster_name()
+                .await
+                .expect("Load cluster_name from GCP"),
+        };
+        tracing::info!("Agent cluster name is {cluster_name}");
+
         Self {
             client,
             cluster: Arc::new(Mutex::new(Cluster {
@@ -38,8 +65,6 @@ impl Watcher {
 
     pub async fn run(self) -> anyhow::Result<()> {
         // TODO: add actual metrics
-        AUTOSCALER_METRICS.protocol_version.set(1);
-        AUTOSCALER_METRICS.calls.inc_by(1);
 
         // TODO: watch for a list of namespaces, get:
         //  - deployments (name, running, desired) [done]
@@ -67,6 +92,15 @@ impl Watcher {
                     .map_ok(Watched::Pod)
                     .boxed(),
             );
+
+            let events: Api<api::core::v1::Event> = Api::namespaced(self.client.clone(), namespace);
+            watchers.push(
+                watcher(events, watcher::Config::default())
+                    .default_backoff()
+                    .applied_objects()
+                    .map_ok(Watched::Event)
+                    .boxed(),
+            );
         }
         // select on applied events from all watchers
         let mut combo_stream = stream::select_all(watchers);
@@ -75,61 +109,97 @@ impl Watcher {
         enum Watched {
             Deploy(api::apps::v1::Deployment),
             Pod(api::core::v1::Pod),
+            Event(api::core::v1::Event),
         }
-        while let Some(o) = combo_stream.try_next().await? {
+        while let Some(o) = combo_stream.next().await {
             match o {
-                Watched::Deploy(d) => {
-                    let namespace = match d.namespace() {
-                        Some(n) => n.to_string(),
-                        None => continue,
-                    };
-                    let mut cluster = self.cluster.lock().await;
-                    let v = cluster.namespaces.get_mut(&namespace).unwrap();
-                    let dep = v
-                        .deployments
-                        .entry(d.name_any())
-                        .or_insert(Deployment::default());
-                    let nums = d.status.clone().unwrap_or_default();
-                    dep.running = nums.available_replicas.unwrap_or_default();
-                    dep.desired = nums.replicas.unwrap_or_default();
+                Ok(o) => match o {
+                    Watched::Deploy(d) => {
+                        let namespace = match d.namespace() {
+                            Some(n) => n.to_string(),
+                            None => continue,
+                        };
+                        let mut cluster = self.cluster.lock().await;
+                        let v = cluster.namespaces.get_mut(&namespace).unwrap();
+                        let dep = v
+                            .deployments
+                            .entry(d.name_any())
+                            .or_insert(Deployment::default());
+                        let nums = d.status.clone().unwrap_or_default();
+                        dep.running = nums.available_replicas.unwrap_or_default();
+                        dep.desired = nums.replicas.unwrap_or_default();
 
-                    tracing::info!(
-                        "Got deployment: {}, size: {}/{} un {}",
-                        d.name_any(),
-                        nums.available_replicas.unwrap_or_default(),
-                        nums.replicas.unwrap_or_default(),
-                        nums.unavailable_replicas.unwrap_or_default(),
-                    )
-                }
-                Watched::Pod(p) => {
-                    let namespace = match p.namespace() {
-                        Some(n) => n.to_string(),
-                        None => continue,
-                    };
-                    let mut cluster = self.cluster.lock().await;
-                    let v = cluster.namespaces.get_mut(&namespace).unwrap();
-                    let pod = v.pods.entry(p.name_any()).or_insert(Pod::default());
-                    pod.owner = p
-                        .owner_references()
-                        .iter()
-                        .map(|x| format!("{}/{}", x.kind.clone(), x.name.clone()))
-                        .collect::<Vec<_>>()
-                        .join(":");
-                    // TODO: Collect replica sets to match deployments and pods.
-                    let phase = p
-                        .status
-                        .clone()
-                        .unwrap_or_default()
-                        .phase
-                        .unwrap_or_default();
-                    if phase != pod.status {
-                        // TODO: try to get an idea how to set correct value on restart.
-                        pod.changed = Utc::now();
+                        tracing::info!(
+                            "Got deployment: {}, size: {}/{} un {}",
+                            d.name_any(),
+                            nums.available_replicas.unwrap_or_default(),
+                            nums.replicas.unwrap_or_default(),
+                            nums.unavailable_replicas.unwrap_or_default(),
+                        )
                     }
-                    pod.status = phase;
+                    Watched::Pod(p) => {
+                        let namespace = match p.namespace() {
+                            Some(n) => n.to_string(),
+                            None => continue,
+                        };
+                        let mut cluster = self.cluster.lock().await;
+                        let v = cluster.namespaces.get_mut(&namespace).unwrap();
+                        let pod = v.pods.entry(p.name_any()).or_insert(Pod::default());
+                        pod.owner = p
+                            .owner_references()
+                            .iter()
+                            .map(|x| format!("{}/{}", x.kind.clone(), x.name.clone()))
+                            .collect::<Vec<_>>()
+                            .join(":");
+                        // TODO: Collect replica sets to match deployments and pods.
+                        let phase = p
+                            .status
+                            .clone()
+                            .unwrap_or_default()
+                            .phase
+                            .unwrap_or_default();
+                        if phase != pod.status {
+                            // TODO: try to get an idea how to set correct value on restart.
+                            pod.changed = Utc::now();
+                        }
+                        pod.status = phase;
 
-                    tracing::info!("Got pod: {}", p.name_any())
-                }
+                        if pod.status == "Succeeded" || pod.status == "Failed" {
+                            // Cleaning up list of pods.
+                            v.pods.remove(&p.name_any());
+                        }
+
+                        tracing::info!("Got pod: {}", p.name_any())
+                    }
+                    Watched::Event(e) => {
+                        let namespace: String = match e.namespace() {
+                            Some(n) => n,
+                            None => "".into(),
+                        };
+                        let name = e.name_any();
+                        let reason = e.reason.unwrap_or_default();
+                        if reason != "FailedScaleUp" {
+                            // Ignore all events which are not scale issues.
+                            continue;
+                        }
+                        let time: DateTime<Utc> = match e.last_timestamp {
+                            Some(t) => t.0,
+                            None => Utc::now(),
+                        };
+                        tracing::debug!(
+                            "Got event: {}/{}, message: {:?}; action: {:?}, reason: {:?}",
+                            namespace,
+                            name,
+                            e.message.unwrap_or_default(),
+                            e.action.unwrap_or_default(),
+                            reason
+                        );
+                        let mut cluster = self.cluster.lock().await;
+                        let v = cluster.namespaces.get_mut(&namespace).unwrap();
+                        v.scale_errors.push(ScaleEvent { name, time })
+                    }
+                },
+                Err(err) => tracing::warn!("Error during watch: {err:?}"),
             }
         }
diff --git a/prover/crates/bin/prover_autoscaler/src/lib.rs b/prover/crates/bin/prover_autoscaler/src/lib.rs
index 0b0d704c9078..019fe2b7fb4d 100644
--- a/prover/crates/bin/prover_autoscaler/src/lib.rs
+++ b/prover/crates/bin/prover_autoscaler/src/lib.rs
@@ -1,5 +1,6 @@
 pub mod agent;
 pub(crate) mod cluster_types;
+pub mod config;
 pub mod global;
 pub mod k8s;
 pub(crate) mod metrics;
diff --git a/prover/crates/bin/prover_autoscaler/src/main.rs b/prover/crates/bin/prover_autoscaler/src/main.rs
index 196bd6deb81e..98ffdb49d824 100644
--- a/prover/crates/bin/prover_autoscaler/src/main.rs
+++ b/prover/crates/bin/prover_autoscaler/src/main.rs
@@ -6,10 +6,9 @@ use tokio::{
     sync::{oneshot, watch},
     task::JoinHandle,
 };
-use zksync_core_leftovers::temp_config_store::read_yaml_repr;
-use zksync_protobuf_config::proto::prover_autoscaler;
 use zksync_prover_autoscaler::{
     agent,
+    config::{config_from_yaml, ProverAutoscalerConfig},
     global::{self},
     k8s::{Scaler, Watcher},
     task_wiring::TaskRunner,
@@ -56,15 +55,11 @@ struct Opt {
 async fn main() -> anyhow::Result<()> {
     let opt = Opt::from_args();
     let general_config =
-        read_yaml_repr::<prover_autoscaler::ProverAutoscaler>(&opt.config_path)
-            .context("general config")?;
+        config_from_yaml::<ProverAutoscalerConfig>(&opt.config_path).context("general config")?;
     let observability_config = general_config
         .observability
         .context("observability config")?;
     let _observability_guard = observability_config.install()?;
-    // That's unfortunate that there are at least 3 different Duration in rust and we use all 3 in this repo.
-    // TODO: Consider updating zksync_protobuf to support std::time::Duration.
-    let graceful_shutdown_timeout = general_config.graceful_shutdown_timeout.unsigned_abs();
 
     let (stop_signal_sender, stop_signal_receiver) = oneshot::channel();
     let mut stop_signal_sender = Some(stop_signal_sender);
@@ -77,28 +72,21 @@ async fn main() -> anyhow::Result<()> {
 
     let (stop_sender, stop_receiver) = watch::channel(false);
 
-    let _ = rustls::crypto::ring::default_provider().install_default();
-    let client = kube::Client::try_default().await?;
-
-    tracing::info!("Starting ProverAutoscaler");
-
     let mut tasks = vec![];
 
     match opt.job {
         AutoscalerType::Agent => {
+            tracing::info!("Starting ProverAutoscaler Agent");
             let agent_config = general_config.agent_config.context("agent_config")?;
             let exporter_config = PrometheusExporterConfig::pull(agent_config.prometheus_port);
             tasks.push(tokio::spawn(exporter_config.run(stop_receiver.clone())));
 
-            // TODO: maybe get cluster name from curl -H "Metadata-Flavor: Google"
-            // http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name
-            let watcher = Watcher::new(
-                client.clone(),
-                opt.cluster_name
-                    .context("cluster_name is required for Agent")?,
-                agent_config.namespaces,
-            );
-            let scaler = Scaler { client };
+            let _ = rustls::crypto::ring::default_provider().install_default();
+            let client = kube::Client::try_default().await?;
+
+            let watcher =
+                Watcher::new(client.clone(), opt.cluster_name, agent_config.namespaces).await;
+            let scaler = Scaler::new(client, agent_config.dry_run);
             tasks.push(tokio::spawn(watcher.clone().run()));
             tasks.push(tokio::spawn(agent::run_server(
                 agent_config.http_port,
@@ -108,11 +96,13 @@ async fn main() -> anyhow::Result<()> {
             )))
         }
         AutoscalerType::Scaler => {
+            tracing::info!("Starting ProverAutoscaler Scaler");
             let scaler_config = general_config.scaler_config.context("scaler_config")?;
-            let interval = scaler_config.scaler_run_interval.unsigned_abs();
+            let interval = scaler_config.scaler_run_interval;
             let exporter_config = PrometheusExporterConfig::pull(scaler_config.prometheus_port);
             tasks.push(tokio::spawn(exporter_config.run(stop_receiver.clone())));
-            let watcher = global::watcher::Watcher::new(scaler_config.agents.clone());
+            let watcher =
+                global::watcher::Watcher::new(scaler_config.agents.clone(), scaler_config.dry_run);
             let queuer = global::queuer::Queuer::new(scaler_config.prover_job_monitor_url.clone());
             let scaler = global::scaler::Scaler::new(watcher.clone(), queuer, scaler_config);
             tasks.extend(get_tasks(watcher, scaler, interval, stop_receiver)?);
@@ -128,7 +118,9 @@ async fn main() -> anyhow::Result<()> {
         }
     }
     stop_sender.send(true).ok();
-    tasks.complete(graceful_shutdown_timeout).await;
+    tasks
+        .complete(general_config.graceful_shutdown_timeout)
+        .await;
 
     Ok(())
 }
diff --git a/prover/crates/bin/prover_autoscaler/src/metrics.rs b/prover/crates/bin/prover_autoscaler/src/metrics.rs
index 09cbaa6ba00f..115ae3b74259 100644
--- a/prover/crates/bin/prover_autoscaler/src/metrics.rs
+++ b/prover/crates/bin/prover_autoscaler/src/metrics.rs
@@ -1,13 +1,25 @@
 use vise::{Counter, Gauge, LabeledFamily, Metrics};
-use zksync_config::configs::prover_autoscaler::Gpu;
+
+use crate::config::Gpu;
+
+pub const DEFAULT_ERROR_CODE: u16 = 500;
 
 #[derive(Debug, Metrics)]
 #[metrics(prefix = "autoscaler")]
 pub(crate) struct AutoscalerMetrics {
-    pub protocol_version: Gauge<u64>,
-    pub calls: Counter,
+    #[metrics(labels = ["target_namespace", "protocol_version"])]
+    pub prover_protocol_version: LabeledFamily<(String, String), Gauge<u64>, 2>,
     #[metrics(labels = ["target_cluster", "target_namespace", "gpu"])]
     pub provers: LabeledFamily<(String, String, Gpu), Gauge<u64>, 3>,
+    #[metrics(labels = ["job", "target_cluster", "target_namespace"])]
+    pub jobs: LabeledFamily<(String, String, String), Gauge<u64>, 3>,
+    pub clusters_not_ready: Counter,
+    #[metrics(labels = ["target", "status"])]
+    pub calls: LabeledFamily<(String, u16), Counter, 2>,
+    #[metrics(labels = ["target_cluster"])]
+    pub scale_errors: LabeledFamily<String, Gauge<u64>>,
+    #[metrics(labels = ["target_namespace", "job"])]
+    pub queue: LabeledFamily<(String, String), Gauge<u64>, 2>,
 }
 
 #[vise::register]
diff --git a/prover/crates/bin/prover_job_monitor/src/main.rs b/prover/crates/bin/prover_job_monitor/src/main.rs
index 9195b92882dd..8511135225a6 100644
--- a/prover/crates/bin/prover_job_monitor/src/main.rs
+++ b/prover/crates/bin/prover_job_monitor/src/main.rs
@@ -159,7 +159,7 @@ fn get_tasks(
         prover_jobs_archiver,
     );
 
-    // job requeuers
+    // job re-queuers
     let proof_compressor_job_requeuer = ProofCompressorJobRequeuer::new(
         proof_compressor_config.max_attempts,
         proof_compressor_config.generation_timeout(),
diff --git a/prover/crates/bin/witness_generator/README.md b/prover/crates/bin/witness_generator/README.md
index dc476ca44fc3..6063c29b3348 100644
--- a/prover/crates/bin/witness_generator/README.md
+++ b/prover/crates/bin/witness_generator/README.md
@@ -1,9 +1,5 @@
 # WitnessGenerator
 
-Please read this
-[doc](https://www.notion.so/matterlabs/Draft-FRI-Prover-Integration-Prover-Shadowing-c4b1373786eb43779a93118be4be5d99)
-for rationale of this binary, alongside the existing one in zk-core.
-
 The component is responsible for generating prover jobs and saving artifacts needed for the next round of proof
 aggregation. That is, every aggregation round needs two sets of input:
diff --git a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs
index a8bc59bd45e5..31dc54814103 100644
--- a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs
+++ b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs
@@ -5,7 +5,7 @@ use std::{
 };
 
 use circuit_definitions::{
-    circuit_definitions::base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerStorage},
+    circuit_definitions::base_layer::ZkSyncBaseLayerStorage,
     encodings::recursion_request::RecursionQueueSimulator,
     zkevm_circuits::fsm_input_output::ClosedFormInputCompactFormWitness,
 };
@@ -21,7 +21,7 @@ use zksync_multivm::{
     zk_evm_latest::ethereum_types::Address,
 };
 use zksync_object_store::ObjectStore;
-use zksync_prover_fri_types::{keys::ClosedFormInputKey, CircuitAuxData};
+use zksync_prover_fri_types::keys::ClosedFormInputKey;
 use zksync_prover_interface::inputs::WitnessInputData;
 use zksync_system_constants::BOOTLOADER_ADDRESS;
 use zksync_types::L1BatchNumber;
@@ -31,8 +31,7 @@ use crate::{
     rounds::basic_circuits::Witness,
     storage_oracle::StorageOracle,
     utils::{
-        expand_bootloader_contents, save_circuit, save_ram_premutation_queue_witness,
-        ClosedFormInputWrapper, KZG_TRUSTED_SETUP_FILE,
+        expand_bootloader_contents, save_circuit, ClosedFormInputWrapper, KZG_TRUSTED_SETUP_FILE,
     },
     witness::WitnessStorage,
 };
@@ -64,17 +63,38 @@ pub(super) async fn generate_witness(
     let (circuit_sender, mut circuit_receiver) = tokio::sync::mpsc::channel(1);
     let (queue_sender, mut queue_receiver) = tokio::sync::mpsc::channel(1);
-    let (ram_permutation_queue_sender, mut ram_permutation_queue_receiver) =
-        tokio::sync::mpsc::channel(1);
 
     let
make_circuits_span = tracing::info_span!("make_circuits"); let make_circuits_span_copy = make_circuits_span.clone(); + + use std::{sync::mpsc::sync_channel, thread}; + let (artifacts_sender, artifacts_receiver) = sync_channel(1); + + let artifacts_receiver_handle = thread::spawn(move || { + let span = tracing::info_span!(parent: make_circuits_span_copy, "make_circuits_blocking"); + + while let Ok(artifact) = artifacts_receiver.recv() { + match artifact { + WitnessGenerationArtifact::BaseLayerCircuit(circuit) => { + let parent_span = span.clone(); + tracing::info_span!(parent: parent_span, "send_circuit").in_scope(|| { + circuit_sender + .blocking_send(circuit) + .expect("failed to send circuit from harness"); + }); + } + WitnessGenerationArtifact::RecursionQueue((a, b, c)) => queue_sender + .blocking_send((a as u8, b, c)) + .expect("failed to send recursion queue from harness"), + _ => {} + } + } + }); + // Blocking call from harness that does the CPU heavy lifting. // Provides circuits and recursion queue via callback functions and returns scheduler witnesses. // Circuits are "streamed" one by one as they're being generated. let make_circuits_handle = tokio::task::spawn_blocking(move || { - let span = tracing::info_span!(parent: make_circuits_span_copy, "make_circuits_blocking"); - let witness_storage = WitnessStorage::new(input.vm_run_data.witness_block_state); let storage_view = StorageView::new(witness_storage).to_rc_ptr(); @@ -91,33 +111,11 @@ pub(super) async fn generate_witness( .to_str() .expect("Path to KZG trusted setup is not a UTF-8 string"); - let artifacts_callback = |artifact: WitnessGenerationArtifact| match artifact { - WitnessGenerationArtifact::BaseLayerCircuit(circuit) => { - let parent_span = span.clone(); - tracing::info_span!(parent: parent_span, "send_circuit").in_scope(|| { - circuit_sender - .blocking_send(circuit) - .expect("failed to send circuit from harness"); - }); - } - WitnessGenerationArtifact::RecursionQueue((a, b, c)) => queue_sender - .blocking_send((a as u8, b, c)) - .expect("failed to send recursion queue from harness"), - a @ WitnessGenerationArtifact::MemoryQueueWitness(_) => { - let parent_span = span.clone(); - tracing::info_span!(parent: parent_span, "send_ram_permutation_queue_witness") - .in_scope(|| { - ram_permutation_queue_sender - .blocking_send(a) - .expect("failed to send ram permutation queue sitness from harness"); - }); - } - }; - let evm_emulator_code_hash = input.vm_run_data.evm_emulator_code_hash; // By convention, default AA is used instead of the EVM emulator if the latter is disabled. let evm_emulator_code_hash = evm_emulator_code_hash.unwrap_or(input.vm_run_data.default_account_code_hash); + let (scheduler_witness, block_witness) = zkevm_test_harness::external_calls::run( Address::zero(), BOOTLOADER_ADDRESS, @@ -132,9 +130,9 @@ pub(super) async fn generate_witness( geometry_config, storage_oracle, tree, - path, + path.to_owned(), input.eip_4844_blobs.blobs(), - artifacts_callback, + artifacts_sender, ); (scheduler_witness, block_witness) }) @@ -153,8 +151,6 @@ pub(super) async fn generate_witness( // If the order is tampered with, proving will fail (as the proof would be computed for a different sequence of instruction). 
let mut circuit_sequence = 0; - let mut ram_circuit_sequence = 0; - while let Some(circuit) = circuit_receiver .recv() .instrument(tracing::info_span!("wait_for_circuit")) @@ -169,26 +165,9 @@ pub(super) async fn generate_witness( .await .expect("failed to get permit for running save circuit task"); - let partial_circuit_aux_data = match &circuit { - ZkSyncBaseLayerCircuit::RAMPermutation(_) => { - let circuit_subsequence_number = ram_circuit_sequence; - ram_circuit_sequence += 1; - Some(CircuitAuxData { - circuit_subsequence_number, - }) - } - _ => None, - }; - save_circuit_handles.push(tokio::task::spawn(async move { - let (circuit_id, circuit_url) = save_circuit( - block_number, - circuit, - sequence, - partial_circuit_aux_data, - object_store, - ) - .await; + let (circuit_id, circuit_url) = + save_circuit(block_number, circuit, sequence, object_store).await; drop(permit); (circuit_id, circuit_url) })); @@ -196,57 +175,6 @@ pub(super) async fn generate_witness( } .instrument(save_circuits_span); - let mut save_ram_queue_witness_handles = vec![]; - - let save_ram_queue_witness_span = tracing::info_span!("save_circuits"); - - // Future which receives part of RAM permutation circuits witnesses and saves them async. - // Uses semaphore because these artifacts are of significant size - let ram_queue_witness_receiver_handle = async { - let mut sorted_sequence = 0; - let mut unsorted_sequence = 0; - - while let Some(witness_artifact) = ram_permutation_queue_receiver - .recv() - .instrument(tracing::info_span!("wait_for_ram_witness")) - .await - { - let object_store = object_store.clone(); - let semaphore = semaphore.clone(); - let permit = semaphore - .acquire_owned() - .await - .expect("failed to get permit for running save ram permutation queue witness task"); - let (is_sorted, witness, sequence) = match witness_artifact { - WitnessGenerationArtifact::MemoryQueueWitness((witness, sorted)) => { - let sequence = if sorted { - let sequence = sorted_sequence; - sorted_sequence += 1; - sequence - } else { - let sequence = unsorted_sequence; - unsorted_sequence += 1; - sequence - }; - (sorted, witness, sequence) - } - _ => panic!("Invalid artifact received"), - }; - save_ram_queue_witness_handles.push(tokio::task::spawn(async move { - let _ = save_ram_premutation_queue_witness( - block_number, - sequence, - is_sorted, - witness, - object_store, - ) - .await; - drop(permit); - })); - } - } - .instrument(save_ram_queue_witness_span); - let mut save_queue_handles = vec![]; let save_queues_span = tracing::info_span!("save_queues"); @@ -272,11 +200,10 @@ pub(super) async fn generate_witness( } .instrument(save_queues_span); - let (witnesses, _, _, _) = tokio::join!( + let (witnesses, _, _) = tokio::join!( make_circuits_handle, circuit_receiver_handle, - queue_receiver_handle, - ram_queue_witness_receiver_handle + queue_receiver_handle ); let (mut scheduler_witness, block_aux_witness) = witnesses.unwrap(); @@ -301,11 +228,7 @@ pub(super) async fn generate_witness( .filter(|(circuit_id, _, _)| circuits_present.contains(circuit_id)) .collect(); - let _: Vec<_> = futures::future::join_all(save_ram_queue_witness_handles) - .await - .into_iter() - .map(|result| result.expect("failed to save ram permutation queue witness")) - .collect(); + artifacts_receiver_handle.join().unwrap(); scheduler_witness.previous_block_meta_hash = input.previous_batch_metadata.meta_hash.0; scheduler_witness.previous_block_aux_hash = input.previous_batch_metadata.aux_hash.0; diff --git 
a/prover/crates/bin/witness_generator/src/utils.rs b/prover/crates/bin/witness_generator/src/utils.rs
index 8524bdae9ff0..ea631f19cd85 100644
--- a/prover/crates/bin/witness_generator/src/utils.rs
+++ b/prover/crates/bin/witness_generator/src/utils.rs
@@ -3,10 +3,7 @@ use std::{
     sync::Arc,
 };
 
-use circuit_definitions::{
-    circuit_definitions::base_layer::ZkSyncBaseLayerCircuit,
-    encodings::memory_query::MemoryQueueStateWitnesses,
-};
+use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerCircuit;
 use once_cell::sync::Lazy;
 use zkevm_test_harness::boojum::field::goldilocks::GoldilocksField;
 use zksync_multivm::utils::get_used_bootloader_memory_bytes;
@@ -24,8 +21,8 @@ use zksync_prover_fri_types::{
         encodings::recursion_request::RecursionQueueSimulator,
         zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness,
     },
-    keys::{AggregationsKey, ClosedFormInputKey, FriCircuitKey, RamPermutationQueueWitnessKey},
-    CircuitAuxData, CircuitWrapper, FriProofWrapper, RamPermutationQueueWitness,
+    keys::{AggregationsKey, ClosedFormInputKey, FriCircuitKey},
+    CircuitWrapper, FriProofWrapper,
 };
 use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber, ProtocolVersionId, U256};
 
@@ -121,7 +118,6 @@ pub async fn save_circuit(
     block_number: L1BatchNumber,
     circuit: ZkSyncBaseLayerCircuit,
     sequence_number: usize,
-    aux_data_for_partial_circuit: Option<CircuitAuxData>,
     object_store: Arc<dyn ObjectStore>,
 ) -> (u8, String) {
     let circuit_id = circuit.numeric_circuit_type();
@@ -133,43 +129,12 @@ pub async fn save_circuit(
         depth: 0,
     };
 
-    let blob_url = if let Some(aux_data_for_partial_circuit) = aux_data_for_partial_circuit {
-        object_store
-            .put(
-                circuit_key,
-                &CircuitWrapper::BasePartial((circuit, aux_data_for_partial_circuit)),
-            )
-            .await
-            .unwrap()
-    } else {
-        object_store
-            .put(circuit_key, &CircuitWrapper::Base(circuit))
-            .await
-            .unwrap()
-    };
-    (circuit_id, blob_url)
-}
-
-#[tracing::instrument(
-    skip_all,
-    fields(l1_batch = %block_number)
-)]
-pub async fn save_ram_premutation_queue_witness(
-    block_number: L1BatchNumber,
-    circuit_subsequence_number: usize,
-    is_sorted: bool,
-    witness: MemoryQueueStateWitnesses<GoldilocksField>,
-    object_store: Arc<dyn ObjectStore>,
-) -> String {
-    let witness_key = RamPermutationQueueWitnessKey {
-        block_number,
-        circuit_subsequence_number,
-        is_sorted,
-    };
-    object_store
-        .put(witness_key, &RamPermutationQueueWitness { witness })
+    let blob_url = object_store
+        .put(circuit_key, &CircuitWrapper::Base(circuit))
         .await
-        .unwrap()
+        .unwrap();
+
+    (circuit_id, blob_url)
 }
 
 #[tracing::instrument(
diff --git a/prover/crates/lib/circuit_prover_service/Cargo.toml b/prover/crates/lib/circuit_prover_service/Cargo.toml
new file mode 100644
index 000000000000..ca7d1ede02f1
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "zksync_circuit_prover_service"
+description = "ZKsync circuit prover service implementation"
+version.workspace = true
+edition.workspace = true
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+license.workspace = true
+keywords.workspace = true
+categories.workspace = true
+
+[dependencies]
+zksync_prover_job_processor.workspace = true
+zksync_prover_fri_types.workspace = true
+zksync_prover_keystore.workspace = true
+zksync_prover_dal.workspace = true
+zksync_types.workspace = true
+zksync_object_store.workspace = true
+
+async-trait.workspace = true
+anyhow.workspace = true
+tokio = { workspace = true, features = ["macros", "time"] }
+tokio-util.workspace = true
+tracing.workspace = true
+
+shivini = { workspace = true, features = [
+    "circuit_definitions",
+] }
+zkevm_test_harness.workspace = true
+vise.workspace = true
diff --git a/prover/crates/lib/circuit_prover_service/README.md b/prover/crates/lib/circuit_prover_service/README.md
new file mode 100644
index 000000000000..3cc8a80e966d
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/README.md
@@ -0,0 +1,96 @@
+# Circuit Prover Service
+
+This crate provides the building blocks for running circuit provers. Circuit proving is the heaviest part of the
+proving process, being both the most time-intensive and the most resource-hungry stage.
+
+The primitives exported by this lib are job runners, namely:
+
+- light_wvg_runner
+- heavy_wvg_runner
+- circuit_prover_runner
+
+The rest of the codebase simply covers the internals of creating a runner, which is an implementation of
+`ProverJobProcessor`.
+
+## Witness Vector Generator Runner
+
+Runners related to synthesizing the witness vector (the CPU-heavy part of circuit proving). They are tied to the
+`prover_jobs_fri` table and operate over the `ProverJobsFri` object storage bucket.
+
+Witness vector generation jobs vary widely in resource usage. Node proofs are the heavy jobs (~9GB RAM), whilst all
+others are rather light (~2GB RAM).
+
+There are 2 ways to deal with this:
+
+1. run `available RAM / 9` jobs, which under-utilizes RAM but simplifies the implementation
+2. run multiple light WVG jobs alongside a small number of heavy WVG jobs
+
+This implementation favors option 2. As such, the `MetadataLoader` abstraction was introduced to force loading either
+lighter or heavier jobs. The heavy picker tries to prioritize node jobs; if none are available, it falls back to light
+jobs in order to maximize utilization.
+
+### Job Picker
+
+Interacts with the database to get a job (as described above), loads the data from the object store and then hydrates
+the circuit. In the current implementation, RAM Permutation circuits are sent separately in order to save RAM in basic
+witness generation & reduce the amount of storage used by the object store. A further optimization will be introduced
+later on, which will remove the necessity of witness hydration on circuits.
+
+### Executor
+
+Straightforward: synthesizes the witness vector from the circuit.
+
+### Job Saver
+
+On success, provides the data to the GPU circuit prover over a channel. On failure, marks the job as failed in the
+database so that it can be retried later (as orchestrated by Prover Job Monitor).
+
+## GPU Circuit Prover
+
+Runners related to generating the circuit proof & verifying it. They are tied to the `prover_jobs_fri` table and
+operate over the `ProverJobs` object storage bucket.
+
+### Job Picker
+
+Waits on information from (multiple) WVGs sent via a channel.
+
+### Executor
+
+Generates & verifies the circuit proof (on GPU).
+
+### Job Saver
+
+Persists information back to the `prover_jobs_fri` table. Note that a job is picked by a WVG & finished by the CP.
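+
+## Usage
+
+A minimal wiring sketch. Variable names, channel size and runner counts below are illustrative; it assumes a
+`ConnectionPool<Prover>`, an object store, the finalization-hint and setup-data caches, and a Shivini `ProverContext`
+are built elsewhere, and that `JobRunner::run()` (from `zksync_prover_job_processor`) spawns the picker/executor/saver
+tasks and returns their handles:
+
+```rust
+// Illustrative only: connection_pool, object_store, protocol_version,
+// finalization_hints_cache, setup_data_cache and prover_context are assumed
+// to be constructed elsewhere.
+let (sender, receiver) = tokio::sync::mpsc::channel(5);
+let cancellation_token = tokio_util::sync::CancellationToken::new();
+
+let builder = WvgRunnerBuilder::new(
+    connection_pool.clone(),
+    object_store.clone(),
+    protocol_version,
+    finalization_hints_cache,
+    sender,
+    cancellation_token.clone(),
+);
+
+// Assumption: `run()` spawns the underlying tasks and returns their join handles.
+let mut tasks = builder.light_wvg_runner(3).run();
+tasks.extend(builder.heavy_wvg_runner(1).run());
+tasks.extend(
+    circuit_prover_runner(
+        connection_pool,
+        object_store,
+        protocol_version,
+        setup_data_cache,
+        receiver,
+        prover_context,
+    )
+    .run(),
+);
+```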
+ +## Diagram + +```mermaid +sequenceDiagram + box Resources + participant db as Database + participant os as Object Store + end + box Heavy/Light Witness Vector Generator + participant wvg_p as Job Picker + participant wvg_e as Executor + participant wvg_s as Job Saver + end + box Circuit Prover + participant cp_p as Job Picker + participant cp_e as Executor + participant cp_s as Job Saver + end + wvg_p-->>db: Get job metadata + wvg_p-->>os: Get circuit + wvg_p-->>wvg_p: Hydrate circuit & get finalization hints + wvg_p-->>wvg_e: Provide metadata & circuit + wvg_e-->>wvg_e: Synthesize witness vector + wvg_e-->>wvg_s: Provide metadata & witness vector & circuit + wvg_s-->>cp_p: Provide metadata & witness vector & circuit + cp_p-->>cp_p: Get setup data + cp_p-->>cp_e: Provide metadata & witness vector & circuit + cp_e-->>cp_e: Prove & verify circuit proof + cp_e-->>cp_s: Provide metadata & proof + cp_s-->>os: Save proof + cp_s-->>db: Update job metadata +``` diff --git a/prover/crates/lib/circuit_prover_service/src/gpu_circuit_prover/gpu_circuit_prover_executor.rs b/prover/crates/lib/circuit_prover_service/src/gpu_circuit_prover/gpu_circuit_prover_executor.rs new file mode 100644 index 000000000000..043232a5003c --- /dev/null +++ b/prover/crates/lib/circuit_prover_service/src/gpu_circuit_prover/gpu_circuit_prover_executor.rs @@ -0,0 +1,73 @@ +use std::time::Instant; + +use anyhow::Context; +use shivini::ProverContext; +use zksync_prover_fri_types::FriProofWrapper; +use zksync_prover_job_processor::Executor; +use zksync_types::prover_dal::FriProverJobMetadata; + +use crate::{ + metrics::CIRCUIT_PROVER_METRICS, types::circuit_prover_payload::GpuCircuitProverPayload, +}; + +/// GpuCircuitProver executor implementation. +/// Generates circuit proof & verifies it. +/// NOTE: It requires prover context, which is the way Shivini allocates VRAM. 
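+///
+/// A usage sketch (hypothetical, for illustration: assumes a hydrated `GpuCircuitProverPayload` and job `metadata`
+/// are already available; `ProverContext::create()` is Shivini's constructor and is an assumption here):
+///
+/// ```ignore
+/// let prover_context = ProverContext::create().expect("failed to allocate GPU context");
+/// let executor = GpuCircuitProverExecutor::new(prover_context);
+/// let proof_wrapper = executor.execute(payload, metadata)?;
+/// ```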
+pub struct GpuCircuitProverExecutor {
+    _prover_context: ProverContext,
+}
+
+impl GpuCircuitProverExecutor {
+    pub fn new(prover_context: ProverContext) -> Self {
+        Self {
+            _prover_context: prover_context,
+        }
+    }
+}
+
+impl Executor for GpuCircuitProverExecutor {
+    type Input = GpuCircuitProverPayload;
+    type Output = FriProofWrapper;
+    type Metadata = FriProverJobMetadata;
+
+    #[tracing::instrument(
+        name = "gpu_circuit_prover_executor",
+        skip_all,
+        fields(l1_batch = % metadata.block_number)
+    )]
+    fn execute(
+        &self,
+        input: Self::Input,
+        metadata: Self::Metadata,
+    ) -> anyhow::Result<Self::Output> {
+        let start_time = Instant::now();
+        tracing::info!(
+            "Started executing gpu circuit prover job {}, on batch {}, for circuit {}, at round {}",
+            metadata.id,
+            metadata.block_number,
+            metadata.circuit_id,
+            metadata.aggregation_round
+        );
+        let GpuCircuitProverPayload {
+            circuit,
+            witness_vector,
+            setup_data,
+        } = input;
+
+        let proof_wrapper = circuit
+            .prove(witness_vector, setup_data)
+            .context("failed to gpu prove circuit")?;
+        tracing::info!(
+            "Finished executing gpu circuit prover job {}, on batch {}, for circuit {}, at round {} after {:?}",
+            metadata.id,
+            metadata.block_number,
+            metadata.circuit_id,
+            metadata.aggregation_round,
+            start_time.elapsed()
+        );
+        CIRCUIT_PROVER_METRICS
+            .prove_and_verify_time
+            .observe(start_time.elapsed());
+        Ok(proof_wrapper)
+    }
+}
diff --git a/prover/crates/lib/circuit_prover_service/src/gpu_circuit_prover/gpu_circuit_prover_job_picker.rs b/prover/crates/lib/circuit_prover_service/src/gpu_circuit_prover/gpu_circuit_prover_job_picker.rs
new file mode 100644
index 000000000000..76dc0cda66d3
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/gpu_circuit_prover/gpu_circuit_prover_job_picker.rs
@@ -0,0 +1,92 @@
+use std::{collections::HashMap, sync::Arc, time::Instant};
+
+use anyhow::Context;
+use async_trait::async_trait;
+use zksync_prover_fri_types::ProverServiceDataKey;
+use zksync_prover_job_processor::JobPicker;
+use zksync_prover_keystore::GoldilocksGpuProverSetupData;
+use zksync_types::prover_dal::FriProverJobMetadata;
+
+use crate::{
+    gpu_circuit_prover::GpuCircuitProverExecutor,
+    metrics::CIRCUIT_PROVER_METRICS,
+    types::{
+        circuit_prover_payload::GpuCircuitProverPayload,
+        witness_vector_generator_execution_output::WitnessVectorGeneratorExecutionOutput,
+    },
+};
+
+/// GpuCircuitProver job picker implementation.
+/// Retrieves job & data from WVG job saver.
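+///
+/// A construction sketch (illustrative: the receiver is the counterpart of the WVG job saver's sender, and
+/// `setup_data_cache` is assumed to be preloaded, e.g. from the keystore):
+///
+/// ```ignore
+/// let (sender, receiver) = tokio::sync::mpsc::channel(5);
+/// let mut picker = GpuCircuitProverJobPicker::new(receiver, setup_data_cache);
+/// let maybe_job = picker.pick_job().await?;
+/// ```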
+#[derive(Debug)]
+pub struct GpuCircuitProverJobPicker {
+    receiver:
+        tokio::sync::mpsc::Receiver<(WitnessVectorGeneratorExecutionOutput, FriProverJobMetadata)>,
+    setup_data_cache: HashMap<ProverServiceDataKey, Arc<GoldilocksGpuProverSetupData>>,
+}
+
+impl GpuCircuitProverJobPicker {
+    pub fn new(
+        receiver: tokio::sync::mpsc::Receiver<(
+            WitnessVectorGeneratorExecutionOutput,
+            FriProverJobMetadata,
+        )>,
+        setup_data_cache: HashMap<ProverServiceDataKey, Arc<GoldilocksGpuProverSetupData>>,
+    ) -> Self {
+        Self {
+            receiver,
+            setup_data_cache,
+        }
+    }
+}
+
+#[async_trait]
+impl JobPicker for GpuCircuitProverJobPicker {
+    type ExecutorType = GpuCircuitProverExecutor;
+
+    async fn pick_job(
+        &mut self,
+    ) -> anyhow::Result<Option<(GpuCircuitProverPayload, FriProverJobMetadata)>> {
+        let start_time = Instant::now();
+        tracing::info!("Started picking gpu circuit prover job");
+
+        let (wvg_output, metadata) = self
+            .receiver
+            .recv()
+            .await
+            .context("no witness vector generators are available, stopping...")?;
+        let WitnessVectorGeneratorExecutionOutput {
+            circuit,
+            witness_vector,
+        } = wvg_output;
+
+        let key = ProverServiceDataKey {
+            circuit_id: metadata.circuit_id,
+            round: metadata.aggregation_round,
+        }
+        .crypto_setup_key();
+        let setup_data = self
+            .setup_data_cache
+            .get(&key)
+            .context("failed to retrieve setup data from cache")?
+            .clone();
+
+        let payload = GpuCircuitProverPayload {
+            circuit,
+            witness_vector,
+            setup_data,
+        };
+        tracing::info!(
+            "Finished picking gpu circuit prover job {}, on batch {}, for circuit {}, at round {} in {:?}",
+            metadata.id,
+            metadata.block_number,
+            metadata.circuit_id,
+            metadata.aggregation_round,
+            start_time.elapsed()
+        );
+        CIRCUIT_PROVER_METRICS
+            .load_time
+            .observe(start_time.elapsed());
+        Ok(Some((payload, metadata)))
+    }
+}
diff --git a/prover/crates/lib/circuit_prover_service/src/gpu_circuit_prover/gpu_circuit_prover_job_saver.rs b/prover/crates/lib/circuit_prover_service/src/gpu_circuit_prover/gpu_circuit_prover_job_saver.rs
new file mode 100644
index 000000000000..0ba28a0d9f5a
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/gpu_circuit_prover/gpu_circuit_prover_job_saver.rs
@@ -0,0 +1,126 @@
+use std::{sync::Arc, time::Instant};
+
+use anyhow::Context;
+use async_trait::async_trait;
+use zksync_object_store::ObjectStore;
+use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
+use zksync_prover_fri_types::FriProofWrapper;
+use zksync_prover_job_processor::JobSaver;
+use zksync_types::{protocol_version::ProtocolSemanticVersion, prover_dal::FriProverJobMetadata};
+
+use crate::{gpu_circuit_prover::GpuCircuitProverExecutor, metrics::CIRCUIT_PROVER_METRICS};
+
+/// GpuCircuitProver job saver implementation.
+/// Persists the job execution to database. In case of success, artifacts are uploaded to object store.
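+///
+/// Note: scheduler proofs additionally enqueue a proof-compression job in the same database transaction, so that
+/// compression can pick them up immediately.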
+#[derive(Debug)]
+pub struct GpuCircuitProverJobSaver {
+    connection_pool: ConnectionPool<Prover>,
+    object_store: Arc<dyn ObjectStore>,
+    protocol_version: ProtocolSemanticVersion,
+}
+
+impl GpuCircuitProverJobSaver {
+    pub fn new(
+        connection_pool: ConnectionPool<Prover>,
+        object_store: Arc<dyn ObjectStore>,
+        protocol_version: ProtocolSemanticVersion,
+    ) -> Self {
+        Self {
+            connection_pool,
+            object_store,
+            protocol_version,
+        }
+    }
+}
+
+#[async_trait]
+impl JobSaver for GpuCircuitProverJobSaver {
+    type ExecutorType = GpuCircuitProverExecutor;
+
+    #[tracing::instrument(
+        name = "gpu_circuit_prover_job_saver",
+        skip_all,
+        fields(l1_batch = % data.1.block_number)
+    )]
+    async fn save_job_result(
+        &self,
+        data: (anyhow::Result<FriProofWrapper>, FriProverJobMetadata),
+    ) -> anyhow::Result<()> {
+        let start_time = Instant::now();
+        let (result, metadata) = data;
+        tracing::info!(
+            "Started saving gpu circuit prover job {}, on batch {}, for circuit {}, at round {}",
+            metadata.id,
+            metadata.block_number,
+            metadata.circuit_id,
+            metadata.aggregation_round
+        );
+
+        match result {
+            Ok(proof_wrapper) => {
+                let mut connection = self
+                    .connection_pool
+                    .connection()
+                    .await
+                    .context("failed to get db connection")?;
+
+                let is_scheduler_proof = metadata.is_scheduler_proof()?;
+
+                let blob_url = self
+                    .object_store
+                    .put(metadata.id, &proof_wrapper)
+                    .await
+                    .context("failed to upload to object store")?;
+
+                let mut transaction = connection
+                    .start_transaction()
+                    .await
+                    .context("failed to start db transaction")?;
+                transaction
+                    .fri_prover_jobs_dal()
+                    .save_proof(metadata.id, metadata.pick_time.elapsed(), &blob_url)
+                    .await;
+                if is_scheduler_proof {
+                    transaction
+                        .fri_proof_compressor_dal()
+                        .insert_proof_compression_job(
+                            metadata.block_number,
+                            &blob_url,
+                            self.protocol_version,
+                        )
+                        .await;
+                }
+                transaction
+                    .commit()
+                    .await
+                    .context("failed to commit db transaction")?;
+            }
+            Err(error) => {
+                let error_message = error.to_string();
+                tracing::error!("GPU circuit prover failed: {:?}", error_message);
+                self.connection_pool
+                    .connection()
+                    .await
+                    .context("failed to get db connection")?
+                    .fri_prover_jobs_dal()
+                    .save_proof_error(metadata.id, error_message)
+                    .await;
+            }
+        };
+        tracing::info!(
+            "Finished saving gpu circuit prover job {}, on batch {}, for circuit {}, at round {} after {:?}",
+            metadata.id,
+            metadata.block_number,
+            metadata.circuit_id,
+            metadata.aggregation_round,
+            start_time.elapsed()
+        );
+        CIRCUIT_PROVER_METRICS
+            .save_time
+            .observe(start_time.elapsed());
+        CIRCUIT_PROVER_METRICS
+            .full_time
+            .observe(metadata.pick_time.elapsed());
+        Ok(())
+    }
+}
diff --git a/prover/crates/lib/circuit_prover_service/src/gpu_circuit_prover/mod.rs b/prover/crates/lib/circuit_prover_service/src/gpu_circuit_prover/mod.rs
new file mode 100644
index 000000000000..7dff12aa2cc6
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/gpu_circuit_prover/mod.rs
@@ -0,0 +1,8 @@
+pub use gpu_circuit_prover_executor::GpuCircuitProverExecutor;
+pub use gpu_circuit_prover_job_picker::GpuCircuitProverJobPicker;
+pub use gpu_circuit_prover_job_saver::GpuCircuitProverJobSaver;
+
+mod gpu_circuit_prover_executor;
+
+mod gpu_circuit_prover_job_picker;
+mod gpu_circuit_prover_job_saver;
diff --git a/prover/crates/lib/circuit_prover_service/src/job_runner.rs b/prover/crates/lib/circuit_prover_service/src/job_runner.rs
new file mode 100644
index 000000000000..2e102fd40e33
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/job_runner.rs
@@ -0,0 +1,144 @@
+use std::{collections::HashMap, sync::Arc};
+
+use shivini::ProverContext;
+use tokio_util::sync::CancellationToken;
+use zksync_object_store::ObjectStore;
+use zksync_prover_dal::{ConnectionPool, Prover};
+use zksync_prover_fri_types::{
+    circuit_definitions::boojum::cs::implementations::setup::FinalizationHintsForProver,
+    get_current_pod_name, ProverServiceDataKey,
+};
+use zksync_prover_job_processor::{Backoff, BackoffAndCancellable, JobRunner};
+use zksync_prover_keystore::GoldilocksGpuProverSetupData;
+use zksync_types::{protocol_version::ProtocolSemanticVersion, prover_dal::FriProverJobMetadata};
+
+use crate::{
+    gpu_circuit_prover::{
+        GpuCircuitProverExecutor, GpuCircuitProverJobPicker, GpuCircuitProverJobSaver,
+    },
+    types::witness_vector_generator_execution_output::WitnessVectorGeneratorExecutionOutput,
+    witness_vector_generator::{
+        HeavyWitnessVectorMetadataLoader, LightWitnessVectorMetadataLoader,
+        WitnessVectorGeneratorExecutor, WitnessVectorGeneratorJobPicker,
+        WitnessVectorGeneratorJobSaver, WitnessVectorMetadataLoader,
+    },
+};
+
+/// Convenience struct helping with building Witness Vector Generator runners.
+#[derive(Debug)]
+pub struct WvgRunnerBuilder {
+    connection_pool: ConnectionPool<Prover>,
+    object_store: Arc<dyn ObjectStore>,
+    protocol_version: ProtocolSemanticVersion,
+    finalization_hints_cache: HashMap<ProverServiceDataKey, Arc<FinalizationHintsForProver>>,
+    sender:
+        tokio::sync::mpsc::Sender<(WitnessVectorGeneratorExecutionOutput, FriProverJobMetadata)>,
+    cancellation_token: CancellationToken,
+    pod_name: String,
+}
+
+impl WvgRunnerBuilder {
+    pub fn new(
+        connection_pool: ConnectionPool<Prover>,
+        object_store: Arc<dyn ObjectStore>,
+        protocol_version: ProtocolSemanticVersion,
+        finalization_hints_cache: HashMap<ProverServiceDataKey, Arc<FinalizationHintsForProver>>,
+        sender: tokio::sync::mpsc::Sender<(
+            WitnessVectorGeneratorExecutionOutput,
+            FriProverJobMetadata,
+        )>,
+        cancellation_token: CancellationToken,
+    ) -> Self {
+        Self {
+            connection_pool,
+            object_store,
+            protocol_version,
+            finalization_hints_cache,
+            sender,
+            cancellation_token,
+            pod_name: get_current_pod_name(),
+        }
+    }
+
+    /// Witness Vector Generator runner implementation for light jobs.
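+    /// `count` controls how many WVG jobs this runner executes in parallel; it is passed
+    /// straight to `JobRunner::new`. See the crate README for how light and heavy runners
+    /// are meant to be combined.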
+    pub fn light_wvg_runner(
+        &self,
+        count: usize,
+    ) -> JobRunner<
+        WitnessVectorGeneratorExecutor,
+        WitnessVectorGeneratorJobPicker<LightWitnessVectorMetadataLoader>,
+        WitnessVectorGeneratorJobSaver,
+    > {
+        let metadata_loader =
+            LightWitnessVectorMetadataLoader::new(self.pod_name.clone(), self.protocol_version);
+
+        self.wvg_runner(count, metadata_loader)
+    }
+
+    /// Witness Vector Generator runner implementation that prioritizes heavy jobs over light jobs.
+    pub fn heavy_wvg_runner(
+        &self,
+        count: usize,
+    ) -> JobRunner<
+        WitnessVectorGeneratorExecutor,
+        WitnessVectorGeneratorJobPicker<HeavyWitnessVectorMetadataLoader>,
+        WitnessVectorGeneratorJobSaver,
+    > {
+        let metadata_loader =
+            HeavyWitnessVectorMetadataLoader::new(self.pod_name.clone(), self.protocol_version);
+
+        self.wvg_runner(count, metadata_loader)
+    }
+
+    /// Creates a Witness Vector Generator job runner with specified MetadataLoader.
+    /// The MetadataLoader makes the difference between heavy & light WVG runner.
+    fn wvg_runner<ML: WitnessVectorMetadataLoader>(
+        &self,
+        count: usize,
+        metadata_loader: ML,
+    ) -> JobRunner<
+        WitnessVectorGeneratorExecutor,
+        WitnessVectorGeneratorJobPicker<ML>,
+        WitnessVectorGeneratorJobSaver,
+    > {
+        let executor = WitnessVectorGeneratorExecutor;
+        let job_picker = WitnessVectorGeneratorJobPicker::new(
+            self.connection_pool.clone(),
+            self.object_store.clone(),
+            self.finalization_hints_cache.clone(),
+            metadata_loader,
+        );
+        let job_saver =
+            WitnessVectorGeneratorJobSaver::new(self.connection_pool.clone(), self.sender.clone());
+        let backoff = Backoff::default();
+
+        JobRunner::new(
+            executor,
+            job_picker,
+            job_saver,
+            count,
+            Some(BackoffAndCancellable::new(
+                backoff,
+                self.cancellation_token.clone(),
+            )),
+        )
+    }
+}
+
+/// Circuit Prover runner implementation.
+pub fn circuit_prover_runner(
+    connection_pool: ConnectionPool<Prover>,
+    object_store: Arc<dyn ObjectStore>,
+    protocol_version: ProtocolSemanticVersion,
+    setup_data_cache: HashMap<ProverServiceDataKey, Arc<GoldilocksGpuProverSetupData>>,
+    receiver: tokio::sync::mpsc::Receiver<(
+        WitnessVectorGeneratorExecutionOutput,
+        FriProverJobMetadata,
+    )>,
+    prover_context: ProverContext,
+) -> JobRunner<GpuCircuitProverExecutor, GpuCircuitProverJobPicker, GpuCircuitProverJobSaver> {
+    let executor = GpuCircuitProverExecutor::new(prover_context);
+    let job_picker = GpuCircuitProverJobPicker::new(receiver, setup_data_cache);
+    let job_saver = GpuCircuitProverJobSaver::new(connection_pool, object_store, protocol_version);
+    JobRunner::new(executor, job_picker, job_saver, 1, None)
+}
diff --git a/prover/crates/lib/circuit_prover_service/src/lib.rs b/prover/crates/lib/circuit_prover_service/src/lib.rs
new file mode 100644
index 000000000000..0d7b146cc43b
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/lib.rs
@@ -0,0 +1,7 @@
+#![allow(incomplete_features)] // Crypto code uses generic const exprs
+#![feature(generic_const_exprs)]
+mod gpu_circuit_prover;
+pub mod job_runner;
+mod metrics;
+mod types;
+mod witness_vector_generator;
diff --git a/prover/crates/lib/circuit_prover_service/src/metrics.rs b/prover/crates/lib/circuit_prover_service/src/metrics.rs
new file mode 100644
index 000000000000..c102422c4771
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/metrics.rs
@@ -0,0 +1,46 @@
+use std::time::Duration;
+
+use vise::{Buckets, Histogram, Metrics};
+
+/// Metrics for witness vector generator execution
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "witness_vector_generator")]
+pub struct WitnessVectorGeneratorMetrics {
+    /// How long does it take to load witness vector inputs?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub pick_time: Histogram<Duration>,
+    /// How long does it take to synthesize witness vector?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub synthesize_time: Histogram<Duration>,
+    /// How long does it take to send witness vectors to gpu prover?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub transfer_time: Histogram<Duration>,
+    /// How long does it take to save witness vector failure?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub save_time: Histogram<Duration>,
+}
+
+#[vise::register]
+pub static WITNESS_VECTOR_GENERATOR_METRICS: vise::Global<WitnessVectorGeneratorMetrics> =
+    vise::Global::new();
+
+/// Metrics for GPU circuit prover execution
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "circuit_prover")]
+pub struct CircuitProverMetrics {
+    /// How long does it take to load prover inputs?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub load_time: Histogram<Duration>,
+    /// How long does it take to prove & verify?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub prove_and_verify_time: Histogram<Duration>,
+    /// How long does it take to save prover results?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub save_time: Histogram<Duration>,
+    /// How long does it take to finish a prover job from witness vector to circuit prover?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub full_time: Histogram<Duration>,
+}
+
+#[vise::register]
+pub static CIRCUIT_PROVER_METRICS: vise::Global<CircuitProverMetrics> = vise::Global::new();
diff --git a/prover/crates/lib/circuit_prover_service/src/types/circuit.rs b/prover/crates/lib/circuit_prover_service/src/types/circuit.rs
new file mode 100644
index 000000000000..19c05666b2c5
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/types/circuit.rs
@@ -0,0 +1,152 @@
+use std::sync::Arc;
+
+use anyhow::Context;
+use shivini::{gpu_proof_config::GpuProofConfig, gpu_prove_from_external_witness_data};
+use zkevm_test_harness::{
+    boojum::cs::implementations::setup::FinalizationHintsForProver,
+    prover_utils::{verify_base_layer_proof, verify_recursion_layer_proof},
+};
+use zksync_prover_fri_types::{
+    circuit_definitions::{
+        base_layer_proof_config,
+        boojum::{
+            algebraic_props::{
+                round_function::AbsorptionModeOverwrite, sponge::GoldilocksPoseidon2Sponge,
+            },
+            cs::implementations::{
+                pow::NoPow, proof::Proof as CryptoProof, transcript::GoldilocksPoisedon2Transcript,
+                witness::WitnessVec,
+            },
+            field::goldilocks::{GoldilocksExt2, GoldilocksField},
+            worker::Worker,
+        },
+        circuit_definitions::{
+            base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerProof},
+            recursion_layer::{ZkSyncRecursionLayerProof, ZkSyncRecursiveLayerCircuit},
+        },
+        recursion_layer_proof_config,
+    },
+    FriProofWrapper,
+};
+use zksync_prover_keystore::GoldilocksGpuProverSetupData;
+
+type Transcript = GoldilocksPoisedon2Transcript;
+type Field = GoldilocksField;
+type Hasher = GoldilocksPoseidon2Sponge<AbsorptionModeOverwrite>;
+type Extension = GoldilocksExt2;
+type Proof = CryptoProof<Field, Hasher, Extension>;
+
+/// Hydrated circuit.
+/// Circuits are currently dehydrated for memory and storage reasons.
+/// Circuits are hydrated on the fly where necessary.
+// TODO: This enum will be merged with CircuitWrapper once BWG changes are done.
+#[allow(clippy::large_enum_variant)]
+pub enum Circuit {
+    Base(ZkSyncBaseLayerCircuit),
+    Recursive(ZkSyncRecursiveLayerCircuit),
+}
+
+impl Circuit {
+    /// Generates proof for given witness vector.
+    /// Expects setup_data to match witness vector.
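+    /// The proof is verified immediately after generation; a proof that fails verification is
+    /// surfaced as an error instead of being returned to the caller.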
+    pub(crate) fn prove(
+        &self,
+        witness_vector: WitnessVec<GoldilocksField>,
+        setup_data: Arc<GoldilocksGpuProverSetupData>,
+    ) -> anyhow::Result<FriProofWrapper> {
+        let worker = Worker::new();
+
+        match self {
+            Circuit::Base(circuit) => {
+                let proof = Self::prove_base(circuit, witness_vector, setup_data, worker)?;
+                let circuit_id = circuit.numeric_circuit_type();
+                Ok(FriProofWrapper::Base(ZkSyncBaseLayerProof::from_inner(
+                    circuit_id, proof,
+                )))
+            }
+            Circuit::Recursive(circuit) => {
+                let proof = Self::prove_recursive(circuit, witness_vector, setup_data, worker)?;
+                let circuit_id = circuit.numeric_circuit_type();
+                Ok(FriProofWrapper::Recursive(
+                    ZkSyncRecursionLayerProof::from_inner(circuit_id, proof),
+                ))
+            }
+        }
+    }
+
+    /// Prove & verify base circuit.
+    fn prove_base(
+        circuit: &ZkSyncBaseLayerCircuit,
+        witness_vector: WitnessVec<GoldilocksField>,
+        setup_data: Arc<GoldilocksGpuProverSetupData>,
+        worker: Worker,
+    ) -> anyhow::Result<Proof> {
+        let span = tracing::info_span!("prove_base_circuit").entered();
+        let gpu_proof_config = GpuProofConfig::from_base_layer_circuit(circuit);
+        let boojum_proof_config = base_layer_proof_config();
+        let proof = gpu_prove_from_external_witness_data::<Transcript, Hasher, NoPow>(
+            &gpu_proof_config,
+            &witness_vector,
+            boojum_proof_config,
+            &setup_data.setup,
+            &setup_data.vk,
+            (),
+            &worker,
+        )
+        .context("failed to generate base proof")?
+        .into();
+        drop(span);
+        let _span = tracing::info_span!("verify_base_circuit").entered();
+        if !verify_base_layer_proof::<NoPow>(circuit, &proof, &setup_data.vk) {
+            return Err(anyhow::anyhow!("failed to verify base proof"));
+        }
+        Ok(proof)
+    }
+
+    /// Prove & verify recursive circuit.
+    fn prove_recursive(
+        circuit: &ZkSyncRecursiveLayerCircuit,
+        witness_vector: WitnessVec<GoldilocksField>,
+        setup_data: Arc<GoldilocksGpuProverSetupData>,
+        worker: Worker,
+    ) -> anyhow::Result<Proof> {
+        let span = tracing::info_span!("prove_recursive_circuit").entered();
+        let gpu_proof_config = GpuProofConfig::from_recursive_layer_circuit(circuit);
+        let boojum_proof_config = recursion_layer_proof_config();
+        let proof = gpu_prove_from_external_witness_data::<Transcript, Hasher, NoPow>(
+            &gpu_proof_config,
+            &witness_vector,
+            boojum_proof_config,
+            &setup_data.setup,
+            &setup_data.vk,
+            (),
+            &worker,
+        )
+        .context("failed to generate recursive proof")?
+        .into();
+        drop(span);
+        let _span = tracing::info_span!("verify_recursive_circuit").entered();
+        if !verify_recursion_layer_proof::<NoPow>(circuit, &proof, &setup_data.vk) {
+            return Err(anyhow::anyhow!("failed to verify recursive proof"));
+        }
+        Ok(proof)
+    }
+
+    /// Synthesize vector for a given circuit.
+    /// Expects finalization hints to match circuit.
+    pub(crate) fn synthesize_vector(
+        &self,
+        finalization_hints: Arc<FinalizationHintsForProver>,
+    ) -> anyhow::Result<WitnessVec<GoldilocksField>> {
+        let _span = tracing::info_span!("synthesize_vector").entered();
+
+        let cs = match self {
+            Circuit::Base(circuit) => circuit.synthesis::<GoldilocksField>(&finalization_hints),
+            Circuit::Recursive(circuit) => {
+                circuit.synthesis::<GoldilocksField>(&finalization_hints)
+            }
+        };
+        cs.witness
+            .context("circuit is missing witness post synthesis")
+    }
+}
diff --git a/prover/crates/lib/circuit_prover_service/src/types/circuit_prover_payload.rs b/prover/crates/lib/circuit_prover_service/src/types/circuit_prover_payload.rs
new file mode 100644
index 000000000000..925b7b318ccc
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/types/circuit_prover_payload.rs
@@ -0,0 +1,15 @@
+use std::sync::Arc;
+
+use zksync_prover_fri_types::circuit_definitions::boojum::{
+    cs::implementations::witness::WitnessVec, field::goldilocks::GoldilocksField,
+};
+use zksync_prover_keystore::GoldilocksGpuProverSetupData;
+
+use crate::types::circuit::Circuit;
+
+/// Payload used as input for GPU circuit prover.
+pub struct GpuCircuitProverPayload {
+    pub circuit: Circuit,
+    pub witness_vector: WitnessVec<GoldilocksField>,
+    pub setup_data: Arc<GoldilocksGpuProverSetupData>,
+}
diff --git a/prover/crates/lib/circuit_prover_service/src/types/mod.rs b/prover/crates/lib/circuit_prover_service/src/types/mod.rs
new file mode 100644
index 000000000000..cbbf0d885f7a
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/types/mod.rs
@@ -0,0 +1,4 @@
+pub mod circuit;
+pub mod circuit_prover_payload;
+pub mod witness_vector_generator_execution_output;
+pub mod witness_vector_generator_payload;
diff --git a/prover/crates/lib/circuit_prover_service/src/types/witness_vector_generator_execution_output.rs b/prover/crates/lib/circuit_prover_service/src/types/witness_vector_generator_execution_output.rs
new file mode 100644
index 000000000000..593f825f8f99
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/types/witness_vector_generator_execution_output.rs
@@ -0,0 +1,11 @@
+use zksync_prover_fri_types::circuit_definitions::boojum::{
+    cs::implementations::witness::WitnessVec, field::goldilocks::GoldilocksField,
+};
+
+use crate::types::circuit::Circuit;
+
+/// Witness vector generator output. Used as input for GPU circuit provers.
+pub struct WitnessVectorGeneratorExecutionOutput {
+    pub circuit: Circuit,
+    pub witness_vector: WitnessVec<GoldilocksField>,
+}
diff --git a/prover/crates/lib/circuit_prover_service/src/types/witness_vector_generator_payload.rs b/prover/crates/lib/circuit_prover_service/src/types/witness_vector_generator_payload.rs
new file mode 100644
index 000000000000..409e178ac61a
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/types/witness_vector_generator_payload.rs
@@ -0,0 +1,11 @@
+use std::sync::Arc;
+
+use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::setup::FinalizationHintsForProver;
+
+use crate::types::circuit::Circuit;
+
+/// Payload used as input for Witness Vector Generator.
+pub struct WitnessVectorGeneratorPayload {
+    pub circuit: Circuit,
+    pub finalization_hints: Arc<FinalizationHintsForProver>,
+}
diff --git a/prover/crates/lib/circuit_prover_service/src/witness_vector_generator/mod.rs b/prover/crates/lib/circuit_prover_service/src/witness_vector_generator/mod.rs
new file mode 100644
index 000000000000..d5b140dac94f
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/witness_vector_generator/mod.rs
@@ -0,0 +1,11 @@
+pub use witness_vector_generator_executor::WitnessVectorGeneratorExecutor;
+pub use witness_vector_generator_job_picker::WitnessVectorGeneratorJobPicker;
+pub use witness_vector_generator_job_saver::WitnessVectorGeneratorJobSaver;
+pub use witness_vector_generator_metadata_loader::{
+    HeavyWitnessVectorMetadataLoader, LightWitnessVectorMetadataLoader, WitnessVectorMetadataLoader,
+};
+
+mod witness_vector_generator_executor;
+mod witness_vector_generator_job_picker;
+mod witness_vector_generator_job_saver;
+mod witness_vector_generator_metadata_loader;
diff --git a/prover/crates/lib/circuit_prover_service/src/witness_vector_generator/witness_vector_generator_executor.rs b/prover/crates/lib/circuit_prover_service/src/witness_vector_generator/witness_vector_generator_executor.rs
new file mode 100644
index 000000000000..e9dd7e31fd63
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/witness_vector_generator/witness_vector_generator_executor.rs
@@ -0,0 +1,66 @@
+use std::time::Instant;
+
+use anyhow::Context;
+use zksync_prover_job_processor::Executor;
+use zksync_types::prover_dal::FriProverJobMetadata;
+
+use crate::{
+    metrics::WITNESS_VECTOR_GENERATOR_METRICS,
+    types::{
+        witness_vector_generator_execution_output::WitnessVectorGeneratorExecutionOutput,
+        witness_vector_generator_payload::WitnessVectorGeneratorPayload,
+    },
+};
+
+/// WitnessVectorGenerator executor implementation.
+/// Synthesizes witness vectors to later be used in GPU circuit proving.
+#[derive(Debug)]
+pub struct WitnessVectorGeneratorExecutor;
+
+impl Executor for WitnessVectorGeneratorExecutor {
+    type Input = WitnessVectorGeneratorPayload;
+    type Output = WitnessVectorGeneratorExecutionOutput;
+    type Metadata = FriProverJobMetadata;
+
+    #[tracing::instrument(
+        name = "witness_vector_generator_executor",
+        skip_all,
+        fields(l1_batch = %metadata.block_number)
+    )]
+    fn execute(
+        &self,
+        input: Self::Input,
+        metadata: Self::Metadata,
+    ) -> anyhow::Result<Self::Output> {
+        let start_time = Instant::now();
+        tracing::info!(
+            "Started executing witness vector generator job {}, on batch {}, for circuit {}, at round {}",
+            metadata.id,
+            metadata.block_number,
+            metadata.circuit_id,
+            metadata.aggregation_round
+        );
+        let WitnessVectorGeneratorPayload {
+            circuit,
+            finalization_hints,
+        } = input;
+        let witness_vector = circuit
+            .synthesize_vector(finalization_hints)
+            .context("failed to generate witness vector")?;
+        tracing::info!(
+            "Finished executing witness vector generator job {}, on batch {}, for circuit {}, at round {} in {:?}",
+            metadata.id,
+            metadata.block_number,
+            metadata.circuit_id,
+            metadata.aggregation_round,
+            start_time.elapsed()
+        );
+        WITNESS_VECTOR_GENERATOR_METRICS
+            .synthesize_time
+            .observe(start_time.elapsed());
+        Ok(WitnessVectorGeneratorExecutionOutput {
+            circuit,
+            witness_vector,
+        })
+    }
+}
diff --git a/prover/crates/lib/circuit_prover_service/src/witness_vector_generator/witness_vector_generator_job_picker.rs b/prover/crates/lib/circuit_prover_service/src/witness_vector_generator/witness_vector_generator_job_picker.rs
new file mode 100644
index 000000000000..76e0f151c7ca
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/witness_vector_generator/witness_vector_generator_job_picker.rs
@@ -0,0 +1,167 @@
+use std::{collections::HashMap, sync::Arc, time::Instant};
+
+use anyhow::Context;
+use async_trait::async_trait;
+use zksync_object_store::ObjectStore;
+use zksync_prover_dal::{ConnectionPool, Prover};
+use zksync_prover_fri_types::{
+    circuit_definitions::{
+        boojum::{
+            cs::implementations::setup::FinalizationHintsForProver,
+            gadgets::queue::full_state_queue::FullStateCircuitQueueRawWitness,
+        },
+        circuit_definitions::base_layer::ZkSyncBaseLayerCircuit,
+    },
+    keys::RamPermutationQueueWitnessKey,
+    CircuitAuxData, CircuitWrapper, ProverServiceDataKey, RamPermutationQueueWitness,
+};
+use zksync_prover_job_processor::JobPicker;
+use zksync_types::{prover_dal::FriProverJobMetadata, L1BatchNumber};
+
+use crate::{
+    metrics::WITNESS_VECTOR_GENERATOR_METRICS,
+    types::{circuit::Circuit, witness_vector_generator_payload::WitnessVectorGeneratorPayload},
+    witness_vector_generator::{
+        witness_vector_generator_metadata_loader::WitnessVectorMetadataLoader,
+        WitnessVectorGeneratorExecutor,
+    },
+};
+
+/// WitnessVectorGenerator job picker implementation.
+/// Picks job from database (via MetadataLoader) and gets data from object store.
+#[derive(Debug)]
+pub struct WitnessVectorGeneratorJobPicker<ML: WitnessVectorMetadataLoader> {
+    connection_pool: ConnectionPool<Prover>,
+    object_store: Arc<dyn ObjectStore>,
+    finalization_hints_cache: HashMap<ProverServiceDataKey, Arc<FinalizationHintsForProver>>,
+    metadata_loader: ML,
+}
+
+impl<ML: WitnessVectorMetadataLoader> WitnessVectorGeneratorJobPicker<ML> {
+    pub fn new(
+        connection_pool: ConnectionPool<Prover>,
+        object_store: Arc<dyn ObjectStore>,
+        finalization_hints_cache: HashMap<ProverServiceDataKey, Arc<FinalizationHintsForProver>>,
+        metadata_loader: ML,
+    ) -> Self {
+        Self {
+            connection_pool,
+            object_store,
+            finalization_hints_cache,
+            metadata_loader,
+        }
+    }
+
+    /// Hydrates job data with witness information which is stored separately.
+    /// This is done in order to save RAM & storage.
+    // TODO: Once new BWG is done, this won't be necessary.
+    async fn fill_witness(
+        &self,
+        circuit: ZkSyncBaseLayerCircuit,
+        aux_data: CircuitAuxData,
+        l1_batch_number: L1BatchNumber,
+    ) -> anyhow::Result<Circuit> {
+        if let ZkSyncBaseLayerCircuit::RAMPermutation(circuit_instance) = circuit {
+            let sorted_witness_key = RamPermutationQueueWitnessKey {
+                block_number: l1_batch_number,
+                circuit_subsequence_number: aux_data.circuit_subsequence_number as usize,
+                is_sorted: true,
+            };
+            let sorted_witness: RamPermutationQueueWitness = self
+                .object_store
+                .get(sorted_witness_key)
+                .await
+                .context("failed to load sorted witness key")?;
+
+            let unsorted_witness_key = RamPermutationQueueWitnessKey {
+                block_number: l1_batch_number,
+                circuit_subsequence_number: aux_data.circuit_subsequence_number as usize,
+                is_sorted: false,
+            };
+            let unsorted_witness: RamPermutationQueueWitness = self
+                .object_store
+                .get(unsorted_witness_key)
+                .await
+                .context("failed to load unsorted witness key")?;
+
+            let mut witness = circuit_instance.witness.take().unwrap();
+            witness.unsorted_queue_witness = FullStateCircuitQueueRawWitness {
+                elements: unsorted_witness.witness.into(),
+            };
+            witness.sorted_queue_witness = FullStateCircuitQueueRawWitness {
+                elements: sorted_witness.witness.into(),
+            };
+            circuit_instance.witness.store(Some(witness));
+
+            return Ok(Circuit::Base(ZkSyncBaseLayerCircuit::RAMPermutation(
+                circuit_instance,
+            )));
+        }
+        Err(anyhow::anyhow!(
+            "unexpected circuit received with partial witness, expected RAM permutation, got {:?}",
+            circuit.short_description()
+        ))
+    }
+}
+
+#[async_trait]
+impl<ML: WitnessVectorMetadataLoader> JobPicker for WitnessVectorGeneratorJobPicker<ML> {
+    type ExecutorType = WitnessVectorGeneratorExecutor;
+    async fn pick_job(
+        &mut self,
+    ) -> anyhow::Result<Option<(WitnessVectorGeneratorPayload, FriProverJobMetadata)>> {
+        let start_time = Instant::now();
+        tracing::info!("Started picking witness vector generator job");
+        let connection = self
+            .connection_pool
+            .connection()
+            .await
+            .context("failed to get db connection")?;
+        let metadata = match self.metadata_loader.load_metadata(connection).await {
+            None => return Ok(None),
+            Some(metadata) => metadata,
+        };
+
+        let circuit_wrapper = self
+            .object_store
+            .get(metadata.into())
+            .await
+            .context("failed to get circuit_wrapper from object store")?;
+        let circuit = match circuit_wrapper {
+            CircuitWrapper::Base(circuit) => Circuit::Base(circuit),
+            CircuitWrapper::Recursive(circuit) => Circuit::Recursive(circuit),
+            CircuitWrapper::BasePartial((circuit, aux_data)) => self
+                .fill_witness(circuit, aux_data, metadata.block_number)
+                .await
+                .context("failed to fill witness")?,
+        };
+
+        let key = ProverServiceDataKey {
+            circuit_id: metadata.circuit_id,
+            round: metadata.aggregation_round,
+        }
+        .crypto_setup_key();
+        let finalization_hints = self
+            .finalization_hints_cache
+            .get(&key)
+            .context("failed to retrieve finalization key from cache")?
+            .clone();
+
+        let payload = WitnessVectorGeneratorPayload {
+            circuit,
+            finalization_hints,
+        };
+        tracing::info!(
+            "Finished picking witness vector generator job {}, on batch {}, for circuit {}, at round {} in {:?}",
+            metadata.id,
+            metadata.block_number,
+            metadata.circuit_id,
+            metadata.aggregation_round,
+            start_time.elapsed()
+        );
+        WITNESS_VECTOR_GENERATOR_METRICS
+            .pick_time
+            .observe(start_time.elapsed());
+        Ok(Some((payload, metadata)))
+    }
+}
diff --git a/prover/crates/lib/circuit_prover_service/src/witness_vector_generator/witness_vector_generator_job_saver.rs b/prover/crates/lib/circuit_prover_service/src/witness_vector_generator/witness_vector_generator_job_saver.rs
new file mode 100644
index 000000000000..86e04472b299
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/witness_vector_generator/witness_vector_generator_job_saver.rs
@@ -0,0 +1,114 @@
+use std::time::Instant;
+
+use anyhow::Context;
+use async_trait::async_trait;
+use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
+use zksync_prover_job_processor::JobSaver;
+use zksync_types::prover_dal::FriProverJobMetadata;
+
+use crate::{
+    metrics::WITNESS_VECTOR_GENERATOR_METRICS,
+    types::witness_vector_generator_execution_output::WitnessVectorGeneratorExecutionOutput,
+    witness_vector_generator::WitnessVectorGeneratorExecutor,
+};
+
+/// WitnessVectorGenerator job saver implementation.
+/// On successful execution, sends data further to gpu circuit prover.
+/// On error, marks the job as failed in database.
+#[derive(Debug)]
+pub struct WitnessVectorGeneratorJobSaver {
+    connection_pool: ConnectionPool<Prover>,
+    sender:
+        tokio::sync::mpsc::Sender<(WitnessVectorGeneratorExecutionOutput, FriProverJobMetadata)>,
+}
+
+impl WitnessVectorGeneratorJobSaver {
+    pub fn new(
+        connection_pool: ConnectionPool<Prover>,
+        sender: tokio::sync::mpsc::Sender<(
+            WitnessVectorGeneratorExecutionOutput,
+            FriProverJobMetadata,
+        )>,
+    ) -> Self {
+        Self {
+            connection_pool,
+            sender,
+        }
+    }
+}
+
+#[async_trait]
+impl JobSaver for WitnessVectorGeneratorJobSaver {
+    type ExecutorType = WitnessVectorGeneratorExecutor;
+
+    #[tracing::instrument(
+        name = "witness_vector_generator_save_job",
+        skip_all,
+        fields(l1_batch = %data.1.block_number)
+    )]
+    async fn save_job_result(
+        &self,
+        data: (
+            anyhow::Result<WitnessVectorGeneratorExecutionOutput>,
+            FriProverJobMetadata,
+        ),
+    ) -> anyhow::Result<()> {
+        let start_time = Instant::now();
+        let (result, metadata) = data;
+        match result {
+            Ok(payload) => {
+                tracing::info!(
+                    "Started transferring witness vector generator job {}, on batch {}, for circuit {}, at round {}",
+                    metadata.id,
+                    metadata.block_number,
+                    metadata.circuit_id,
+                    metadata.aggregation_round
+                );
+                if self.sender.send((payload, metadata)).await.is_err() {
+                    tracing::warn!("circuit prover shut down prematurely");
+                    return Ok(());
+                }
+                tracing::info!(
+                    "Finished transferring witness vector generator job {}, on batch {}, for circuit {}, at round {} in {:?}",
+                    metadata.id,
+                    metadata.block_number,
+                    metadata.circuit_id,
+                    metadata.aggregation_round,
+                    start_time.elapsed()
+                );
+                WITNESS_VECTOR_GENERATOR_METRICS
+                    .transfer_time
+                    .observe(start_time.elapsed());
+            }
+            Err(err) => {
+                tracing::error!("Witness vector generation failed: {:?}", err);
+                tracing::info!(
+                    "Started saving failure for witness vector generator job {}, on batch {}, for circuit {}, at round {}",
+                    metadata.id,
+                    metadata.block_number,
+                    metadata.circuit_id,
+                    metadata.aggregation_round
+                );
+                self.connection_pool
+                    .connection()
+                    .await
+                    .context("failed to get db connection")?
+                    .fri_prover_jobs_dal()
+                    .save_proof_error(metadata.id, err.to_string())
+                    .await;
+                tracing::info!(
+                    "Finished saving failure for witness vector generator job {}, on batch {}, for circuit {}, at round {} in {:?}",
+                    metadata.id,
+                    metadata.block_number,
+                    metadata.circuit_id,
+                    metadata.aggregation_round,
+                    start_time.elapsed()
+                );
+                WITNESS_VECTOR_GENERATOR_METRICS
+                    .save_time
+                    .observe(start_time.elapsed());
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/prover/crates/lib/circuit_prover_service/src/witness_vector_generator/witness_vector_generator_metadata_loader.rs b/prover/crates/lib/circuit_prover_service/src/witness_vector_generator/witness_vector_generator_metadata_loader.rs
new file mode 100644
index 000000000000..bb0b6ec6e94c
--- /dev/null
+++ b/prover/crates/lib/circuit_prover_service/src/witness_vector_generator/witness_vector_generator_metadata_loader.rs
@@ -0,0 +1,83 @@
+use async_trait::async_trait;
+use zksync_prover_dal::{Connection, Prover, ProverDal};
+use zksync_types::{protocol_version::ProtocolSemanticVersion, prover_dal::FriProverJobMetadata};
+
+/// Trait responsible for describing the job loading interface.
+/// This is necessary as multiple loading strategies are needed, each requiring a different implementation.
+#[async_trait]
+pub trait WitnessVectorMetadataLoader: Sync + Send + 'static {
+    async fn load_metadata(
+        &self,
+        connection: Connection<'_, Prover>,
+    ) -> Option<FriProverJobMetadata>;
+}
+
+/// Light job MetadataLoader.
+///
+/// Most jobs are light, apart from nodes. This loader will only pick non-node jobs.
+#[derive(Debug)]
+pub struct LightWitnessVectorMetadataLoader {
+    pod_name: String,
+    protocol_version: ProtocolSemanticVersion,
+}
+
+impl LightWitnessVectorMetadataLoader {
+    pub fn new(pod_name: String, protocol_version: ProtocolSemanticVersion) -> Self {
+        Self {
+            pod_name,
+            protocol_version,
+        }
+    }
+}
+
+#[async_trait]
+impl WitnessVectorMetadataLoader for LightWitnessVectorMetadataLoader {
+    async fn load_metadata(
+        &self,
+        mut connection: Connection<'_, Prover>,
+    ) -> Option<FriProverJobMetadata> {
+        connection
+            .fri_prover_jobs_dal()
+            .get_light_job(self.protocol_version, &self.pod_name)
+            .await
+    }
+}
+
+/// Heavy job MetadataLoader.
+///
+/// Most jobs are light, apart from nodes. This loader will prioritize node jobs.
+/// If none are available, it will fall back to light jobs.
+#[derive(Debug)]
+pub struct HeavyWitnessVectorMetadataLoader {
+    pod_name: String,
+    protocol_version: ProtocolSemanticVersion,
+}
+
+impl HeavyWitnessVectorMetadataLoader {
+    pub fn new(pod_name: String, protocol_version: ProtocolSemanticVersion) -> Self {
+        Self {
+            pod_name,
+            protocol_version,
+        }
+    }
+}
+
+#[async_trait]
+impl WitnessVectorMetadataLoader for HeavyWitnessVectorMetadataLoader {
+    async fn load_metadata(
+        &self,
+        mut connection: Connection<'_, Prover>,
+    ) -> Option<FriProverJobMetadata> {
+        let metadata = connection
+            .fri_prover_jobs_dal()
+            .get_heavy_job(self.protocol_version, &self.pod_name)
+            .await;
+        if metadata.is_some() {
+            return metadata;
+        }
+        connection
+            .fri_prover_jobs_dal()
+            .get_light_job(self.protocol_version, &self.pod_name)
+            .await
+    }
+}
diff --git a/prover/crates/lib/keystore/src/keystore.rs b/prover/crates/lib/keystore/src/keystore.rs
index 6225943e3cd7..ab3b115bc635 100644
--- a/prover/crates/lib/keystore/src/keystore.rs
+++ b/prover/crates/lib/keystore/src/keystore.rs
@@ -470,6 +470,7 @@ impl Keystore {
     }
 
     /// Async loads mapping of all circuits to setup key, if successful
+    #[cfg(feature = "gpu")]
     pub async fn load_all_setup_key_mapping(
         &self,
     ) -> anyhow::Result<HashMap<ProverServiceDataKey, Arc<GoldilocksGpuProverSetupData>>> {
diff --git a/prover/crates/lib/prover_dal/.sqlx/query-3b3193bfac70b5fe69bf3bb7ba5a234c19578572973094b21ddbb3876da6bb95.json b/prover/crates/lib/prover_dal/.sqlx/query-4d89c375af2c211a8a896cad7c99d2c9ff0d28f4662913ef7c2cf6fa1aa430d4.json
similarity index 65%
rename from prover/crates/lib/prover_dal/.sqlx/query-3b3193bfac70b5fe69bf3bb7ba5a234c19578572973094b21ddbb3876da6bb95.json
rename to prover/crates/lib/prover_dal/.sqlx/query-4d89c375af2c211a8a896cad7c99d2c9ff0d28f4662913ef7c2cf6fa1aa430d4.json
index 962979344b4b..f84489dd6523 100644
--- a/prover/crates/lib/prover_dal/.sqlx/query-3b3193bfac70b5fe69bf3bb7ba5a234c19578572973094b21ddbb3876da6bb95.json
+++ b/prover/crates/lib/prover_dal/.sqlx/query-4d89c375af2c211a8a896cad7c99d2c9ff0d28f4662913ef7c2cf6fa1aa430d4.json
@@ -1,6 +1,6 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n id = (\n SELECT\n id\n FROM\n prover_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n ORDER BY\n l1_batch_number ASC,\n aggregation_round ASC,\n circuit_id ASC,\n id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n prover_jobs_fri.id,\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round,\n prover_jobs_fri.sequence_number,\n prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n ",
+  "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n id = (\n SELECT\n id\n FROM\n prover_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n AND aggregation_round = $4\n ORDER BY\n l1_batch_number ASC,\n circuit_id ASC,\n id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n prover_jobs_fri.id,\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round,\n prover_jobs_fri.sequence_number,\n prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n ",
   "describe": {
     "columns": [
       {
@@ -43,7 +43,8 @@
       "Left": [
         "Int4",
         "Int4",
-        "Text"
+        "Text",
+        "Int2"
       ]
     },
     "nullable": [
@@ -56,5 +57,5 @@
       false
     ]
   },
- "hash": "3b3193bfac70b5fe69bf3bb7ba5a234c19578572973094b21ddbb3876da6bb95" + "hash": "4d89c375af2c211a8a896cad7c99d2c9ff0d28f4662913ef7c2cf6fa1aa430d4" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-79b5ad4ef1ba888c3ffdb27cf2203367ae4cf57703c532fe3dfe18924c3c9492.json b/prover/crates/lib/prover_dal/.sqlx/query-79b5ad4ef1ba888c3ffdb27cf2203367ae4cf57703c532fe3dfe18924c3c9492.json new file mode 100644 index 000000000000..d1db20fbdbea --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-79b5ad4ef1ba888c3ffdb27cf2203367ae4cf57703c532fe3dfe18924c3c9492.json @@ -0,0 +1,61 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n id = (\n SELECT\n id\n FROM\n prover_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n AND aggregation_round != $4\n ORDER BY\n l1_batch_number ASC,\n aggregation_round ASC,\n circuit_id ASC,\n id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n prover_jobs_fri.id,\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round,\n prover_jobs_fri.sequence_number,\n prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "circuit_id", + "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "aggregation_round", + "type_info": "Int2" + }, + { + "ordinal": 4, + "name": "sequence_number", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "depth", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "is_node_final_proof", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Int4", + "Int4", + "Text", + "Int2" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false + ] + }, + "hash": "79b5ad4ef1ba888c3ffdb27cf2203367ae4cf57703c532fe3dfe18924c3c9492" +} diff --git a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs index a0420b056125..8efa8e2f6837 100644 --- a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs @@ -1,5 +1,10 @@ #![doc = include_str!("../doc/FriProverDal.md")] -use std::{collections::HashMap, convert::TryFrom, str::FromStr, time::Duration}; +use std::{ + collections::HashMap, + convert::TryFrom, + str::FromStr, + time::{Duration, Instant}, +}; use zksync_basic_types::{ basic_fri_types::{ @@ -60,8 +65,11 @@ impl FriProverDal<'_, '_> { /// - within the lowest batch, look at the lowest aggregation level (move up the proof tree) /// - pick the same type of circuit for as long as possible, this maximizes GPU cache reuse /// - /// NOTE: Most of this function is a duplicate of `get_next_job()`. Get next job will be deleted together with old prover. - pub async fn get_job( + /// Most of this function is similar to `get_light_job()`. + /// The 2 differ in the type of jobs they will load. Node jobs are heavy in resource utilization. + /// + /// NOTE: This function retrieves only node jobs. 
+ pub async fn get_heavy_job( &mut self, protocol_version: ProtocolSemanticVersion, picked_by: &str, @@ -85,6 +93,84 @@ impl FriProverDal<'_, '_> { status = 'queued' AND protocol_version = $1 AND protocol_version_patch = $2 + AND aggregation_round = $4 + ORDER BY + l1_batch_number ASC, + circuit_id ASC, + id ASC + LIMIT + 1 + FOR UPDATE + SKIP LOCKED + ) + RETURNING + prover_jobs_fri.id, + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.aggregation_round, + prover_jobs_fri.sequence_number, + prover_jobs_fri.depth, + prover_jobs_fri.is_node_final_proof + "#, + protocol_version.minor as i32, + protocol_version.patch.0 as i32, + picked_by, + AggregationRound::NodeAggregation as i64, + ) + .fetch_optional(self.storage.conn()) + .await + .expect("failed to get prover job") + .map(|row| FriProverJobMetadata { + id: row.id as u32, + block_number: L1BatchNumber(row.l1_batch_number as u32), + circuit_id: row.circuit_id as u8, + aggregation_round: AggregationRound::try_from(i32::from(row.aggregation_round)) + .unwrap(), + sequence_number: row.sequence_number as usize, + depth: row.depth as u16, + is_node_final_proof: row.is_node_final_proof, + pick_time: Instant::now(), + }) + } + + /// Retrieves the next prover job to be proven. Called by WVGs. + /// + /// Prover jobs must be thought of as ordered. + /// Prover must prioritize proving such jobs that will make the chain move forward the fastest. + /// Current ordering: + /// - pick the lowest batch + /// - within the lowest batch, look at the lowest aggregation level (move up the proof tree) + /// - pick the same type of circuit for as long as possible, this maximizes GPU cache reuse + /// + /// Most of this function is similar to `get_heavy_job()`. + /// The 2 differ in the type of jobs they will load. Node jobs are heavy in resource utilization. + /// + /// NOTE: This function retrieves all jobs but nodes. 
+    pub async fn get_light_job(
+        &mut self,
+        protocol_version: ProtocolSemanticVersion,
+        picked_by: &str,
+    ) -> Option<FriProverJobMetadata> {
+        sqlx::query!(
+            r#"
+            UPDATE prover_jobs_fri
+            SET
+                status = 'in_progress',
+                attempts = attempts + 1,
+                updated_at = NOW(),
+                processing_started_at = NOW(),
+                picked_by = $3
+            WHERE
+                id = (
+                    SELECT
+                        id
+                    FROM
+                        prover_jobs_fri
+                    WHERE
+                        status = 'queued'
+                        AND protocol_version = $1
+                        AND protocol_version_patch = $2
+                        AND aggregation_round != $4
                     ORDER BY
                         l1_batch_number ASC,
                         aggregation_round ASC,
@@ -107,6 +193,7 @@ impl FriProverDal<'_, '_> {
             protocol_version.minor as i32,
             protocol_version.patch.0 as i32,
             picked_by,
+            AggregationRound::NodeAggregation as i64
         )
         .fetch_optional(self.storage.conn())
         .await
@@ -120,6 +207,7 @@ impl FriProverDal<'_, '_> {
             sequence_number: row.sequence_number as usize,
             depth: row.depth as u16,
             is_node_final_proof: row.is_node_final_proof,
+            pick_time: Instant::now(),
         })
     }
@@ -181,9 +269,9 @@ impl FriProverDal<'_, '_> {
             sequence_number: row.sequence_number as usize,
             depth: row.depth as u16,
             is_node_final_proof: row.is_node_final_proof,
+            pick_time: Instant::now(),
         })
     }
-
     pub async fn get_next_job_for_circuit_id_round(
         &mut self,
         circuits_to_pick: &[CircuitIdRoundTuple],
@@ -271,6 +359,7 @@ impl FriProverDal<'_, '_> {
             sequence_number: row.sequence_number as usize,
             depth: row.depth as u16,
             is_node_final_proof: row.is_node_final_proof,
+            pick_time: Instant::now(),
         })
     }
@@ -359,6 +448,7 @@ impl FriProverDal<'_, '_> {
             sequence_number: row.sequence_number as usize,
             depth: row.depth as u16,
             is_node_final_proof: row.is_node_final_proof,
+            pick_time: Instant::now(),
         })
         .unwrap()
     }
diff --git a/prover/crates/lib/prover_job_processor/Cargo.toml b/prover/crates/lib/prover_job_processor/Cargo.toml
new file mode 100644
index 000000000000..5197b33b1f95
--- /dev/null
+++ b/prover/crates/lib/prover_job_processor/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "zksync_prover_job_processor"
+description = "ZKsync Prover Job Processor"
+version.workspace = true
+edition.workspace = true
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+license.workspace = true
+keywords.workspace = true
+categories.workspace = true
+
+[dependencies]
+async-trait.workspace = true
+anyhow.workspace = true
+futures.workspace = true
+tokio.workspace = true
+tokio-stream.workspace = true
+tokio-util.workspace = true
+tracing.workspace = true
+vise.workspace = true
+strum.workspace = true
diff --git a/prover/crates/lib/prover_job_processor/README.md b/prover/crates/lib/prover_job_processor/README.md
new file mode 100644
index 000000000000..5eea5476d05d
--- /dev/null
+++ b/prover/crates/lib/prover_job_processor/README.md
@@ -0,0 +1,152 @@
+# Prover Job Processor
+
+Prover Job Processor aims to be a small "framework" that allows building prover components at break-neck speeds.
+
+## Context
+
+Previously, prover components were hand-tailored and had similar issues spread across the codebase. The "framework"'s
+purpose is to standardize implementations and lift the undifferentiated work from prover component developers.
+
+## How it works
+
+The "framework" exports 4 main primitives:
+
+- executor
+- job_picker
+- job_saver
+- job_runner
+
+### Executor
+
+This is the most important trait. It is meant to execute the crypto primitives (or any other payloads) and defines what
+the inputs are, what metadata has to travel with them and what the output will be. Executors will receive
+information from Job Picker and will provide it further to Job Saver.
+
+For example, this could be the witness vector generator (witness vector synthesis) or the circuit prover (GPU circuit
+proving & verification). Each would define what they need as input to operate and what they'll output.
+
+### Job Picker
+
+The starting point of the process. This trait is tied to Executor and will pick metadata & input that correspond to
+the Executor. Job Picker picks information and provides it to Executor.
+
+As examples, for witness vector generator it would be a query to the database & a query to object storage. For circuit
+prover, it would be waiting on the communication channel between witness vector generator and circuit prover.
+
+### Job Saver
+
+The final point of the process. This trait is tied to Executor and will receive metadata & output that correspond to
+the Executor. Job Saver receives information from Executor and saves it.
+
+Continuing with the same examples, for witness vector generator it would send the information to the communication
+channel between witness vector generator & circuit prover. For the circuit prover example, it would simply store the
+information to database & object store.
+
+### Job Runner
+
+A wrapper over all 3 traits above, ensuring they communicate with each other as expected & are spawned as
+long-running tasks.
+
+## Diagram
+
+```mermaid
+sequenceDiagram
+    participant p as Job Picker
+    participant e as Executor
+    participant s as Job Saver
+
+    p-->>p: Get metadata & input
+    p-->>e: Provide metadata & input
+    e-->>e: Execute
+    e-->>s: Provide metadata & output
+    s-->>s: Save output
+```
+
+## How to use it
+
+If you want to create a new prover component, you first need to define the communication boundaries:
+
+- metadata
+- input
+- output
+
+With these out of the way, you can specify the Executor and even integrate the crypto primitive. At this point in time
+you could fully cover it with unit tests to make sure the functionality works as intended.
+
+Moving forward, you'll need to understand where you get this information and where you store it. These are your Job
+Picker & Job Saver. NOTE: Just like the executor, you only need to implement the logic of executing/picking/saving a
+single job; the "framework" will take care of looping it over and transmitting the details from one end to the other.
+
+Once done, provide them as arguments to JobRunner, call `your_job_runner.run()` and you're good to go.
+
+TODO: Add example once testing is in place (a minimal sketch is provided at the end of this README).
+
+## More (internal) details
+
+There are a few things that we've glossed over; let's get into details:
+
+### Back-offs & cancelling
+
+As you might've guessed, from a production point of view, you need to make sure that the process can die gracefully
+(k8s sigterm), without being a nuisance to your dependencies (think DB or object store). As such, job picker can have
+an optional component responsible for back-off & cancelling.
+
+### How do components communicate
+
+Internally, `JobRunner` wraps all 3 primitives into tasks that loop in a `while channel.recv() {}` fashion. Each task
+is slightly special, but the logic is far from complex.
+
+### Limitations
+
+Back-off & cancelling are implemented only for the job picker. Whilst it might sound inconvenient, in practice it works
+great.
+When the cancel is received, the job picker will stop picking jobs; the executor will keep executing until there are
+no more jobs in the receiver; and the saver will save all jobs until no more are received from the executor.
+
+Backoff is currently hardcoded, but it is trivial to make it more configurable.
+
+Whilst not a limitation, the first version is applied only to `circuit_provers`. It's very likely that more
+enhancements will be needed to accommodate the rest of the codebase. Treat this as work in progress.
+
+## Objectives
+
+The "framework" wants to achieve the following:
+
+1. Reduce code complexity & technical debt (modularize the codebase)
+2. Empower testability of the prover codebase
+3. Optimize prover components for speed and multi-datacenter/multi-cloud setups
+4. Increase speed of delivery of prover components
+5. Enable external shops to implement their own flavors of prover components
+
+### 1. Reduce code complexity & technical debt (modularize the codebase)
+
+Previously, most prover components were custom written. This meant that the same logic was reimplemented across
+multiple components. Whilst the "framework" doesn't fully solve the problem, it drastically reduces the amount of code
+needed to start a new component.
+
+The rest of the code duplication can be tackled in the future as part of the node framework.
+
+### 2. Empower testability of the prover codebase
+
+Due to the entangled nature of the code, the prover codebase was difficult to test. The current modular setup enables
+testing each component in isolation. (This is not exactly true yet, given cryptography dependencies are too heavy, but
+it will be true in the new prover implementation.)
+
+### 3. Optimize prover components for speed and multi-datacenter/multi-cloud setups
+
+Previously, provers ran "sync": load a job; once loaded, execute it; once executed, save its result. Whilst this is
+fine, all steps can be done in parallel. This becomes super important when the database and the running machine are
+far apart and the round trip to the database can account for up to 50% of the entire time. In a multi-cloud (read:
+future) setup, this becomes even more painful. As a bonus, we remove the current database bottleneck (previously
+caused by the number of connections).
+
+### 4. Increase speed of delivery of prover components
+
+The Boojum release was rather slow and even releasing the current `circuit_prover` took longer than anticipated. Given
+upcoming prover updates, this release sets us up for success going forward. Furthermore, experimenting with different
+setups becomes a matter of days, rather than months.
+
+### 5. Enable external shops to implement their own flavors of prover components
+
+Most external folks have to fork zksync-era and keep an up-to-date fork if anything needs to be modified. The
+framework allows using the executors, whilst defining custom pickers/savers. This will be a massive time-saver for any
+external shop that wants to innovate on top of zksync-era's provers.
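+
+## Appendix: usage sketch
+
+The sketch below shows how the primitives fit together end to end. It is illustrative only: `SquaringExecutor`,
+`CountingJobPicker` and `PrintingJobSaver` are hypothetical names that are not part of this crate, while `Executor`,
+`JobPicker`, `JobSaver`, `JobRunner`, `Backoff` and `BackoffAndCancellable` are the items described above.
+
+```rust
+use async_trait::async_trait;
+use tokio_util::sync::CancellationToken;
+use zksync_prover_job_processor::{
+    Backoff, BackoffAndCancellable, Executor, JobPicker, JobRunner, JobSaver,
+};
+
+/// Executes a single job: squares the input.
+struct SquaringExecutor;
+
+impl Executor for SquaringExecutor {
+    type Input = u64;
+    type Output = u64;
+    // Metadata travels alongside the input; here it's just a job id.
+    type Metadata = u64;
+
+    fn execute(&self, input: Self::Input, _metadata: Self::Metadata) -> anyhow::Result<u64> {
+        Ok(input * input)
+    }
+}
+
+/// Picks 5 jobs, then keeps returning `None`, which triggers the picker's back-off.
+struct CountingJobPicker(u64);
+
+#[async_trait]
+impl JobPicker for CountingJobPicker {
+    type ExecutorType = SquaringExecutor;
+
+    async fn pick_job(&mut self) -> anyhow::Result<Option<(u64, u64)>> {
+        if self.0 < 5 {
+            self.0 += 1;
+            Ok(Some((self.0, self.0))) // (input, metadata)
+        } else {
+            Ok(None)
+        }
+    }
+}
+
+/// "Saves" results by printing them.
+struct PrintingJobSaver;
+
+#[async_trait]
+impl JobSaver for PrintingJobSaver {
+    type ExecutorType = SquaringExecutor;
+
+    async fn save_job_result(&self, data: (anyhow::Result<u64>, u64)) -> anyhow::Result<()> {
+        let (result, job_id) = data;
+        println!("job {job_id} finished with {result:?}");
+        Ok(())
+    }
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    let cancellation_token = CancellationToken::new();
+    let runner = JobRunner::new(
+        SquaringExecutor,
+        CountingJobPicker(0),
+        PrintingJobSaver,
+        2, // number of concurrent workers
+        Some(BackoffAndCancellable::new(
+            Backoff::default(),
+            cancellation_token.clone(),
+        )),
+    );
+    // Spawns the picker, worker pool and saver tasks.
+    let handles = runner.run();
+    // ... later (e.g. on SIGTERM), shut down gracefully: the picker stops and in-flight jobs drain.
+    cancellation_token.cancel();
+    for handle in handles {
+        handle.await??;
+    }
+    Ok(())
+}
+```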
diff --git a/prover/crates/bin/circuit_prover/src/backoff.rs b/prover/crates/lib/prover_job_processor/src/backoff_and_cancellable.rs
similarity index 60%
rename from prover/crates/bin/circuit_prover/src/backoff.rs
rename to prover/crates/lib/prover_job_processor/src/backoff_and_cancellable.rs
index 6ddb3d94be35..15d80404dc71 100644
--- a/prover/crates/bin/circuit_prover/src/backoff.rs
+++ b/prover/crates/lib/prover_job_processor/src/backoff_and_cancellable.rs
@@ -1,5 +1,24 @@
 use std::{ops::Mul, time::Duration};
 
+use tokio_util::sync::CancellationToken;
+
+/// Utility struct that provides cancellation awareness & backoff capabilities.
+/// They usually go hand in hand; having a wrapper over both simplifies implementation.
+#[derive(Debug, Clone)]
+pub struct BackoffAndCancellable {
+    pub(crate) backoff: Backoff,
+    pub(crate) cancellation_token: CancellationToken,
+}
+
+impl BackoffAndCancellable {
+    pub fn new(backoff: Backoff, cancellation_token: CancellationToken) -> Self {
+        Self {
+            backoff,
+            cancellation_token,
+        }
+    }
+}
+
 /// Backoff - convenience structure that takes care of backoff timings.
 #[derive(Debug, Clone)]
 pub struct Backoff {
@@ -7,12 +26,10 @@ pub struct Backoff {
     current_delay: Duration,
     max_delay: Duration,
 }
-
 impl Backoff {
     /// The delay multiplication coefficient.
     // Currently it's hardcoded, but could be provided in the constructor.
    const DELAY_MULTIPLIER: u32 = 2;
-
     /// Create a backoff with base_delay (first delay) and max_delay (maximum delay possible).
     pub fn new(base_delay: Duration, max_delay: Duration) -> Self {
         Backoff {
@@ -37,3 +54,10 @@ impl Backoff {
         self.current_delay = self.base_delay;
     }
 }
+
+impl Default for Backoff {
+    /// Sensible database specific delays.
+    fn default() -> Self {
+        Self::new(Duration::from_secs(1), Duration::from_secs(5))
+    }
+}
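For reference, here is a sketch of the schedule that this yields. It assumes that `delay()` returns the current delay
and then multiplies it by `DELAY_MULTIPLIER`, capping at `max_delay` (the method body is unchanged by this rename and
not shown above), and that `delay()`/`reset()` are public, as in the pre-rename implementation:

```rust
use std::time::Duration;

use zksync_prover_job_processor::Backoff;

fn main() {
    // `Default` gives base_delay = 1s and max_delay = 5s; DELAY_MULTIPLIER is 2.
    let mut backoff = Backoff::default();
    // Assumed behavior: each call returns the current delay, then doubles it, capped at max_delay.
    assert_eq!(backoff.delay(), Duration::from_secs(1));
    assert_eq!(backoff.delay(), Duration::from_secs(2));
    assert_eq!(backoff.delay(), Duration::from_secs(4));
    assert_eq!(backoff.delay(), Duration::from_secs(5)); // capped
    assert_eq!(backoff.delay(), Duration::from_secs(5));
    // The job picker calls `reset()` after a successful pick, restoring the base delay.
    backoff.reset();
    assert_eq!(backoff.delay(), Duration::from_secs(1));
}
```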
diff --git a/prover/crates/lib/prover_job_processor/src/executor.rs b/prover/crates/lib/prover_job_processor/src/executor.rs
new file mode 100644
index 000000000000..80b019960e3e
--- /dev/null
+++ b/prover/crates/lib/prover_job_processor/src/executor.rs
@@ -0,0 +1,11 @@
+/// Executor trait, responsible for defining what a job's execution will look like.
+/// The trait covers what it expects as input, what it'll offer as output and what metadata needs to travel together with the input.
+/// This is the backbone of the `prover_job_processor` from a user's point of view.
+pub trait Executor: Send + Sync + 'static {
+    type Input: Send;
+    type Output: Send;
+    type Metadata: Send + Clone;
+
+    fn execute(&self, input: Self::Input, metadata: Self::Metadata)
+        -> anyhow::Result<Self::Output>;
+}
diff --git a/prover/crates/lib/prover_job_processor/src/job_picker.rs b/prover/crates/lib/prover_job_processor/src/job_picker.rs
new file mode 100644
index 000000000000..74ecbcde5d74
--- /dev/null
+++ b/prover/crates/lib/prover_job_processor/src/job_picker.rs
@@ -0,0 +1,18 @@
+use async_trait::async_trait;
+
+use crate::Executor;
+
+/// Job Picker trait, in charge of getting a new job for the executor.
+/// NOTE: Job Pickers are tied to an executor, which ensures input/output/metadata types match.
+#[async_trait]
+pub trait JobPicker: Send + Sync + 'static {
+    type ExecutorType: Executor;
+    async fn pick_job(
+        &mut self,
+    ) -> anyhow::Result<
+        Option<(
+            <Self::ExecutorType as Executor>::Input,
+            <Self::ExecutorType as Executor>::Metadata,
+        )>,
+    >;
+}
diff --git a/prover/crates/lib/prover_job_processor/src/job_runner.rs b/prover/crates/lib/prover_job_processor/src/job_runner.rs
new file mode 100644
index 000000000000..2a2d803e206d
--- /dev/null
+++ b/prover/crates/lib/prover_job_processor/src/job_runner.rs
@@ -0,0 +1,69 @@
+use tokio::task::JoinHandle;
+
+use crate::{
+    task_wiring::{JobPickerTask, JobSaverTask, Task, WorkerPool},
+    BackoffAndCancellable, Executor, JobPicker, JobSaver,
+};
+
+/// It's preferred to have a minimal amount of jobs in flight at any given time.
+/// This ensures that memory usage is minimized, that in case of failures only a small amount of jobs is lost, and that
+/// components can apply back pressure to each other in case of misconfiguration.
+const CHANNEL_SIZE: usize = 1;
+
+/// The "framework" wrapper that runs the entire machinery.
+/// Job Runner is responsible for tying together tasks (picker, executor, saver) and starting them.
+#[derive(Debug)]
+pub struct JobRunner<E, P, S>
+where
+    E: Executor,
+    P: JobPicker<ExecutorType = E>,
+    S: JobSaver<ExecutorType = E>,
+{
+    executor: E,
+    picker: P,
+    saver: S,
+    num_workers: usize,
+    picker_backoff_and_cancellable: Option<BackoffAndCancellable>,
+}
+
+impl<E, P, S> JobRunner<E, P, S>
+where
+    E: Executor,
+    P: JobPicker<ExecutorType = E>,
+    S: JobSaver<ExecutorType = E>,
+{
+    pub fn new(
+        executor: E,
+        picker: P,
+        saver: S,
+        num_workers: usize,
+        picker_backoff_and_cancellable: Option<BackoffAndCancellable>,
+    ) -> Self {
+        Self {
+            executor,
+            picker,
+            saver,
+            num_workers,
+            picker_backoff_and_cancellable,
+        }
+    }
+
+    /// Runs job runner tasks.
+    pub fn run(self) -> Vec<JoinHandle<anyhow::Result<()>>> {
+        let (input_tx, input_rx) =
+            tokio::sync::mpsc::channel::<(E::Input, E::Metadata)>(CHANNEL_SIZE);
+        let (result_tx, result_rx) =
+            tokio::sync::mpsc::channel::<(anyhow::Result<E::Output>, E::Metadata)>(CHANNEL_SIZE);
+
+        let picker_task =
+            JobPickerTask::new(self.picker, input_tx, self.picker_backoff_and_cancellable);
+        let worker_pool = WorkerPool::new(self.executor, self.num_workers, input_rx, result_tx);
+        let saver_task = JobSaverTask::new(self.saver, result_rx);
+
+        vec![
+            tokio::spawn(picker_task.run()),
+            tokio::spawn(worker_pool.run()),
+            tokio::spawn(saver_task.run()),
+        ]
+    }
+}
diff --git a/prover/crates/lib/prover_job_processor/src/job_saver.rs b/prover/crates/lib/prover_job_processor/src/job_saver.rs
new file mode 100644
index 000000000000..4c0833dd77a4
--- /dev/null
+++ b/prover/crates/lib/prover_job_processor/src/job_saver.rs
@@ -0,0 +1,19 @@
+use async_trait::async_trait;
+
+use crate::Executor;
+
+/// Job Saver trait, in charge of getting the result from the executor and dispatching it.
+/// Dispatch could be storing it, or sending it to a separate component.
+/// NOTE: Job Savers are tied to an executor, which ensures input/output/metadata types match.
+#[async_trait]
+pub trait JobSaver: Send + Sync + 'static {
+    type ExecutorType: Executor;
+
+    async fn save_job_result(
+        &self,
+        data: (
+            anyhow::Result<<Self::ExecutorType as Executor>::Output>,
+            <Self::ExecutorType as Executor>::Metadata,
+        ),
+    ) -> anyhow::Result<()>;
+}
diff --git a/prover/crates/lib/prover_job_processor/src/lib.rs b/prover/crates/lib/prover_job_processor/src/lib.rs
new file mode 100644
index 000000000000..02847be533ff
--- /dev/null
+++ b/prover/crates/lib/prover_job_processor/src/lib.rs
@@ -0,0 +1,19 @@
+pub use backoff_and_cancellable::{Backoff, BackoffAndCancellable};
+pub use executor::Executor;
+pub use job_picker::JobPicker;
+pub use job_runner::JobRunner;
+pub use job_saver::JobSaver;
+
+mod backoff_and_cancellable;
+mod executor;
+mod job_picker;
+mod job_runner;
+mod job_saver;
+mod task_wiring;
+
+// convenience aliases to simplify declarations
+type Input<P> = <<P as JobPicker>::ExecutorType as Executor>::Input;
+type PickerMetadata<P> = <<P as JobPicker>::ExecutorType as Executor>::Metadata;
+
+type Output<S> = <<S as JobSaver>::ExecutorType as Executor>::Output;
+type SaverMetadata<S> = <<S as JobSaver>::ExecutorType as Executor>::Metadata;
diff --git a/prover/crates/lib/prover_job_processor/src/task_wiring/job_picker_task.rs b/prover/crates/lib/prover_job_processor/src/task_wiring/job_picker_task.rs
new file mode 100644
index 000000000000..f3e5e3ea4686
--- /dev/null
+++ b/prover/crates/lib/prover_job_processor/src/task_wiring/job_picker_task.rs
@@ -0,0 +1,77 @@
+use anyhow::Context;
+use async_trait::async_trait;
+
+use crate::{task_wiring::task::Task, BackoffAndCancellable, Input, JobPicker, PickerMetadata};
+
+/// Wrapper over JobPicker. Makes it a continuous task, picking jobs until cancelled.
+#[derive(Debug)]
+pub struct JobPickerTask<P: JobPicker> {
+    picker: P,
+    input_tx: tokio::sync::mpsc::Sender<(Input<P>, PickerMetadata<P>)>,
+    backoff_and_cancellable: Option<BackoffAndCancellable>,
+}
+
+impl<P: JobPicker> JobPickerTask<P> {
+    pub fn new(
+        picker: P,
+        input_tx: tokio::sync::mpsc::Sender<(Input<P>, PickerMetadata<P>)>,
+        backoff_and_cancellable: Option<BackoffAndCancellable>,
+    ) -> Self {
+        Self {
+            picker,
+            input_tx,
+            backoff_and_cancellable,
+        }
+    }
+
+    /// Backs off for the specified amount of time or until cancel is received, if available.
+    async fn backoff(&mut self) {
+        if let Some(backoff_and_cancellable) = &mut self.backoff_and_cancellable {
+            let backoff_duration = backoff_and_cancellable.backoff.delay();
+            tracing::info!("Backing off for {:?}...", backoff_duration);
+            // Error here corresponds to a timeout w/o receiving a cancel; we're OK with this.
+            tokio::time::timeout(
+                backoff_duration,
+                backoff_and_cancellable.cancellation_token.cancelled(),
+            )
+            .await
+            .ok();
+        }
+    }
+
+    /// Resets backoff to initial state, if available.
+    fn reset_backoff(&mut self) {
+        if let Some(backoff_and_cancellable) = &mut self.backoff_and_cancellable {
+            backoff_and_cancellable.backoff.reset();
+        }
+    }
+
+    /// Checks if the task is cancelled, if available.
+    fn is_cancelled(&self) -> bool {
+        if let Some(backoff_and_cancellable) = &self.backoff_and_cancellable {
+            return backoff_and_cancellable.cancellation_token.is_cancelled();
+        }
+        false
+    }
+}
+
+#[async_trait]
+impl<P: JobPicker> Task for JobPickerTask<P> {
+    async fn run(mut self) -> anyhow::Result<()> {
+        while !self.is_cancelled() {
+            match self.picker.pick_job().await.context("failed to pick job")? {
+                Some((input, metadata)) => {
+                    self.input_tx.send((input, metadata)).await.map_err(|err| {
+                        anyhow::anyhow!("job picker failed to pass job to executor: {}", err)
+                    })?;
+                    self.reset_backoff();
+                }
+                None => {
+                    self.backoff().await;
+                }
+            }
+        }
+        tracing::info!("Stop signal received, shutting down JobPickerTask...");
+        Ok(())
+    }
+}
diff --git a/prover/crates/lib/prover_job_processor/src/task_wiring/job_saver_task.rs b/prover/crates/lib/prover_job_processor/src/task_wiring/job_saver_task.rs
new file mode 100644
index 000000000000..8573821bc902
--- /dev/null
+++ b/prover/crates/lib/prover_job_processor/src/task_wiring/job_saver_task.rs
@@ -0,0 +1,33 @@
+use anyhow::Context;
+use async_trait::async_trait;
+
+use crate::{task_wiring::task::Task, JobSaver, Output, SaverMetadata};
+
+/// Wrapper over JobSaver. Makes it a continuous task, saving results until the execution channel is closed.
+#[derive(Debug)]
+pub struct JobSaverTask<S: JobSaver> {
+    saver: S,
+    result_rx: tokio::sync::mpsc::Receiver<(anyhow::Result<Output<S>>, SaverMetadata<S>)>,
+}
+
+impl<S: JobSaver> JobSaverTask<S> {
+    pub fn new(
+        saver: S,
+        result_rx: tokio::sync::mpsc::Receiver<(anyhow::Result<Output<S>>, SaverMetadata<S>)>,
+    ) -> Self {
+        Self { saver, result_rx }
+    }
+}
+
+#[async_trait]
+impl<S: JobSaver> Task for JobSaverTask<S> {
+    async fn run(mut self) -> anyhow::Result<()> {
+        while let Some(data) = self.result_rx.recv().await {
+            self.saver
+                .save_job_result(data)
+                .await
+                .context("failed to save result")?;
+        }
+        Ok(())
+    }
+}
diff --git a/prover/crates/lib/prover_job_processor/src/task_wiring/mod.rs b/prover/crates/lib/prover_job_processor/src/task_wiring/mod.rs
new file mode 100644
index 000000000000..4b1ded605f50
--- /dev/null
+++ b/prover/crates/lib/prover_job_processor/src/task_wiring/mod.rs
@@ -0,0 +1,9 @@
+pub use job_picker_task::JobPickerTask;
+pub use job_saver_task::JobSaverTask;
+pub use task::Task;
+pub use worker_pool::WorkerPool;
+
+mod job_picker_task;
+mod job_saver_task;
+mod task;
+mod worker_pool;
diff --git a/prover/crates/lib/prover_job_processor/src/task_wiring/task.rs b/prover/crates/lib/prover_job_processor/src/task_wiring/task.rs
new file mode 100644
index 000000000000..68f8156b67c1
--- /dev/null
+++ b/prover/crates/lib/prover_job_processor/src/task_wiring/task.rs
@@ -0,0 +1,7 @@
+use async_trait::async_trait;
+
+/// Convenience trait to tie together all task wrappers.
+#[async_trait]
+pub trait Task {
+    async fn run(mut self) -> anyhow::Result<()>;
+}
diff --git a/prover/crates/lib/prover_job_processor/src/task_wiring/worker_pool.rs b/prover/crates/lib/prover_job_processor/src/task_wiring/worker_pool.rs
new file mode 100644
index 000000000000..2f788ae99746
--- /dev/null
+++ b/prover/crates/lib/prover_job_processor/src/task_wiring/worker_pool.rs
@@ -0,0 +1,64 @@
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use futures::stream::StreamExt;
+use tokio_stream::wrappers::ReceiverStream;
+
+use crate::{executor::Executor, task_wiring::Task};
+
+/// Wrapper over Executor. Makes it a continuous task, executing jobs until the picker channel is closed.
+/// It can run multiple executions concurrently, up to the specified limit.
+#[derive(Debug)]
+pub struct WorkerPool<E>
+where
+    E: Executor,
+{
+    executor: E,
+    num_workers: usize,
+    input_rx: tokio::sync::mpsc::Receiver<(E::Input, E::Metadata)>,
+    result_tx: tokio::sync::mpsc::Sender<(anyhow::Result<E::Output>, E::Metadata)>,
+}
+
+impl<E: Executor> WorkerPool<E> {
+    pub fn new(
+        executor: E,
+        num_workers: usize,
+        input_rx: tokio::sync::mpsc::Receiver<(E::Input, E::Metadata)>,
+        result_tx: tokio::sync::mpsc::Sender<(anyhow::Result<E::Output>, E::Metadata)>,
+    ) -> Self {
+        Self {
+            executor,
+            num_workers,
+            input_rx,
+            result_tx,
+        }
+    }
+}
+
+#[async_trait]
+impl<E: Executor> Task for WorkerPool<E> {
+    async fn run(mut self) -> anyhow::Result<()> {
+        let executor = Arc::new(self.executor);
+        let num_workers = self.num_workers;
+        let stream = ReceiverStream::new(self.input_rx);
+
+        stream
+            .for_each_concurrent(num_workers, move |(input, metadata)| {
+                let executor = executor.clone();
+                let result_tx = self.result_tx.clone();
+                let exec_metadata = metadata.clone();
+                async move {
+                    let payload =
+                        tokio::task::spawn_blocking(move || executor.execute(input, exec_metadata))
+                            .await
+                            .expect("failed executing");
+                    result_tx
+                        .send((payload, metadata))
+                        .await
+                        .expect("job saver channel has been closed unexpectedly");
+                }
+            })
+            .await;
+        Ok(())
+    }
+}
diff --git a/prover/docs/.gitignore b/prover/docs/.gitignore
new file mode 100644
index 000000000000..7585238efedf
--- /dev/null
+++ b/prover/docs/.gitignore
@@ -0,0 +1 @@
+book
diff --git a/prover/docs/99_further_reading.md b/prover/docs/99_further_reading.md
deleted file mode 100644
index 64487a715d57..000000000000
--- a/prover/docs/99_further_reading.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Further reading
-
-The documentation in this section aimed to provide a practical overview of the prover workspace, e.g. help people to
-understand how to run provers and what they do.
-
-However, we have some documentation that is more focused on theory of proving in the [core workspace docs](../../docs/).
-
-You may find the following articles helpful for general understanding of ZK proofs:
-
-- [ZK intuition](../../docs/guides/advanced/13_zk_intuition.md).
-- [ZK deeper overview](../../docs/guides/advanced/14_zk_deeper_overview.md).
-- [Prover keys](../../docs/guides/advanced/15_prover_keys.md).
-- [Overview of our ZK proving system implementation](../../docs/specs/prover/).
diff --git a/prover/docs/book.toml b/prover/docs/book.toml new file mode 100644 index 000000000000..8e0a72942acd --- /dev/null +++ b/prover/docs/book.toml @@ -0,0 +1,32 @@ +[book] +authors = ["ZKsync team"] +language = "en" +multilingual = false +src = "src" +title = "ZKsync Prover Documentation" + +[output.html] +smart-punctuation = true +mathjax-support = true +git-repository-url = "https://github.com/matter-labs/zksync-era/tree/main/prover/docs" +edit-url-template = "https://github.com/matter-labs/zksync-era/tree/main/prover/docs/{path}" +additional-js = ["js/version-box.js", "js/mermaid-init.js"] +additional-css = ["css/version-box.css"] + +[output.html.playground] +editable = true +line-numbers = true + +[output.html.search] +limit-results = 20 +use-boolean-and = true +boost-title = 2 +boost-hierarchy = 2 +boost-paragraph = 1 +expand = true +heading-split-level = 2 + +[preprocessor] + +[preprocessor.mermaid] +command = "mdbook-mermaid" diff --git a/prover/docs/css/version-box.css b/prover/docs/css/version-box.css new file mode 100644 index 000000000000..4006ac7804b3 --- /dev/null +++ b/prover/docs/css/version-box.css @@ -0,0 +1,46 @@ +#version-box { + display: flex; + align-items: center; + margin-right: 15px; /* Space from the right side */ + background-color: transparent; /* Make the box background transparent */ +} + +/* Base styles for the version selector */ +#version-selector { + background-color: transparent; /* Remove background color */ + border: 1px solid #4a5568; /* Subtle border */ + border-radius: 4px; /* Rounded edges */ + padding: 5px 10px; /* Padding inside dropdown */ + font-size: 0.9em; + font-weight: normal; + outline: none; /* Removes default focus outline */ + cursor: pointer; +} + +/* Text color for dark themes */ +.theme-navy #version-selector, +.theme-coal #version-selector { + color: #f7fafc; /* Light text color for dark backgrounds */ +} + +/* Text color for light theme */ +.theme-light #version-selector { + color: #333333; /* Dark text color for light background */ +} + +/* Hover effect for better user feedback */ +#version-selector:hover { + background-color: rgba(255, 255, 255, 0.1); /* Light hover effect */ +} + +/* Optional: Style for when the selector is focused */ +#version-selector:focus { + border-color: #63b3ed; /* Accent color for focused state */ +} + +.right-buttons { + display: flex; + flex-direction: row; /* Aligns items in a row, left to right */ + align-items: center; /* Centers items vertically */ + gap: 10px; /* Adds space between items */ +} diff --git a/prover/docs/js/mermaid-init.js b/prover/docs/js/mermaid-init.js new file mode 100644 index 000000000000..15a7f4e57c60 --- /dev/null +++ b/prover/docs/js/mermaid-init.js @@ -0,0 +1,35 @@ +(() => { + const darkThemes = ['ayu', 'navy', 'coal']; + const lightThemes = ['light', 'rust']; + + const classList = document.getElementsByTagName('html')[0].classList; + + let lastThemeWasLight = true; + for (const cssClass of classList) { + if (darkThemes.includes(cssClass)) { + lastThemeWasLight = false; + break; + } + } + + const theme = lastThemeWasLight ? 
'default' : 'dark'; + mermaid.initialize({ startOnLoad: true, theme }); + + // Simplest way to make mermaid re-render the diagrams in the new theme is via refreshing the page + + for (const darkTheme of darkThemes) { + document.getElementById(darkTheme).addEventListener('click', () => { + if (lastThemeWasLight) { + window.location.reload(); + } + }); + } + + for (const lightTheme of lightThemes) { + document.getElementById(lightTheme).addEventListener('click', () => { + if (!lastThemeWasLight) { + window.location.reload(); + } + }); + } +})(); diff --git a/prover/docs/js/version-box.js b/prover/docs/js/version-box.js new file mode 100644 index 000000000000..a7d053e01b47 --- /dev/null +++ b/prover/docs/js/version-box.js @@ -0,0 +1,61 @@ +document.addEventListener('DOMContentLoaded', function () { + // Get the base URL from the mdBook configuration + const baseUrl = document.location.origin + '/zksync-era/prover'; + + // Function to create version selector + function createVersionSelector(versions) { + const versionSelector = document.createElement('select'); + versionSelector.id = 'version-selector'; + + // Get the current path + const currentPath = window.location.pathname; + + // Iterate over the versions object + for (const [versionName, versionUrl] of Object.entries(versions)) { + const option = document.createElement('option'); + option.value = versionUrl + '/'; + option.textContent = versionName; + + // Check if the current URL matches this option's value + if (currentPath.includes(option.value)) { + option.selected = true; // Set this option as selected + } + + versionSelector.appendChild(option); + } + + // Event listener to handle version change + versionSelector.addEventListener('change', function () { + const selectedVersion = versionSelector.value; + // Redirect to the selected version URL + window.location.href = '/zksync-era/prover' + selectedVersion; + }); + + return versionSelector; + } + + // Fetch versions from JSON file + fetch(baseUrl + '/versions.json') + .then((response) => { + if (!response.ok) { + throw new Error('Network response was not ok ' + response.statusText); + } + return response.json(); + }) + .then((data) => { + const versionSelector = createVersionSelector(data); + const nav = document.querySelector('.right-buttons'); + + if (nav) { + const versionBox = document.createElement('div'); + versionBox.id = 'version-box'; + versionBox.appendChild(versionSelector); + nav.appendChild(versionBox); // Append to the .right-buttons container + } else { + console.error('.right-buttons element not found.'); + } + }) + .catch((error) => { + console.error('There has been a problem with your fetch operation:', error); + }); +}); diff --git a/prover/docs/00_intro.md b/prover/docs/src/00_intro.md similarity index 100% rename from prover/docs/00_intro.md rename to prover/docs/src/00_intro.md diff --git a/prover/docs/01_gcp_vm.md b/prover/docs/src/01_gcp_vm.md similarity index 100% rename from prover/docs/01_gcp_vm.md rename to prover/docs/src/01_gcp_vm.md diff --git a/prover/docs/02_setup.md b/prover/docs/src/02_setup.md similarity index 100% rename from prover/docs/02_setup.md rename to prover/docs/src/02_setup.md diff --git a/prover/docs/03_launch.md b/prover/docs/src/03_launch.md similarity index 60% rename from prover/docs/03_launch.md rename to prover/docs/src/03_launch.md index 203fb6e8cecf..fcddf93174b9 100644 --- a/prover/docs/03_launch.md +++ b/prover/docs/src/03_launch.md @@ -2,37 +2,25 @@ ## Preparing -First, run the following command: +First, create a new chain 
with prover mode `GPU`: -``` -zk env prover-local +```bash +zkstack chain create --prover-mode gpu ``` -It will create a config similar to `dev`, but with: +It will create a config similar to `era`, but with: - Proof sending mode set to `OnlyRealProofs` - Prover mode set to `Local` instead of `GCS`. -You can always switch back to dev config via `zk env dev`. - -**Important:** If you change environments, you have to do `zk init` again. - -## Enter the prover workspace - -All the commands for binaries in the prover workspace must be done from the prover folder: - -``` -cd $ZKSYNC_HOME/prover -``` - ## Key generation This operation should only be done once; if you already generated keys, you can skip it. The following command will generate the required keys: -``` -zk f cargo run --features gpu --release --bin key_generator -- generate-sk-gpu all --recompute-if-missing +```bash +zkstack prover setup-keys ``` With that, you should be ready to run the prover. @@ -40,20 +28,20 @@ With that, you should be ready to run the prover. ## Running Important! Generating a proof takes a lot of time, so if you just want to see whether you can generate a proof, do it -against clean sequencer state (e.g. right after `zk init`). +against clean sequencer state (e.g. right after `zkstack chain init`). We will be running a bunch of binaries, it's recommended to run each in a separate terminal. ### Server -``` -zk server --components=api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip +```bash +zkstack server --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip ``` -### Proof data handler +### Prover gateway -``` -zk f cargo run --release --bin zksync_prover_fri_gateway +```bash +zkstack prover run --component=gateway ``` Then wait until the first job is picked up. Prover gateway has to insert protocol information into the database, and @@ -63,8 +51,8 @@ until it happens, witness generators will panic and won't be able to start. Once a job is created, start witness generators: -``` -zk f cargo run --release --bin zksync_witness_generator -- --all_rounds +```bash +zkstack prover run --component=witness-generator --round=all-rounds ``` `--all_rounds` means that witness generator will produce witnesses of all kinds. You can run a witness generator for @@ -72,22 +60,47 @@ each round separately, but it's mostly useful in production environments. ### Witness vector generator -``` -zk f cargo run --release --bin zksync_witness_vector_generator -- --threads 10 +```bash +zkstack prover run --component=witness-vector-generator --threads 10 ``` WVG prepares inputs for prover, and it's a single-threaded time-consuming operation. You may run several jobs by changing the `threads` parameter. The exact amount of WVGs needed to "feed" one prover depends on CPU/GPU specs, but a ballpark estimate (useful for local development) is 10 WVGs per prover. +> NOTE: The WVG thread typically uses approximately 10GB of RAM. + ### Prover -``` -zk f cargo run --features "gpu" --release --bin zksync_prover_fri +```bash +zkstack prover run --component=prover ``` Prover can prove any kinds of circuits, so you only need a single instance. +### Prover job monitor + +You can start the prover job monitor by specifying its component as follows. 
+ +```bash +zkstack prover run --component=prover-job-monitor +``` + +### Insert protocol version in prover database + +Before running the prover, you can insert the protocol version in the prover database by executing the following +command: + +```bash +zkstack dev prover insert-version --version <VERSION> --snark-wrapper=<SNARK_WRAPPER> +``` + +To query this information, use the following command: + +```bash +zkstack dev prover info +``` + ### Proof compressor ⚠️ Both prover and proof compressor require 24GB of VRAM, and currently it's not possible to make them use different @@ -96,8 +109,8 @@ GPU. So unless you have a GPU with 48GB of VRAM, you won't be able to run both at the same time. You should wait until the proof is generated, and once you see in the server logs that it tries to find available compressor, you can shut the prover down, and run the proof compressor: -``` -zk f cargo run --features "gpu" --release --bin zksync_proof_fri_compressor +```bash +zkstack prover run --component=compressor +``` Once the proof is compressed, proof gateway will see that and will send the generated proof back to core.
diff --git a/prover/docs/04_flow.md b/prover/docs/src/04_flow.md similarity index 100% rename from prover/docs/04_flow.md rename to prover/docs/src/04_flow.md
diff --git a/prover/docs/05_proving_batch.md b/prover/docs/src/05_proving_batch.md similarity index 100% rename from prover/docs/05_proving_batch.md rename to prover/docs/src/05_proving_batch.md
diff --git a/prover/docs/src/99_further_reading.md b/prover/docs/src/99_further_reading.md new file mode 100644 index 000000000000..7b916167dbdd --- /dev/null +++ b/prover/docs/src/99_further_reading.md @@ -0,0 +1,14 @@ +# Further reading + +The documentation in this section aims to give a practical overview of the prover workspace, e.g. to help people +understand how to run provers and what they do. + +However, we have documentation that is more focused on the theory of proving in the +[core workspace docs](https://matter-labs.github.io/zksync-era/core/latest). + +You may find the following articles helpful for a general understanding of ZK proofs: + +- [ZK intuition](https://matter-labs.github.io/zksync-era/core/latest/guides/advanced/13_zk_intuition.html). +- [ZK deeper overview](https://matter-labs.github.io/zksync-era/core/latest/docs/guides/advanced/14_zk_deeper_overview.html). +- [Prover keys](https://matter-labs.github.io/zksync-era/core/latest/docs/guides/advanced/15_prover_keys.html). +- [Overview of our ZK proving system implementation](https://matter-labs.github.io/zksync-era/core/latest/specs/prover/overview.html).
diff --git a/prover/docs/src/README.md b/prover/docs/src/README.md new file mode 100644 index 000000000000..991c91219e99 --- /dev/null +++ b/prover/docs/src/README.md @@ -0,0 +1,16 @@ +# Prover subsystem documentation + +This is technical documentation for the prover subsystem. It aims to help developers set up a development environment +for working with provers. This documentation assumes that you are already familiar with how ZKsync works and that you +need to work with the prover code. + +It does not cover topics such as the basics of ZK or production deployment for provers.
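To make the version-insertion step from the launch guide above concrete, here is one possible invocation. The protocol version and SNARK wrapper hash below are hypothetical placeholder values, not values taken from this PR; take the real ones from your own chain's configuration:

```bash
# Hypothetical values for illustration only.
zkstack dev prover insert-version --version 0.24.2 \
    --snark-wrapper=0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2

# Verify what the prover database now reports.
zkstack dev prover info
```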
+ +## Table of contents + +- [Intro](00_intro.md) +- [Setting up a GCP VM](01_gcp_vm.md) +- [Workspace setup](02_setup.md) +- [Running prover subsystem](03_launch.md) +- [Proof generation flow](04_flow.md) +- [Further reading](99_further_reading.md) diff --git a/prover/docs/src/SUMMARY.md b/prover/docs/src/SUMMARY.md new file mode 100644 index 000000000000..d4a6fa15d778 --- /dev/null +++ b/prover/docs/src/SUMMARY.md @@ -0,0 +1,11 @@ +# Summary + +[Introduction](./00_intro.md) + +- [Creating a GCP VM](./01_gcp_vm.md) +- [Development environment setup](./02_setup.md) +- [Running provers](./03_launch.md) +- [Prover flow](./04_flow.md) +- [Proving a batch](./05_proving_batch.md) + +[Further reading](./99_further_reading.md) diff --git a/prover/docs/theme/head.hbs b/prover/docs/theme/head.hbs new file mode 100644 index 000000000000..66ee37538adf --- /dev/null +++ b/prover/docs/theme/head.hbs @@ -0,0 +1 @@ + diff --git a/renovate.json b/renovate.json index eeccfee848dc..fd09d70ffe4b 100644 --- a/renovate.json +++ b/renovate.json @@ -1,5 +1,13 @@ { - "extends": ["config:base", "schedule:earlyMondays","helpers:pinGitHubActionDigests"], + "extends": ["config:base", "helpers:pinGitHubActionDigests"], "enabledManagers": ["github-actions"], - "prCreation": "immediate" + "prCreation": "not-pending", + "groupName": "github actions monthly updates", + "schedule": ["monthly"], + "packageRules": [ + { + "managers": ["github-actions"], + "groupName": "all-github-actions-updates" + } + ] } diff --git a/rust-toolchain b/rust-toolchain index 03c040b91f1f..bc5d1d6bbd8e 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1,2 @@ -nightly-2024-08-01 +[toolchain] +channel = "nightly-2024-08-01" diff --git a/yarn.lock b/yarn.lock index 255bd901e035..15fb8bb7d967 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1727,24 +1727,15 @@ resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-deploy/-/hardhat-zksync-deploy-1.5.0.tgz#40cb454fb187da4bb354f3acb48762a6657fcb36" integrity sha512-7LAgYYwoKWHeR+3CyWEvA3NKBKtt7ktcr7SX6ZPgbEYqHAdXH02vxJZGwNADtMWpyYm8h+fEQkpPIgErD4NhmA== dependencies: - "@matterlabs/hardhat-zksync-solc" "^1.0.5" - chalk "4.1.2" - ts-morph "^19.0.0" - -"@matterlabs/hardhat-zksync-deploy@^1.3.0": - version "1.3.0" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-deploy/-/hardhat-zksync-deploy-1.3.0.tgz#5c2b723318ddf6c4d3929ec225401864ff54557a" - integrity sha512-4UHOgOwIBC4JA3W8DE9GHqbAuBhCPAjtM+Oew1aiYYGkIsPUAMYsH35+4I2FzJsYyE6mD6ATmoS/HfZweQHTlQ== - dependencies: - "@matterlabs/hardhat-zksync-solc" "^1.0.4" - chai "^4.3.6" - chalk "4.1.2" + "@matterlabs/hardhat-zksync-solc" "^1.2.0" + chai "^4.3.4" + chalk "^4.1.2" fs-extra "^11.2.0" - glob "^10.3.10" + glob "^10.4.1" lodash "^4.17.21" - sinon "^17.0.1" + sinon "^18.0.0" sinon-chai "^3.7.0" - ts-morph "^21.0.1" + ts-morph "^22.0.0" "@matterlabs/hardhat-zksync-node@^0.0.1-beta.7": version "0.0.1" @@ -1789,7 +1780,7 @@ chalk "4.1.2" dockerode "^3.3.4" -"@matterlabs/hardhat-zksync-solc@^1.0.4", "@matterlabs/hardhat-zksync-solc@^1.0.5", "@matterlabs/hardhat-zksync-solc@^1.1.4": +"@matterlabs/hardhat-zksync-solc@^1.0.5", "@matterlabs/hardhat-zksync-solc@^1.1.4": version "1.1.4" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.1.4.tgz#04a2fad6fb6b6944c64ad969080ee65b9af3f617" integrity sha512-4/usbogh9neewR2/v8Dn2OzqVblZMUuT/iH2MyPZgPRZYQlL4SlZtMvokU9UQjZT6iSoaKCbbdWESHDHSzfUjA== @@ -1823,10 +1814,10 @@ sinon-chai "^3.7.0" undici "^6.18.2" -"@matterlabs/hardhat-zksync-verify@^0.4.0": 
- version "0.4.0" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-verify/-/hardhat-zksync-verify-0.4.0.tgz#f812c19950022fc36728f3796f6bdae5633e2fcd" - integrity sha512-GPZmAumFl3ZMPKbECX7Qw8CriwZKWd1DlCRhoG/6YYc6mFy4+MXkF1XsHLMs5r34N+GDOfbVZVMeftIlJC96Kg== +"@matterlabs/hardhat-zksync-solc@^1.2.4": + version "1.2.5" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.2.5.tgz#fbeeabc3fea0dd232fa3c8cb31bd93c103eba11a" + integrity sha512-iZyznWl1Hoe/Z46hnUe1s2drBZBjJOS/eN+Ql2lIBX9B6NevBl9DYzkKzH5HEIMCLGnX9sWpRAJqUQJWy9UB6w== dependencies: "@nomiclabs/hardhat-docker" "^2.0.2" chai "^4.3.4" @@ -1871,20 +1862,20 @@ sinon "^18.0.0" sinon-chai "^3.7.0" -"@matterlabs/hardhat-zksync-vyper@^1.0.8": - version "1.0.8" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-1.0.8.tgz#d5bd496715a1e322b0bf3926b4146b4e18ab64ff" - integrity sha512-XR7rbfDuBG5/LZWYfhQTP9gD+U24hSJHDuZ9U55wgIfiQTOxPoztFwEbQNiC39vjT5MjP/Nv8/IDrlEBkaVCgw== +"@matterlabs/hardhat-zksync-vyper@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-1.1.0.tgz#b3fb304429e88a84b4abc3fe4e5a83b2f5e907bd" + integrity sha512-zDjHPeIuHRpumXiWZUbhoji4UJe09jTDRn4xnxsuVkLH7qLAm0VDFzCXYNMvEuySZSdhbSbekxJsH9Kunc5ycA== dependencies: - "@nomiclabs/hardhat-docker" "^2.0.0" - chai "^4.3.6" - chalk "4.1.2" + "@nomiclabs/hardhat-docker" "^2.0.2" + chai "^4.3.4" + chalk "^4.1.2" dockerode "^4.0.2" - fs-extra "^11.1.1" - semver "^7.5.4" - sinon "^17.0.1" + fs-extra "^11.2.0" + semver "^7.6.2" + sinon "^18.0.0" sinon-chai "^3.7.0" - undici "^5.14.0" + undici "^6.18.2" "@matterlabs/prettier-config@^1.0.3": version "1.0.3" @@ -2659,10 +2650,10 @@ mkdirp "^2.1.6" path-browserify "^1.0.1" -"@ts-morph/common@~0.22.0": - version "0.22.0" - resolved "https://registry.yarnpkg.com/@ts-morph/common/-/common-0.22.0.tgz#8951d451622a26472fbc3a227d6c3a90e687a683" - integrity sha512-HqNBuV/oIlMKdkLshXd1zKBqNQCsuPEsgQOkfFQ/eUKjRlwndXW1AjN9LVkBEIukm00gGXSRmfkl0Wv5VXLnlw== +"@ts-morph/common@~0.23.0": + version "0.23.0" + resolved "https://registry.yarnpkg.com/@ts-morph/common/-/common-0.23.0.tgz#bd4ddbd3f484f29476c8bd985491592ae5fc147e" + integrity sha512-m7Lllj9n/S6sOkCkRftpM7L24uvmfXQFedlW/4hENcuJH1HHm9u5EgxZb9uVjQSCGrbBWBkOGgcTxNg36r6ywA== dependencies: fast-glob "^3.3.2" minimatch "^9.0.3" @@ -3342,9 +3333,9 @@ ansi-regex@^5.0.1: integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== ansi-regex@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.0.1.tgz#3183e38fae9a65d7cb5e53945cd5897d0260a06a" - integrity sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA== + version "6.1.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.1.0.tgz#95ec409c69619d6cb1b8b34f14b660ef28ebd654" + integrity sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA== ansi-styles@^3.2.1: version "3.2.1" @@ -4229,6 +4220,11 @@ code-block-writer@^12.0.0: resolved "https://registry.yarnpkg.com/code-block-writer/-/code-block-writer-12.0.0.tgz#4dd58946eb4234105aff7f0035977b2afdc2a770" integrity sha512-q4dMFMlXtKR3XNBHyMHt/3pwYNA69EDk00lloMOaaUMKPUXBw6lpXtbu3MMVG6/uOihGnRDOlkyqsONEUj60+w== +code-block-writer@^13.0.1: + version "13.0.3" + resolved 
"https://registry.yarnpkg.com/code-block-writer/-/code-block-writer-13.0.3.tgz#90f8a84763a5012da7af61319dd638655ae90b5b" + integrity sha512-Oofo0pq3IKnsFtuHqSF7TqBfr71aeyZDVJ0HpmqB7FBM2qEigL0iPONSCZSO9pE9dZTAxANe5XHG9Uy0YMv8cg== + collect-v8-coverage@^1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz#c0b29bcd33bcd0779a1344c2136051e6afd3d9e9" @@ -5831,9 +5827,9 @@ for-each@^0.3.3: is-callable "^1.1.3" foreground-child@^3.1.0: - version "3.1.1" - resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-3.1.1.tgz#1d173e776d75d2772fed08efe4a0de1ea1b12d0d" - integrity sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg== + version "3.3.0" + resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-3.3.0.tgz#0ac8644c06e431439f8561db8ecf29a7b5519c77" + integrity sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg== dependencies: cross-spawn "^7.0.0" signal-exit "^4.0.1" @@ -6126,16 +6122,17 @@ glob@8.1.0, glob@^8.0.3: minimatch "^5.0.1" once "^1.3.0" -glob@^10.3.10: - version "10.3.16" - resolved "https://registry.yarnpkg.com/glob/-/glob-10.3.16.tgz#bf6679d5d51279c8cfae4febe0d051d2a4bf4c6f" - integrity sha512-JDKXl1DiuuHJ6fVS2FXjownaavciiHNUU4mOvV/B793RLh05vZL1rcPnCSaOgv1hDT6RDlY7AB7ZUvFYAtPgAw== +glob@^10.4.1: + version "10.4.5" + resolved "https://registry.yarnpkg.com/glob/-/glob-10.4.5.tgz#f4d9f0b90ffdbab09c9d77f5f29b4262517b0956" + integrity sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg== dependencies: foreground-child "^3.1.0" jackspeak "^3.1.2" - minimatch "^9.0.1" - minipass "^7.0.4" - path-scurry "^1.11.0" + minimatch "^9.0.4" + minipass "^7.1.2" + package-json-from-dist "^1.0.0" + path-scurry "^1.11.1" glob@^5.0.15: version "5.0.15" @@ -7050,9 +7047,9 @@ istanbul-reports@^3.1.3: istanbul-lib-report "^3.0.0" jackspeak@^3.1.2: - version "3.1.2" - resolved "https://registry.yarnpkg.com/jackspeak/-/jackspeak-3.1.2.tgz#eada67ea949c6b71de50f1b09c92a961897b90ab" - integrity sha512-kWmLKn2tRtfYMF/BakihVVRzBKOxz4gJMiL2Rj91WnAB5TPZumSH99R/Yf1qE1u4uRimvCSJfm6hnxohXeEXjQ== + version "3.4.3" + resolved "https://registry.yarnpkg.com/jackspeak/-/jackspeak-3.4.3.tgz#8833a9d89ab4acde6188942bd1c53b6390ed5a8a" + integrity sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw== dependencies: "@isaacs/cliui" "^8.0.2" optionalDependencies: @@ -7962,9 +7959,9 @@ lowercase-keys@^3.0.0: integrity sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ== lru-cache@^10.2.0: - version "10.2.2" - resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.2.2.tgz#48206bc114c1252940c41b25b41af5b545aca878" - integrity sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ== + version "10.4.3" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.4.3.tgz#410fc8a17b70e598013df257c2446b7f3383f119" + integrity sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ== lru-cache@^5.1.1: version "5.1.1" @@ -8264,10 +8261,10 @@ minimatch@^7.4.3: dependencies: brace-expansion "^2.0.1" -minimatch@^9.0.1, minimatch@^9.0.3: - version "9.0.4" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.4.tgz#8e49c731d1749cbec05050ee5145147b32496a51" - integrity 
sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw== +minimatch@^9.0.3, minimatch@^9.0.4: + version "9.0.5" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.5.tgz#d74f9dd6b57d83d8e98cfb82133b03978bc929e5" + integrity sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow== dependencies: brace-expansion "^2.0.1" @@ -8283,10 +8280,10 @@ minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6, minimist@^1.2.8, minimist@~1. resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== -"minipass@^5.0.0 || ^6.0.2 || ^7.0.0", minipass@^7.0.4: - version "7.1.1" - resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.1.tgz#f7f85aff59aa22f110b20e27692465cf3bf89481" - integrity sha512-UZ7eQ+h8ywIRAW1hIEl2AqdwzJucU/Kp59+8kkZeSvafXhZjul247BvIJjEVFVeON6d7lM46XX1HXCduKAS8VA== +"minipass@^5.0.0 || ^6.0.2 || ^7.0.0", minipass@^7.1.2: + version "7.1.2" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.2.tgz#93a9626ce5e5e66bd4db86849e7515e92340a707" + integrity sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw== mkdirp-classic@^0.5.2: version "0.5.3" @@ -8770,15 +8767,10 @@ p-try@^2.0.0: resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== -package-json@^8.1.0: - version "8.1.1" - resolved "https://registry.yarnpkg.com/package-json/-/package-json-8.1.1.tgz#3e9948e43df40d1e8e78a85485f1070bf8f03dc8" - integrity sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA== - dependencies: - got "^12.1.0" - registry-auth-token "^5.0.1" - registry-url "^6.0.0" - semver "^7.3.7" +package-json-from-dist@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz#4f1471a010827a86f94cfd9b0727e36d267de505" + integrity sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw== package-json@^8.1.0: version "8.1.1" @@ -8855,7 +8847,7 @@ path-parse@^1.0.6, path-parse@^1.0.7: resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== -path-scurry@^1.11.0: +path-scurry@^1.11.1: version "1.11.1" resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-1.11.1.tgz#7960a668888594a0720b12a911d1a742ab9f11d2" integrity sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA== @@ -10199,7 +10191,7 @@ string-length@^4.0.1: char-regex "^1.0.2" strip-ansi "^6.0.0" -"string-width-cjs@npm:string-width@^4.2.0", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: +"string-width-cjs@npm:string-width@^4.2.0": version "4.2.3" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -10216,6 +10208,15 @@ string-width@^2.1.0, string-width@^2.1.1: is-fullwidth-code-point "^2.0.0" strip-ansi "^4.0.0" +string-width@^4.0.0, 
string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: + version "4.2.3" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + string-width@^5.0.1, string-width@^5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794" @@ -10282,7 +10283,7 @@ string_decoder@~1.1.1: dependencies: safe-buffer "~5.1.0" -"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1: +"strip-ansi-cjs@npm:strip-ansi@^6.0.1": version "6.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== @@ -10303,6 +10304,13 @@ strip-ansi@^5.1.0: dependencies: ansi-regex "^4.1.0" +strip-ansi@^6.0.0, strip-ansi@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + strip-ansi@^7.0.1: version "7.1.0" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.1.0.tgz#d5b6568ca689d8561370b0707685d22434faff45" @@ -10413,6 +10421,7 @@ synckit@^0.8.6: fast-glob "^3.3.2" hardhat "=2.22.2" preprocess "^3.2.0" + zksync-ethers "^5.9.0" table-layout@^1.0.2: version "1.0.2" @@ -10664,13 +10673,13 @@ ts-morph@^19.0.0: "@ts-morph/common" "~0.20.0" code-block-writer "^12.0.0" -ts-morph@^21.0.1: - version "21.0.1" - resolved "https://registry.yarnpkg.com/ts-morph/-/ts-morph-21.0.1.tgz#712302a0f6e9dbf1aa8d9cf33a4386c4b18c2006" - integrity sha512-dbDtVdEAncKctzrVZ+Nr7kHpHkv+0JDJb2MjjpBaj8bFeCkePU9rHfMklmhuLFnpeq/EJZk2IhStY6NzqgjOkg== +ts-morph@^22.0.0: + version "22.0.0" + resolved "https://registry.yarnpkg.com/ts-morph/-/ts-morph-22.0.0.tgz#5532c592fb6dddae08846f12c9ab0fc590b1d42e" + integrity sha512-M9MqFGZREyeb5fTl6gNHKZLqBQA0TjA1lea+CR48R8EBTDuWrNqW6ccC5QvjNR4s6wDumD3LTCjOFSp9iwlzaw== dependencies: - "@ts-morph/common" "~0.22.0" - code-block-writer "^12.0.0" + "@ts-morph/common" "~0.23.0" + code-block-writer "^13.0.1" ts-node@^10.1.0, ts-node@^10.7.0: version "10.9.2" @@ -11152,7 +11161,16 @@ workerpool@6.2.1: resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.1.tgz#46fc150c17d826b86a008e5a4508656777e9c343" integrity sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw== -"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0", wrap-ansi@^7.0.0: +"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + +wrap-ansi@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== diff --git a/zkstack_cli/Cargo.lock 
b/zkstack_cli/Cargo.lock index 8750de36c753..2206a1052f59 100644 --- a/zkstack_cli/Cargo.lock +++ b/zkstack_cli/Cargo.lock @@ -297,12 +297,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "base16ct" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" - [[package]] name = "base16ct" version = "0.2.0" @@ -609,6 +603,15 @@ dependencies = [ "terminal_size", ] +[[package]] +name = "clap_complete" +version = "4.5.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9646e2e245bf62f45d39a0f3f36f1171ad1ea0d6967fd114bca72cb02a8fcdfb" +dependencies = [ + "clap", +] + [[package]] name = "clap_derive" version = "4.5.18" @@ -650,7 +653,7 @@ dependencies = [ "coins-core", "digest", "hmac", - "k256 0.13.4", + "k256", "serde", "sha2", "thiserror", @@ -786,6 +789,15 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "const-decoder" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b381abde2cdc1bc3817e394b24e05667a2dc89f37570cbd34d9c397d99e56e3f" +dependencies = [ + "compile-fmt", +] + [[package]] name = "const-hex" version = "1.13.1" @@ -920,18 +932,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" -[[package]] -name = "crypto-bigint" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" -dependencies = [ - "generic-array", - "rand_core", - "subtle", - "zeroize", -] - [[package]] name = "crypto-bigint" version = "0.5.5" @@ -1051,16 +1051,6 @@ dependencies = [ "uuid 1.10.0", ] -[[package]] -name = "der" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" -dependencies = [ - "const-oid", - "zeroize", -] - [[package]] name = "der" version = "0.7.9" @@ -1079,7 +1069,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", - "serde", ] [[package]] @@ -1204,30 +1193,18 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" -[[package]] -name = "ecdsa" -version = "0.14.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" -dependencies = [ - "der 0.6.1", - "elliptic-curve 0.12.3", - "rfc6979 0.3.1", - "signature 1.6.4", -] - [[package]] name = "ecdsa" version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der 0.7.9", + "der", "digest", - "elliptic-curve 0.13.8", - "rfc6979 0.4.0", - "signature 2.2.0", - "spki 0.7.3", + "elliptic-curve", + "rfc6979", + "signature", + "spki", ] [[package]] @@ -1236,8 +1213,8 @@ version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "pkcs8 0.10.2", - "signature 2.2.0", + "pkcs8", + "signature", ] [[package]] @@ -1264,41 +1241,21 @@ dependencies = [ "serde", ] 
-[[package]] -name = "elliptic-curve" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" -dependencies = [ - "base16ct 0.1.1", - "crypto-bigint 0.4.9", - "der 0.6.1", - "digest", - "ff 0.12.1", - "generic-array", - "group 0.12.1", - "pkcs8 0.9.0", - "rand_core", - "sec1 0.3.0", - "subtle", - "zeroize", -] - [[package]] name = "elliptic-curve" version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "base16ct 0.2.0", - "crypto-bigint 0.5.5", + "base16ct", + "crypto-bigint", "digest", - "ff 0.13.0", + "ff", "generic-array", - "group 0.13.0", - "pkcs8 0.10.2", + "group", + "pkcs8", "rand_core", - "sec1 0.7.3", + "sec1", "subtle", "zeroize", ] @@ -1345,7 +1302,7 @@ dependencies = [ "base64 0.21.7", "bytes", "hex", - "k256 0.13.4", + "k256", "log", "rand", "rlp", @@ -1570,11 +1527,11 @@ dependencies = [ "cargo_metadata", "chrono", "const-hex", - "elliptic-curve 0.13.8", + "elliptic-curve", "ethabi", "generic-array", - "k256 0.13.4", - "num_enum 0.7.3", + "k256", + "num_enum", "once_cell", "open-fastrlp", "rand", @@ -1679,7 +1636,7 @@ dependencies = [ "coins-bip32", "coins-bip39", "const-hex", - "elliptic-curve 0.13.8", + "elliptic-curve", "eth-keystore", "ethers-core", "rand", @@ -1758,16 +1715,6 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "rand_core", - "subtle", -] - [[package]] name = "ff" version = "0.13.0" @@ -2065,24 +2012,13 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "rand_core", - "subtle", -] - [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.13.0", + "ff", "rand_core", "subtle", ] @@ -2662,18 +2598,6 @@ dependencies = [ "simple_asn1", ] -[[package]] -name = "k256" -version = "0.11.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" -dependencies = [ - "cfg-if", - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2", -] - [[package]] name = "k256" version = "0.13.4" @@ -2681,11 +2605,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", - "ecdsa 0.16.9", - "elliptic-curve 0.13.8", + "ecdsa", + "elliptic-curve", "once_cell", "sha2", - "signature 2.2.0", + "signature", ] [[package]] @@ -3113,34 +3037,13 @@ dependencies = [ "libc", ] -[[package]] -name = "num_enum" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a015b430d3c108a207fd776d2e2196aaf8b1cf8cf93253e3a097ff3085076a1" -dependencies = [ - "num_enum_derive 0.6.1", -] - [[package]] name = "num_enum" version = "0.7.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" dependencies = [ - "num_enum_derive 0.7.3", -] - -[[package]] -name = "num_enum_derive" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" -dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro2", - "quote", - "syn 2.0.79", + "num_enum_derive", ] [[package]] @@ -3149,7 +3052,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ - "proc-macro-crate 3.2.0", + "proc-macro-crate", "proc-macro2", "quote", "syn 2.0.79", @@ -3395,7 +3298,7 @@ version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 3.2.0", + "proc-macro-crate", "proc-macro2", "quote", "syn 1.0.109", @@ -3610,19 +3513,9 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" dependencies = [ - "der 0.7.9", - "pkcs8 0.10.2", - "spki 0.7.3", -] - -[[package]] -name = "pkcs8" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" -dependencies = [ - "der 0.6.1", - "spki 0.6.0", + "der", + "pkcs8", + "spki", ] [[package]] @@ -3631,8 +3524,8 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.9", - "spki 0.7.3", + "der", + "spki", ] [[package]] @@ -3702,23 +3595,13 @@ dependencies = [ "uint", ] -[[package]] -name = "proc-macro-crate" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" -dependencies = [ - "once_cell", - "toml_edit 0.19.15", -] - [[package]] name = "proc-macro-crate" version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.22.22", + "toml_edit", ] [[package]] @@ -4123,17 +4006,6 @@ dependencies = [ "windows-registry", ] -[[package]] -name = "rfc6979" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" -dependencies = [ - "crypto-bigint 0.4.9", - "hmac", - "zeroize", -] - [[package]] name = "rfc6979" version = "0.4.0" @@ -4217,10 +4089,10 @@ dependencies = [ "num-integer", "num-traits", "pkcs1", - "pkcs8 0.10.2", + "pkcs8", "rand_core", - "signature 2.2.0", - "spki 0.7.3", + "signature", + "spki", "subtle", "zeroize", ] @@ -4383,7 +4255,7 @@ version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ - "proc-macro-crate 3.2.0", + "proc-macro-crate", "proc-macro2", "quote", "syn 1.0.109", @@ -4426,30 +4298,16 @@ dependencies = [ "untrusted 0.9.0", ] -[[package]] -name = "sec1" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" -dependencies = [ - "base16ct 0.1.1", - "der 0.6.1", - "generic-array", - "pkcs8 0.9.0", - "subtle", - "zeroize", -] - [[package]] name = "sec1" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "base16ct 0.2.0", - "der 0.7.9", + "base16ct", + "der", "generic-array", - "pkcs8 0.10.2", + "pkcs8", "subtle", "zeroize", ] @@ -4754,17 +4612,6 @@ dependencies = [ "digest", ] -[[package]] -name = "sha2_ce" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca2daa77078f4ddff27e75c4bf59e4c2697525f56dbb3c842d34a5d1f2b04a2" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest", -] - [[package]] name = "sha3" version = "0.10.8" @@ -4775,16 +4622,6 @@ dependencies = [ "keccak", ] -[[package]] -name = "sha3_ce" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34c9a08202c50378d8a07a5f458193a5f542d2828ac6640263dbc0c2533ea25e" -dependencies = [ - "digest", - "keccak", -] - [[package]] name = "sharded-slab" version = "0.1.7" @@ -4809,16 +4646,6 @@ dependencies = [ "libc", ] -[[package]] -name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" -dependencies = [ - "digest", - "rand_core", -] - [[package]] name = "signature" version = "2.2.0" @@ -4936,16 +4763,6 @@ dependencies = [ "lock_api", ] -[[package]] -name = "spki" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" -dependencies = [ - "base64ct", - "der 0.6.1", -] - [[package]] name = "spki" version = "0.7.3" @@ -4953,7 +4770,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der 0.7.9", + "der", ] [[package]] @@ -5683,7 +5500,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.22", + "toml_edit", ] [[package]] @@ -5695,17 +5512,6 @@ dependencies = [ "serde", ] -[[package]] -name = "toml_edit" -version = "0.19.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" -dependencies = [ - "indexmap 2.6.0", - "toml_datetime", - "winnow 0.5.40", -] - [[package]] name = "toml_edit" version = "0.22.22" @@ -5716,7 +5522,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.20", + "winnow", ] [[package]] @@ -6513,15 +6319,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" -[[package]] -name = "winnow" -version = "0.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" -dependencies = [ - "memchr", -] - [[package]] name = "winnow" version = "0.6.20" @@ -6657,50 +6454,6 @@ dependencies = [ "zstd", ] -[[package]] -name = "zk_evm" -version = "0.133.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9af08e9284686a1b0c89ec4931eb915ac0729367f1247abd06164874fe738106" -dependencies = [ - "anyhow", - "lazy_static", - "num", - 
"serde", - "serde_json", - "static_assertions", - "zk_evm_abstractions", - "zkevm_opcode_defs", -] - -[[package]] -name = "zk_evm_abstractions" -version = "0.140.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be696258861eba4e6625a5665084b2266720bb67f4ba69819469700ac5c6a401" -dependencies = [ - "anyhow", - "num_enum 0.6.1", - "serde", - "static_assertions", - "zkevm_opcode_defs", -] - -[[package]] -name = "zkevm_opcode_defs" -version = "0.132.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0769f7b27d8fb06e715da3290c575cac5d04d10a557faef180e847afce50ac4" -dependencies = [ - "bitflags 2.6.0", - "blake2", - "ethereum-types", - "k256 0.11.6", - "lazy_static", - "sha2_ce", - "sha3_ce", -] - [[package]] name = "zkstack" version = "0.1.0" @@ -6709,11 +6462,12 @@ dependencies = [ "chrono", "clap", "clap-markdown", + "clap_complete", "cliclack", "common", "config", + "dirs", "ethers", - "eyre", "futures", "human-panic", "lazy_static", @@ -6749,13 +6503,15 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", + "const-decoder", "ethabi", "hex", - "num_enum 0.7.3", + "num_enum", "secrecy", "serde", "serde_json", "serde_with", + "sha2", "strum", "thiserror", "tiny-keccak", @@ -6789,11 +6545,6 @@ dependencies = [ "rand", "secrecy", "serde", - "strum", - "strum_macros", - "time", - "url", - "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -6809,9 +6560,9 @@ dependencies = [ "anyhow", "blst", "ed25519-dalek", - "elliptic-curve 0.13.8", + "elliptic-curve", "hex", - "k256 0.13.4", + "k256", "num-bigint", "num-traits", "rand", @@ -6860,11 +6611,11 @@ name = "zksync_contracts" version = "0.1.0" dependencies = [ "envy", - "ethabi", "hex", "once_cell", "serde", "serde_json", + "zksync_basic_types", "zksync_utils", ] @@ -6882,7 +6633,6 @@ dependencies = [ "sha2", "thiserror", "zksync_basic_types", - "zksync_utils", ] [[package]] @@ -6943,7 +6693,6 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", - "time", "tracing", "zksync_basic_types", "zksync_config", @@ -6958,7 +6707,6 @@ version = "0.1.0" dependencies = [ "once_cell", "zksync_basic_types", - "zksync_utils", ] [[package]] @@ -6973,11 +6721,10 @@ dependencies = [ "hex", "itertools 0.10.5", "num", - "num_enum 0.7.3", + "num_enum", "once_cell", "prost 0.12.6", "rlp", - "secp256k1", "serde", "serde_json", "serde_with", @@ -6985,14 +6732,12 @@ dependencies = [ "thiserror", "tracing", "zksync_basic_types", - "zksync_config", "zksync_contracts", "zksync_crypto_primitives", "zksync_mini_merkle_tree", "zksync_protobuf", "zksync_protobuf_build", "zksync_system_constants", - "zksync_utils", ] [[package]] @@ -7000,19 +6745,12 @@ name = "zksync_utils" version = "0.1.0" dependencies = [ "anyhow", - "bigdecimal", "futures", - "hex", - "num", "once_cell", "reqwest 0.12.8", - "serde", "serde_json", - "thiserror", "tokio", "tracing", - "zk_evm", - "zksync_basic_types", "zksync_vlog", ] diff --git a/zkstack_cli/Cargo.toml b/zkstack_cli/Cargo.toml index a805cf85d518..1f493f9c3e41 100644 --- a/zkstack_cli/Cargo.toml +++ b/zkstack_cli/Cargo.toml @@ -40,11 +40,12 @@ zksync_protobuf_build = "=0.5.0" # External dependencies anyhow = "1.0.82" clap = { version = "4.4", features = ["derive", "wrap_help", "string"] } +clap_complete = "4.5.33" +dirs = "5.0.1" slugify-rs = "0.0.3" cliclack = "0.2.5" console = "0.15.8" chrono = "0.4.38" -eyre = "0.6.12" ethers = "2.0" futures = "0.3.30" human-panic = "2.0" diff --git a/zkstack_cli/README.md b/zkstack_cli/README.md index 
f1c92cc3d2e3..e81165088218 100644 --- a/zkstack_cli/README.md +++ b/zkstack_cli/README.md @@ -30,6 +30,16 @@ zkstackup --local This command installs `zkstack` from the current repository. +#### Manual installation + +Run from the repository root: + +```bash +cargo install --path zkstack_cli/crates/zkstack --force --locked +``` + +And make sure that `.cargo/bin` is included in your `PATH`. + ### Foundry Integration Foundry is used for deploying smart contracts. Pass flags for Foundry integration with the `-a` option, e.g.,
diff --git a/zkstack_cli/crates/common/src/contracts.rs b/zkstack_cli/crates/common/src/contracts.rs index b3c6688b9c70..7ba5a1ab333f 100644 --- a/zkstack_cli/crates/common/src/contracts.rs +++ b/zkstack_cli/crates/common/src/contracts.rs @@ -4,12 +4,6 @@ use xshell::{cmd, Shell}; use crate::cmd::Cmd; -pub fn build_test_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { - let _dir_guard = shell.push_dir(link_to_code.join("etc/contracts-test-data")); - Cmd::new(cmd!(shell, "yarn install")).run()?; - Ok(Cmd::new(cmd!(shell, "yarn build")).run()?) -} - pub fn build_l1_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { let _dir_guard = shell.push_dir(link_to_code.join("contracts/l1-contracts")); Ok(Cmd::new(cmd!(shell, "forge build")).run()?) @@ -26,18 +20,7 @@ pub fn build_l2_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result pub fn build_system_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { let _dir_guard = shell.push_dir(link_to_code.join("contracts/system-contracts")); - Cmd::new(cmd!(shell, "yarn install")).run()?; + // Do not update era-contract's lockfile to avoid dirty submodule + Cmd::new(cmd!(shell, "yarn install --frozen-lockfile")).run()?; Ok(Cmd::new(cmd!(shell, "yarn build")).run()?) - // Cmd::new(cmd!(shell, "yarn preprocess:system-contracts")).run()?; - // Cmd::new(cmd!( - // shell, - // "forge build --zksync --zk-enable-eravm-extensions" - // )) - // .run()?; - // Cmd::new(cmd!(shell, "yarn preprocess:bootloader")).run()?; - // Ok(Cmd::new(cmd!( - // shell, - // "forge build --zksync --zk-enable-eravm-extensions" - // )) - // .run()?) }
diff --git a/zkstack_cli/crates/common/src/docker.rs b/zkstack_cli/crates/common/src/docker.rs index a5731808814f..71e2040ee31c 100644 --- a/zkstack_cli/crates/common/src/docker.rs +++ b/zkstack_cli/crates/common/src/docker.rs @@ -14,7 +14,11 @@ pub fn up(shell: &Shell, docker_compose_file: &str, detach: bool) -> anyhow::Res } pub fn down(shell: &Shell, docker_compose_file: &str) -> anyhow::Result<()> { - Ok(Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} down")).run()?) + Ok(Cmd::new(cmd!( + shell, + "docker compose -f {docker_compose_file} down -v" + )) + .run()?) } pub fn run(shell: &Shell, docker_image: &str, docker_args: Vec<String>) -> anyhow::Result<()> {
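The `-v` flag added to `docker compose down` above also deletes the volumes created by the compose file, so stale database state cannot survive into the next run. A generic way to observe the difference with the plain Docker CLI (nothing here is specific to this repository):

```bash
# Without -v, compose-managed volumes survive and still show up here:
docker compose down
docker volume ls

# With -v, those volumes are removed together with the containers:
docker compose down -v
docker volume ls
```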
diff --git a/zkstack_cli/crates/common/src/term/spinner.rs b/zkstack_cli/crates/common/src/term/spinner.rs index b97ba075ac45..3ec2631804af 100644 --- a/zkstack_cli/crates/common/src/term/spinner.rs +++ b/zkstack_cli/crates/common/src/term/spinner.rs @@ -1,34 +1,40 @@ -use std::time::Instant; +use std::{fmt::Display, io::IsTerminal, time::Instant}; use cliclack::{spinner, ProgressBar}; -use crate::config::global_config; +use crate::{config::global_config, logger}; /// Spinner is a helper struct to show a spinner while some operation is running. pub struct Spinner { msg: String, - pb: ProgressBar, + output: SpinnerOutput, time: Instant, } impl Spinner { /// Create a new spinner with a message. pub fn new(msg: &str) -> Self { - let pb = spinner(); - pb.start(msg); - if global_config().verbose { - pb.stop(msg); - } + let output = if std::io::stdout().is_terminal() { + let pb = spinner(); + pb.start(msg); + if global_config().verbose { + pb.stop(msg); + } + SpinnerOutput::Progress(pb) + } else { + logger::info(msg); + SpinnerOutput::Plain() + }; Spinner { msg: msg.to_owned(), - pb, + output, time: Instant::now(), } } /// Manually finish the spinner. pub fn finish(self) { - self.pb.stop(format!( + self.output.stop(format!( "{} done in {} secs", self.msg, self.time.elapsed().as_secs_f64() @@ -37,7 +43,7 @@ impl Spinner { /// Interrupt the spinner with a failed message. pub fn fail(self) { - self.pb.error(format!( + self.output.error(format!( "{} failed in {} secs", self.msg, self.time.elapsed().as_secs_f64() @@ -46,6 +52,33 @@ impl Spinner { /// Freeze the spinner with current message. pub fn freeze(self) { - self.pb.stop(self.msg); + self.output.stop(self.msg); + } +} + +/// An abstraction that makes the interactive progress bar optional in environments where a +/// virtual terminal is not available. +/// +/// Uses plain `logger::{info,error}` as the fallback. +/// +/// See https://github.com/console-rs/indicatif/issues/530 for more details. +enum SpinnerOutput { + Progress(ProgressBar), + Plain(), +} + +impl SpinnerOutput { + fn error(&self, msg: impl Display) { + match self { + SpinnerOutput::Progress(pb) => pb.error(msg), + SpinnerOutput::Plain() => logger::error(msg), + } + } + + fn stop(self, msg: impl Display) { + match self { + SpinnerOutput::Progress(pb) => pb.stop(msg), + SpinnerOutput::Plain() => logger::info(msg), + } } }
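Because `is_terminal()` drives the fallback above, piping the CLI's output should now yield plain log lines instead of spinner control sequences. A quick sanity check, assuming any long-running subcommand (the choice below is arbitrary):

```bash
# stdout is a TTY: the interactive spinner is rendered.
zkstack dev prover info

# stdout is a pipe: SpinnerOutput::Plain logs via logger::info instead.
zkstack dev prover info | cat
```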
diff --git a/zkstack_cli/crates/config/src/chain.rs b/zkstack_cli/crates/config/src/chain.rs index 6c82d6ef3c37..c8fa0717dff5 100644 --- a/zkstack_cli/crates/config/src/chain.rs +++ b/zkstack_cli/crates/config/src/chain.rs @@ -40,6 +40,8 @@ pub struct ChainConfigInternal { pub wallet_creation: WalletCreation, #[serde(skip_serializing_if = "Option::is_none")] pub legacy_bridge: Option<bool>, + #[serde(default)] // for backward compatibility + pub evm_emulator: bool, } /// Chain configuration file. This file is created in the chain @@ -61,6 +63,7 @@ pub struct ChainConfig { pub wallet_creation: WalletCreation, pub shell: OnceCell<Shell>, pub legacy_bridge: Option<bool>, + pub evm_emulator: bool, } impl Serialize for ChainConfig { @@ -157,6 +160,7 @@ impl ChainConfig { base_token: self.base_token.clone(), wallet_creation: self.wallet_creation, legacy_bridge: self.legacy_bridge, + evm_emulator: self.evm_emulator, } } }
diff --git a/zkstack_cli/crates/config/src/contracts.rs b/zkstack_cli/crates/config/src/contracts.rs index e6676989e68c..79044a59f3af 100644 --- a/zkstack_cli/crates/config/src/contracts.rs +++ b/zkstack_cli/crates/config/src/contracts.rs @@ -7,7 +7,7 @@ use crate::{ deploy_ecosystem::output::DeployL1Output, deploy_l2_contracts::output::{ ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput, - Multicall3Output, + Multicall3Output, TimestampAsserterOutput, }, register_chain::output::RegisterChainOutput, }, @@ -85,6 +85,7 @@ impl ContractsConfig { ) -> anyhow::Result<()> { self.bridges.shared.l2_address = Some(initialize_bridges_output.l2_shared_bridge_proxy); self.bridges.erc20.l2_address = Some(initialize_bridges_output.l2_shared_bridge_proxy); + self.l2.legacy_shared_bridge_addr = Some(initialize_bridges_output.l2_shared_bridge_proxy); Ok(()) } @@ -108,6 +109,14 @@ impl ContractsConfig { self.l2.multicall3 = Some(multicall3_output.multicall3); Ok(()) } + + pub fn set_timestamp_asserter_addr( + &mut self, + timestamp_asserter_output: &TimestampAsserterOutput, + ) -> anyhow::Result<()> { + self.l2.timestamp_asserter_addr = Some(timestamp_asserter_output.timestamp_asserter); + Ok(()) + } } impl FileConfigWithDefaultName for ContractsConfig { @@ -159,4 +168,6 @@ pub struct L2Contracts { pub default_l2_upgrader: Address, pub consensus_registry: Option<Address>, pub multicall3: Option<Address>, + pub legacy_shared_bridge_addr: Option<Address>, + pub timestamp_asserter_addr: Option<Address>
, } diff --git a/zkstack_cli/crates/config/src/ecosystem.rs b/zkstack_cli/crates/config/src/ecosystem.rs index 79cb1c4ea27d..c67aebf2a46c 100644 --- a/zkstack_cli/crates/config/src/ecosystem.rs +++ b/zkstack_cli/crates/config/src/ecosystem.rs @@ -178,6 +178,7 @@ impl EcosystemConfig { .artifacts_path .unwrap_or_else(|| self.get_chain_artifacts_path(name)), legacy_bridge: config.legacy_bridge, + evm_emulator: config.evm_emulator, }) } @@ -232,7 +233,11 @@ impl EcosystemConfig { } pub fn get_default_configs_path(&self) -> PathBuf { - self.link_to_code.join(CONFIGS_PATH) + Self::default_configs_path(&self.link_to_code) + } + + pub fn default_configs_path(link_to_code: &Path) -> PathBuf { + link_to_code.join(CONFIGS_PATH) } /// Path to the predefined ecosystem configs diff --git a/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs index 29be89b91016..7b2b56c81548 100644 --- a/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs @@ -8,6 +8,8 @@ impl ZkStackConfig for DefaultL2UpgradeOutput {} impl ZkStackConfig for ConsensusRegistryOutput {} impl ZkStackConfig for Multicall3Output {} +impl ZkStackConfig for TimestampAsserterOutput {} + #[derive(Debug, Clone, Serialize, Deserialize)] pub struct InitializeBridgeOutput { pub l2_shared_bridge_implementation: Address, @@ -29,3 +31,8 @@ pub struct ConsensusRegistryOutput { pub struct Multicall3Output { pub multicall3: Address, } + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TimestampAsserterOutput { + pub timestamp_asserter: Address, +} diff --git a/zkstack_cli/crates/config/src/genesis.rs b/zkstack_cli/crates/config/src/genesis.rs index 933252541f43..2d9ac7fcdc66 100644 --- a/zkstack_cli/crates/config/src/genesis.rs +++ b/zkstack_cli/crates/config/src/genesis.rs @@ -1,5 +1,6 @@ use std::path::Path; +use anyhow::Context as _; use xshell::Shell; use zksync_basic_types::L1ChainId; pub use zksync_config::GenesisConfig; @@ -11,11 +12,23 @@ use crate::{ ChainConfig, }; -pub fn update_from_chain_config(genesis: &mut GenesisConfig, config: &ChainConfig) { +pub fn update_from_chain_config( + genesis: &mut GenesisConfig, + config: &ChainConfig, +) -> anyhow::Result<()> { genesis.l2_chain_id = config.chain_id; // TODO(EVM-676): for now, the settlement layer is always the same as the L1 network genesis.l1_chain_id = L1ChainId(config.l1_network.chain_id()); genesis.l1_batch_commit_data_generator_mode = config.l1_batch_commit_data_generator_mode; + genesis.evm_emulator_hash = if config.evm_emulator { + Some(genesis.evm_emulator_hash.context( + "impossible to initialize a chain with EVM emulator: the template genesis config \ + does not contain EVM emulator hash", + )?) 
+ } else { + None + }; + Ok(()) } impl FileConfigWithDefaultName for GenesisConfig {
diff --git a/zkstack_cli/crates/zkstack/Cargo.toml b/zkstack_cli/crates/zkstack/Cargo.toml index a9fcecaf79b4..85ab8081eaa4 100644 --- a/zkstack_cli/crates/zkstack/Cargo.toml +++ b/zkstack_cli/crates/zkstack/Cargo.toml @@ -14,10 +14,12 @@ keywords.workspace = true anyhow.workspace = true chrono.workspace = true clap.workspace = true +clap_complete.workspace = true clap-markdown.workspace = true cliclack.workspace = true common.workspace = true config.workspace = true +dirs.workspace = true ethers.workspace = true futures.workspace = true human-panic.workspace = true @@ -49,6 +51,9 @@ rand.workspace = true zksync_consensus_utils.workspace = true [build-dependencies] -eyre.workspace = true +anyhow.workspace = true +clap_complete.workspace = true +dirs.workspace = true ethers.workspace = true +xshell.workspace = true zksync_protobuf_build.workspace = true
diff --git a/zkstack_cli/crates/zkstack/README.md b/zkstack_cli/crates/zkstack/README.md index 6e529efc2009..f352d96fec42 100644 --- a/zkstack_cli/crates/zkstack/README.md +++ b/zkstack_cli/crates/zkstack/README.md @@ -508,7 +508,11 @@ Initialize prover - `--public-location <PUBLIC_LOCATION>` - `--public-project-id <PUBLIC_PROJECT_ID>` - `--bellman-cuda-dir <BELLMAN_CUDA_DIR>` -- `--download-key <DOWNLOAD_KEY>` +- `--bellman-cuda` + + Possible values: `true`, `false` + +- `--setup-compressor-key <SETUP_COMPRESSOR_KEY>` + Possible values: `true`, `false` @@ -564,6 +568,10 @@ Run prover Possible values: `true`, `false` +- `--tag` - Tag of the docker image to run. + + Default value is `latest2.0` but you can specify your preferred one. + - `--round <ROUND>` Possible values: `all-rounds`, `basic-circuits`, `leaf-aggregation`, `node-aggregation`, `recursion-tip`, `scheduler`
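As a usage sketch combining the `zkstack prover run` options documented above; the flag values here are illustrative, and `zkstack prover run --help` remains the authoritative reference:

```bash
# Run only the basic-circuits witness generator round, pinning a
# hypothetical image tag instead of the default `latest2.0`.
zkstack prover run --component=witness-generator --round=basic-circuits --tag 2.0-custom
```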
diff --git a/zkstack_cli/crates/zkstack/build.rs b/zkstack_cli/crates/zkstack/build.rs index 92f34a542b7f..e52e952bf730 100644 --- a/zkstack_cli/crates/zkstack/build.rs +++ b/zkstack_cli/crates/zkstack/build.rs @@ -1,12 +1,29 @@ -use std::path::PathBuf; +use std::path::{Path, PathBuf}; +use anyhow::{anyhow, Context}; use ethers::contract::Abigen; +use xshell::{cmd, Shell}; -fn main() -> eyre::Result<()> { +const COMPLETION_DIR: &str = "completion"; + +fn main() -> anyhow::Result<()> { let outdir = PathBuf::from(std::env::var("OUT_DIR")?).canonicalize()?; - Abigen::new("ConsensusRegistry", "abi/ConsensusRegistry.json")? - .generate()? - .write_to_file(outdir.join("consensus_registry_abi.rs"))?; + Abigen::new("ConsensusRegistry", "abi/ConsensusRegistry.json") + .map_err(|_| anyhow!("Failed ABI deserialization"))? + .generate() + .map_err(|_| anyhow!("Failed ABI generation"))? + .write_to_file(outdir.join("consensus_registry_abi.rs")) + .context("Failed to write ABI to file")?; + + if let Err(e) = build_dependencies() { + println!("cargo:error=It was not possible to install project dependencies"); + println!("cargo:error={}", e); + } + + if let Err(e) = configure_shell_autocompletion() { + println!("cargo:warning=It was not possible to install autocomplete scripts. Please generate them manually with `zkstack autocomplete`"); + println!("cargo:error={}", e); + }; zksync_protobuf_build::Config { input_root: "src/commands/consensus/proto".into(), @@ -19,3 +36,114 @@ fn main() -> eyre::Result<()> { .unwrap(); Ok(()) } + +fn configure_shell_autocompletion() -> anyhow::Result<()> { + // Array of supported shells + let shells = [ + clap_complete::Shell::Bash, + clap_complete::Shell::Fish, + clap_complete::Shell::Zsh, + ]; + + for shell in shells { + std::fs::create_dir_all(&shell.autocomplete_folder()?) + .context("it was impossible to create the configuration directory")?; + + let src = Path::new(COMPLETION_DIR).join(shell.autocomplete_file_name()?); + let dst = shell + .autocomplete_folder()? + .join(shell.autocomplete_file_name()?); + + std::fs::copy(src, dst)?; + + shell + .configure_autocomplete() + .context("failed to run extra configuration requirements")?; + } + + Ok(()) +} + +pub trait ShellAutocomplete { + fn autocomplete_folder(&self) -> anyhow::Result<PathBuf>; + fn autocomplete_file_name(&self) -> anyhow::Result<String>; + /// Extra steps required for shells to enable command autocomplete. + fn configure_autocomplete(&self) -> anyhow::Result<()>; +} + +impl ShellAutocomplete for clap_complete::Shell { + fn autocomplete_folder(&self) -> anyhow::Result<PathBuf> { + let home_dir = dirs::home_dir().context("missing home folder")?; + + match self { + clap_complete::Shell::Bash => Ok(home_dir.join(".bash_completion.d")), + clap_complete::Shell::Fish => Ok(home_dir.join(".config/fish/completions")), + clap_complete::Shell::Zsh => Ok(home_dir.join(".zsh/completion")), + _ => anyhow::bail!("unsupported shell"), + } + } + + fn autocomplete_file_name(&self) -> anyhow::Result<String> { + let crate_name = env!("CARGO_PKG_NAME"); + + match self { + clap_complete::Shell::Bash => Ok(format!("{}.sh", crate_name)), + clap_complete::Shell::Fish => Ok(format!("{}.fish", crate_name)), + clap_complete::Shell::Zsh => Ok(format!("_{}.zsh", crate_name)), + _ => anyhow::bail!("unsupported shell"), + } + } + + fn configure_autocomplete(&self) -> anyhow::Result<()> { + match self { + clap_complete::Shell::Bash | clap_complete::Shell::Zsh => { + let shell = &self.to_string().to_lowercase(); + let completion_file = self + .autocomplete_folder()? + .join(self.autocomplete_file_name()?); + + // Source the completion file inside .{shell}rc + let shell_rc = dirs::home_dir() + .context("missing home directory")?
+ .join(format!(".{}rc", shell)); + + if shell_rc.exists() { + let shell_rc_content = std::fs::read_to_string(&shell_rc) + .context(format!("could not read .{}rc", shell))?; + + if !shell_rc_content.contains("# zkstack completion") { + std::fs::write( + shell_rc, + format!( + "{}\n# zkstack completion\nsource \"{}\"\n", + shell_rc_content, + completion_file.to_str().unwrap() + ), + ) + .context(format!("could not write .{}rc", shell))?; + } + } else { + println!( + "cargo:warning=Please add the following line to your .{}rc:", + shell + ); + println!("cargo:warning=source {}", completion_file.to_str().unwrap()); + } + } + _ => (), + } + + Ok(()) + } +} + +fn build_dependencies() -> anyhow::Result<()> { + let shell = Shell::new()?; + let code_dir = Path::new("../"); + + let _dir_guard = shell.push_dir(code_dir); + + cmd!(shell, "yarn install") + .run() + .context("Failed to install dependencies") +} diff --git a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh new file mode 100644 index 000000000000..fc6f29851e66 --- /dev/null +++ b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh @@ -0,0 +1,5405 @@ +#compdef zkstack + +autoload -U is-at-least + +_zkstack() { + typeset -A opt_args + typeset -a _arguments_options + local ret=1 + + if is-at-least 5.2; then + _arguments_options=(-s -S -C) + else + _arguments_options=(-s -C) + fi + + local context curcontext="$curcontext" state line + _arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +'-V[Print version]' \ +'--version[Print version]' \ +":: :_zkstack_commands" \ +"*::: :->zkstack" \ +&& ret=0 + case $state in + (zkstack) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-command-$line[1]:" + case $line[1] in + (autocomplete) +_arguments "${_arguments_options[@]}" : \ +'--generate=[The shell to generate the autocomplete script for]:GENERATOR:(bash elvish fish powershell zsh)' \ +'-o+[The out directory to write the autocomplete script to]:OUT:_files' \ +'--out=[The out directory to write the autocomplete script to]:OUT:_files' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(ecosystem) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__ecosystem_commands" \ +"*::: :->ecosystem" \ +&& ret=0 + + case $state in + (ecosystem) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-ecosystem-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +'--ecosystem-name=[]:ECOSYSTEM_NAME:_default' \ +'--l1-network=[L1 Network]:L1_NETWORK:(localhost sepolia holesky mainnet)' \ +'--link-to-code=[Code link]:LINK_TO_CODE:_files -/' \ +'--chain-name=[]:CHAIN_NAME:_default' \ +'--chain-id=[Chain ID]:CHAIN_ID:_default' \ +'--prover-mode=[Prover options]:PROVER_MODE:(no-proofs gpu)' \ +'--wallet-creation=[Wallet options]:WALLET_CREATION:((localhost\:"Load wallets from localhost mnemonic, they are funded for localhost env" +random\:"Generate 
random wallets" +empty\:"Generate placeholder wallets" +in-file\:"Specify file with wallets"))' \ +'--wallet-path=[Wallet path]:WALLET_PATH:_files' \ +'--l1-batch-commit-data-generator-mode=[Commit data generation mode]:L1_BATCH_COMMIT_DATA_GENERATOR_MODE:(rollup validium)' \ +'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS:_default' \ +'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR:_default' \ +'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR:_default' \ +'--set-as-default=[Set as default chain]' \ +'--evm-emulator=[Enable EVM emulator]' \ +'--start-containers=[Start reth and postgres containers after creation]' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--legacy-bridge[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +'--sender=[Address of the transaction sender]:SENDER:_default' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ +'-o+[Output directory for the generated files]:OUT:_files' \ +'--out=[Output directory for the generated files]:OUT:_files' \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +'--deploy-erc20=[Deploy ERC20 contracts]' \ +'--deploy-ecosystem=[Deploy ecosystem contracts]' \ +'--ecosystem-contracts-path=[Path to ecosystem contracts]:ECOSYSTEM_CONTRACTS_PATH:_files' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--deploy-paymaster=[Deploy Paymaster contract]' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL:_default' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME:_default' \ +'-o+[Enable Grafana]' \ +'--observability=[Enable Grafana]' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--resume[]' \ +'-d[]' \ +'--dont-drop[]' \ +'--ecosystem-only[Initialize ecosystem only and skip chain initialization (chain can be initialized later with \`chain init\` subcommand)]' \ +'--dev[Use defaults for all options and flags. 
Suitable for local development]' \ +'--no-port-reallocation[Do not reallocate ports]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(change-default-chain) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +'::name:_default' \ +&& ret=0 +;; +(setup-observability) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__ecosystem__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-ecosystem-help-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(change-default-chain) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup-observability) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(chain) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__chain_commands" \ +"*::: :->chain" \ +&& ret=0 + + case $state in + (chain) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +'--chain-name=[]:CHAIN_NAME:_default' \ +'--chain-id=[Chain ID]:CHAIN_ID:_default' \ +'--prover-mode=[Prover options]:PROVER_MODE:(no-proofs gpu)' \ +'--wallet-creation=[Wallet options]:WALLET_CREATION:((localhost\:"Load wallets from localhost mnemonic, they are funded for localhost env" +random\:"Generate random wallets" +empty\:"Generate placeholder wallets" +in-file\:"Specify file with wallets"))' \ +'--wallet-path=[Wallet path]:WALLET_PATH:_files' \ +'--l1-batch-commit-data-generator-mode=[Commit data generation mode]:L1_BATCH_COMMIT_DATA_GENERATOR_MODE:(rollup validium)' \ +'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS:_default' \ +'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR:_default' \ +'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR:_default' \ +'--set-as-default=[Set as default chain]' \ +'--evm-emulator=[Enable EVM emulator]' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--legacy-bridge[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +'-o+[Output directory for the generated 
files]:OUT:_files' \ +'--out=[Output directory for the generated files]:OUT:_files' \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL:_default' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME:_default' \ +'--deploy-paymaster=[]' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--resume[]' \ +'-d[]' \ +'--dont-drop[]' \ +'--no-port-reallocation[Do not reallocate ports]' \ +'--dev[Use defaults for all options and flags. 
Suitable for local development]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +":: :_zkstack__chain__init_commands" \ +"*::: :->init" \ +&& ret=0 + + case $state in + (init) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-init-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL:_default' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME:_default' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-d[Use default database urls and names]' \ +'--dev[Use default database urls and names]' \ +'-d[]' \ +'--dont-drop[]' \ +'--no-port-reallocation[Do not reallocate ports]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__init__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-init-help-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(genesis) +_arguments "${_arguments_options[@]}" : \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL:_default' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-d[Use default database urls and names]' \ +'--dev[Use default database urls and names]' \ +'-d[]' \ +'--dont-drop[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__chain__genesis_commands" \ +"*::: :->genesis" \ +&& ret=0 + + case $state in + (genesis) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-genesis-command-$line[1]:" + case $line[1] in + (init-database) +_arguments "${_arguments_options[@]}" : \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL:_default' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-d[Use default database urls and names]' \ +'--dev[Use default database urls and names]' \ +'-d[]' \ +'--dont-drop[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(server) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__genesis__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-genesis-help-command-$line[1]:" + case $line[1] in + (init-database) +_arguments 
"${_arguments_options[@]}" : \ +&& ret=0 +;; +(server) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(register-chain) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-l2-contracts) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(accept-chain-ownership) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(initialize-bridges) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print 
help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-consensus-registry) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-multicall3) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-timestamp-asserter) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-upgrader) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-paymaster) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ 
+'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(update-token-multiplier-setter) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--resume[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-help-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__help__init_commands" \ +"*::: :->init" \ +&& ret=0 + + case $state in + (init) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-help-init-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(genesis) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__help__genesis_commands" \ +"*::: :->genesis" \ +&& ret=0 + + case $state in + (genesis) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-help-genesis-command-$line[1]:" + case $line[1] in + (init-database) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(server) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(register-chain) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-l2-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(accept-chain-ownership) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(initialize-bridges) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-consensus-registry) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-multicall3) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-timestamp-asserter) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-upgrader) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; 
+(deploy-paymaster) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(update-token-multiplier-setter) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(dev) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev_commands" \ +"*::: :->dev" \ +&& ret=0 + + case $state in + (dev) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-command-$line[1]:" + case $line[1] in + (database) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__database_commands" \ +"*::: :->database" \ +&& ret=0 + + case $state in + (database) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-database-command-$line[1]:" + case $line[1] in + (check-sqlx-data) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(drop) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(migrate) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. 
If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(new-migration) +_arguments "${_arguments_options[@]}" : \ +'--database=[Database to create new migration for]:DATABASE:(prover core)' \ +'--name=[Migration name]:NAME:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(prepare) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(reset) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(setup) +_arguments "${_arguments_options[@]}" : \ +'-p+[Prover database]' \ +'--prover=[Prover database]' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ +'-c+[Core database]' \ +'--core=[Core database]' \ +'--core-url=[URL of the Core database. 
If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__database__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-database-help-command-$line[1]:" + case $line[1] in + (check-sqlx-data) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(drop) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(migrate) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(new-migration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prepare) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(reset) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(test) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__test_commands" \ +"*::: :->test" \ +&& ret=0 + + case $state in + (test) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-test-command-$line[1]:" + case $line[1] in + (integration) +_arguments "${_arguments_options[@]}" : \ +'-t+[Run just the tests matching a pattern. Same as the -t flag on jest.]:TEST_PATTERN:_default' \ +'--test-pattern=[Run just the tests matching a pattern. 
Same as the -t flag on jest.]:TEST_PATTERN:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-e[Run tests for external node]' \ +'--external-node[Run tests for external node]' \ +'-n[Do not install or build dependencies]' \ +'--no-deps[Do not install or build dependencies]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(fees) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-n[Do not install or build dependencies]' \ +'--no-deps[Do not install or build dependencies]' \ +'--no-kill[The test will not kill all the nodes during execution]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(revert) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--enable-consensus[Enable consensus]' \ +'-e[Run tests for external node]' \ +'--external-node[Run tests for external node]' \ +'-n[Do not install or build dependencies]' \ +'--no-deps[Do not install or build dependencies]' \ +'--no-kill[The test will not kill all the nodes during execution]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(recovery) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-s[Run recovery from a snapshot instead of genesis]' \ +'--snapshot[Run recovery from a snapshot instead of genesis]' \ +'-n[Do not install or build dependencies]' \ +'--no-deps[Do not install or build dependencies]' \ +'--no-kill[The test will not kill all the nodes during execution]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(upgrade) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-n[Do not install or build dependencies]' \ +'--no-deps[Do not install or build dependencies]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(rust) +_arguments "${_arguments_options[@]}" : \ +'--options=[Cargo test flags]:OPTIONS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(l1-contracts) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(wallet) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to 
use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(loadtest) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__test__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-test-help-command-$line[1]:" + case $line[1] in + (integration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fees) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(revert) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(recovery) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(upgrade) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(rust) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(l1-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wallet) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(loadtest) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(clean) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__clean_commands" \ +"*::: :->clean" \ +&& ret=0 + + case $state in + (clean) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-clean-command-$line[1]:" + case $line[1] in + (all) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(contracts-cache) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__clean__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-clean-help-command-$line[1]:" + case $line[1] in + (all) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contracts-cache) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(snapshot) +_arguments 
"${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__snapshot_commands" \ +"*::: :->snapshot" \ +&& ret=0 + + case $state in + (snapshot) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-snapshot-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__snapshot__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-snapshot-help-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(lint) +_arguments "${_arguments_options[@]}" : \ +'*-t+[]:TARGETS:(md sol js ts rs contracts autocompletion rust-toolchain)' \ +'*--targets=[]:TARGETS:(md sol js ts rs contracts autocompletion rust-toolchain)' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-c[]' \ +'--check[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(fmt) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-c[]' \ +'--check[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__fmt_commands" \ +"*::: :->fmt" \ +&& ret=0 + + case $state in + (fmt) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-fmt-command-$line[1]:" + case $line[1] in + (rustfmt) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(contract) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(prettier) +_arguments "${_arguments_options[@]}" : \ +'*-t+[]:TARGETS:(md sol js ts rs contracts autocompletion rust-toolchain)' \ +'*--targets=[]:TARGETS:(md sol js ts rs contracts autocompletion rust-toolchain)' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__fmt__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-fmt-help-command-$line[1]:" + case $line[1] in + (rustfmt) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contract) +_arguments "${_arguments_options[@]}" : 
\ +&& ret=0 +;; +(prettier) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-prover-command-$line[1]:" + case $line[1] in + (info) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(insert-batch) +_arguments "${_arguments_options[@]}" : \ +'--number=[]:NUMBER:_default' \ +'--version=[]:VERSION:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--default[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(insert-version) +_arguments "${_arguments_options[@]}" : \ +'--version=[]:VERSION:_default' \ +'--snark-wrapper=[]:SNARK_WRAPPER:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--default[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__prover__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-prover-help-command-$line[1]:" + case $line[1] in + (info) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-batch) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-version) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(contracts) +_arguments "${_arguments_options[@]}" : \ +'--l1-contracts=[Build L1 contracts]' \ +'--l2-contracts=[Build L2 contracts]' \ +'--system-contracts=[Build system contracts]' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(config-writer) +_arguments "${_arguments_options[@]}" : \ +'-p+[Path to the config file to override]:PATH:_default' \ +'--path=[Path to the config file to override]:PATH:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(send-transactions) +_arguments "${_arguments_options[@]}" : \ +'--file=[]:FILE:_files' \ +'--private-key=[]:PRIVATE_KEY:_default' \ +'--l1-rpc-url=[]:L1_RPC_URL:_default' \ +'--confirmations=[]:CONFIRMATIONS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(status) +_arguments 
"${_arguments_options[@]}" : \ +'-u+[URL of the health check endpoint]:URL:_default' \ +'--url=[URL of the health check endpoint]:URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__status_commands" \ +"*::: :->status" \ +&& ret=0 + + case $state in + (status) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-status-command-$line[1]:" + case $line[1] in + (ports) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__status__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-status-help-command-$line[1]:" + case $line[1] in + (ports) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(generate-genesis) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-command-$line[1]:" + case $line[1] in + (database) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__database_commands" \ +"*::: :->database" \ +&& ret=0 + + case $state in + (database) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-database-command-$line[1]:" + case $line[1] in + (check-sqlx-data) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(drop) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(migrate) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(new-migration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prepare) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(reset) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(test) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__test_commands" \ +"*::: :->test" \ +&& ret=0 + + case $state in + (test) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-test-command-$line[1]:" + case $line[1] in + (integration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fees) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(revert) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(recovery) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(upgrade) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(rust) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(l1-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 
+;; +(prover) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wallet) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(loadtest) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(clean) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__clean_commands" \ +"*::: :->clean" \ +&& ret=0 + + case $state in + (clean) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-clean-command-$line[1]:" + case $line[1] in + (all) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contracts-cache) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(snapshot) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__snapshot_commands" \ +"*::: :->snapshot" \ +&& ret=0 + + case $state in + (snapshot) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-snapshot-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(lint) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fmt) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__fmt_commands" \ +"*::: :->fmt" \ +&& ret=0 + + case $state in + (fmt) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-fmt-command-$line[1]:" + case $line[1] in + (rustfmt) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contract) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prettier) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-prover-command-$line[1]:" + case $line[1] in + (info) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-batch) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-version) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(config-writer) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(send-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(status) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__status_commands" \ +"*::: :->status" \ +&& ret=0 + + case $state in + (status) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-status-command-$line[1]:" + case $line[1] in + (ports) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(generate-genesis) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + 
curcontext="${curcontext%:*:*}:zkstack-prover-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +'--proof-store-dir=[]:PROOF_STORE_DIR:_default' \ +'--bucket-base-url=[]:BUCKET_BASE_URL:_default' \ +'--credentials-file=[]:CREDENTIALS_FILE:_default' \ +'--bucket-name=[]:BUCKET_NAME:_default' \ +'--location=[]:LOCATION:_default' \ +'--project-id=[]:PROJECT_ID:_default' \ +'--shall-save-to-public-bucket=[]:SHALL_SAVE_TO_PUBLIC_BUCKET:(true false)' \ +'--public-store-dir=[]:PUBLIC_STORE_DIR:_default' \ +'--public-bucket-base-url=[]:PUBLIC_BUCKET_BASE_URL:_default' \ +'--public-credentials-file=[]:PUBLIC_CREDENTIALS_FILE:_default' \ +'--public-bucket-name=[]:PUBLIC_BUCKET_NAME:_default' \ +'--public-location=[]:PUBLIC_LOCATION:_default' \ +'--public-project-id=[]:PUBLIC_PROJECT_ID:_default' \ +'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR:_default' \ +'--bellman-cuda=[]' \ +'--setup-compressor-key=[]' \ +'--path=[]:PATH:_default' \ +'--region=[]:REGION:(us europe asia)' \ +'--mode=[]:MODE:(download generate)' \ +'--setup-keys=[]' \ +'--setup-database=[]:SETUP_DATABASE:(true false)' \ +'--prover-db-url=[Prover database url without database name]:PROVER_DB_URL:_default' \ +'--prover-db-name=[Prover database name]:PROVER_DB_NAME:_default' \ +'-u+[Use default database urls and names]:USE_DEFAULT:(true false)' \ +'--use-default=[Use default database urls and names]:USE_DEFAULT:(true false)' \ +'-d+[]:DONT_DROP:(true false)' \ +'--dont-drop=[]:DONT_DROP:(true false)' \ +'--cloud-type=[]:CLOUD_TYPE:(gcp local)' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--dev[]' \ +'(--bellman-cuda-dir)--clone[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(setup-keys) +_arguments "${_arguments_options[@]}" : \ +'--region=[]:REGION:(us europe asia)' \ +'--mode=[]:MODE:(download generate)' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +'--component=[]:COMPONENT:(gateway witness-generator witness-vector-generator prover circuit-prover compressor prover-job-monitor)' \ +'--round=[]:ROUND:(all-rounds basic-circuits leaf-aggregation node-aggregation recursion-tip scheduler)' \ +'--threads=[]:THREADS:_default' \ +'--max-allocation=[Memory allocation limit in bytes (for prover component)]:MAX_ALLOCATION:_default' \ +'-l+[]:LIGHT_WVG_COUNT:_default' \ +'--light-wvg-count=[]:LIGHT_WVG_COUNT:_default' \ +'-h+[]:HEAVY_WVG_COUNT:_default' \ +'--heavy-wvg-count=[]:HEAVY_WVG_COUNT:_default' \ +'-m+[]:MAX_ALLOCATION:_default' \ +'--max-allocation=[]:MAX_ALLOCATION:_default' \ +'--docker=[]:DOCKER:(true false)' \ +'--tag=[]:TAG:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(init-bellman-cuda) +_arguments "${_arguments_options[@]}" : \ +'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'(--bellman-cuda-dir)--clone[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(compressor-keys) +_arguments 
"${_arguments_options[@]}" : \ +'--path=[]:PATH:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__prover__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-prover-help-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup-keys) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init-bellman-cuda) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(compressor-keys) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(server) +_arguments "${_arguments_options[@]}" : \ +'*--components=[Components of server to run]:COMPONENTS:_default' \ +'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--genesis[Run server in genesis mode]' \ +'--uring[Enables uring support for RocksDB]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__server_commands" \ +"*::: :->server" \ +&& ret=0 + + case $state in + (server) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-server-command-$line[1]:" + case $line[1] in + (build) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +'*--components=[Components of server to run]:COMPONENTS:_default' \ +'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--genesis[Run server in genesis mode]' \ +'--uring[Enables uring support for RocksDB]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(wait) +_arguments "${_arguments_options[@]}" : \ +'-t+[Wait timeout in seconds]:SECONDS:_default' \ +'--timeout=[Wait timeout in seconds]:SECONDS:_default' \ +'--poll-interval=[Poll interval in milliseconds]:MILLIS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__server__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-server-help-command-$line[1]:" + case $line[1] in + (build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; 
+(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wait) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(external-node) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__external-node_commands" \ +"*::: :->external-node" \ +&& ret=0 + + case $state in + (external-node) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-external-node-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +'--db-url=[]:DB_URL:_default' \ +'--db-name=[]:DB_NAME:_default' \ +'--l1-rpc-url=[]:L1_RPC_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-u[Use default database urls and names]' \ +'--use-default[Use default database urls and names]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +'*--components=[Components of server to run]:COMPONENTS:_default' \ +'--enable-consensus=[Enable consensus]' \ +'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--reinit[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(wait) +_arguments "${_arguments_options[@]}" : \ +'-t+[Wait timeout in seconds]:SECONDS:_default' \ +'--timeout=[Wait timeout in seconds]:SECONDS:_default' \ +'--poll-interval=[Poll interval in milliseconds]:MILLIS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__external-node__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-external-node-help-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wait) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(containers) +_arguments 
"${_arguments_options[@]}" : \ +'-o+[Enable Grafana]' \ +'--observability=[Enable Grafana]' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(contract-verifier) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__contract-verifier_commands" \ +"*::: :->contract-verifier" \ +&& ret=0 + + case $state in + (contract-verifier) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-contract-verifier-command-$line[1]:" + case $line[1] in + (build) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(wait) +_arguments "${_arguments_options[@]}" : \ +'-t+[Wait timeout in seconds]:SECONDS:_default' \ +'--timeout=[Wait timeout in seconds]:SECONDS:_default' \ +'--poll-interval=[Poll interval in milliseconds]:MILLIS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +'--zksolc-version=[Version of zksolc to install]:ZKSOLC_VERSION:_default' \ +'--zkvyper-version=[Version of zkvyper to install]:ZKVYPER_VERSION:_default' \ +'--solc-version=[Version of solc to install]:SOLC_VERSION:_default' \ +'--era-vm-solc-version=[Version of era vm solc to install]:ERA_VM_SOLC_VERSION:_default' \ +'--vyper-version=[Version of vyper to install]:VYPER_VERSION:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--only[Install only provided compilers]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__contract-verifier__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-contract-verifier-help-command-$line[1]:" + case $line[1] in + (build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wait) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(portal) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(explorer) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ 
+'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__explorer_commands" \ +"*::: :->explorer" \ +&& ret=0 + + case $state in + (explorer) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-explorer-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(run-backend) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__explorer__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-explorer-help-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run-backend) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(consensus) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__consensus_commands" \ +"*::: :->consensus" \ +&& ret=0 + + case $state in + (consensus) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-consensus-command-$line[1]:" + case $line[1] in + (set-attester-committee) +_arguments "${_arguments_options[@]}" : \ +'--from-file=[Sets the attester committee in the consensus registry contract to the committee in the yaml file. 
File format is defined in \`commands/consensus/proto/mod.proto\`]:FROM_FILE:_files' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'--from-genesis[Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(get-attester-committee) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(wait-for-registry) +_arguments "${_arguments_options[@]}" : \ +'-t+[Wait timeout in seconds]:SECONDS:_default' \ +'--timeout=[Wait timeout in seconds]:SECONDS:_default' \ +'--poll-interval=[Poll interval in milliseconds]:MILLIS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__consensus__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-consensus-help-command-$line[1]:" + case $line[1] in + (set-attester-committee) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(get-attester-committee) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wait-for-registry) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(update) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-c[Update only the config files]' \ +'--only-config[Update only the config files]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(markdown) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN:_default' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-command-$line[1]:" + case $line[1] in + (autocomplete) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(ecosystem) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__ecosystem_commands" \ +"*::: :->ecosystem" \ +&& ret=0 + + case $state in + (ecosystem) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-ecosystem-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(change-default-chain) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup-observability) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(chain) +_arguments "${_arguments_options[@]}" : \ +"::
:_zkstack__help__chain_commands" \ +"*::: :->chain" \ +&& ret=0 + + case $state in + (chain) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-chain-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__chain__init_commands" \ +"*::: :->init" \ +&& ret=0 + + case $state in + (init) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-chain-init-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(genesis) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__chain__genesis_commands" \ +"*::: :->genesis" \ +&& ret=0 + + case $state in + (genesis) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-chain-genesis-command-$line[1]:" + case $line[1] in + (init-database) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(server) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(register-chain) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-l2-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(accept-chain-ownership) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(initialize-bridges) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-consensus-registry) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-multicall3) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-timestamp-asserter) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-upgrader) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(deploy-paymaster) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(update-token-multiplier-setter) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(dev) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev_commands" \ +"*::: :->dev" \ +&& ret=0 + + case $state in + (dev) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-command-$line[1]:" + case $line[1] in + (database) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__database_commands" \ +"*::: :->database" \ +&& ret=0 + + case $state in + (database) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-database-command-$line[1]:" + case $line[1] in + (check-sqlx-data) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(drop) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(migrate) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(new-migration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prepare) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(reset) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(test) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__test_commands" \ +"*::: :->test" \ +&& ret=0 + + case $state in + (test) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-test-command-$line[1]:" + case $line[1] in + (integration) +_arguments "${_arguments_options[@]}" : 
\ +&& ret=0 +;; +(fees) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(revert) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(recovery) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(upgrade) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(rust) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(l1-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wallet) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(loadtest) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(clean) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__clean_commands" \ +"*::: :->clean" \ +&& ret=0 + + case $state in + (clean) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-clean-command-$line[1]:" + case $line[1] in + (all) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contracts-cache) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(snapshot) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__snapshot_commands" \ +"*::: :->snapshot" \ +&& ret=0 + + case $state in + (snapshot) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-snapshot-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(lint) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fmt) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__fmt_commands" \ +"*::: :->fmt" \ +&& ret=0 + + case $state in + (fmt) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-fmt-command-$line[1]:" + case $line[1] in + (rustfmt) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contract) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prettier) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-prover-command-$line[1]:" + case $line[1] in + (info) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-batch) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-version) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(config-writer) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(send-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(status) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__status_commands" \ +"*::: :->status" \ +&& ret=0 + + case $state in + (status) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-status-command-$line[1]:" + case $line[1] in + (ports) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(generate-genesis) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(prover) +_arguments 
"${_arguments_options[@]}" : \ +":: :_zkstack__help__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-prover-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup-keys) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init-bellman-cuda) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(compressor-keys) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(server) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__server_commands" \ +"*::: :->server" \ +&& ret=0 + + case $state in + (server) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-server-command-$line[1]:" + case $line[1] in + (build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wait) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(external-node) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__external-node_commands" \ +"*::: :->external-node" \ +&& ret=0 + + case $state in + (external-node) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-external-node-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wait) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contract-verifier) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__contract-verifier_commands" \ +"*::: :->contract-verifier" \ +&& ret=0 + + case $state in + (contract-verifier) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-contract-verifier-command-$line[1]:" + case $line[1] in + (build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wait) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(portal) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(explorer) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__explorer_commands" \ +"*::: :->explorer" \ +&& ret=0 + + case $state in + (explorer) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-explorer-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run-backend) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(run) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(consensus) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__consensus_commands" \ +"*::: :->consensus" \ +&& ret=0 + + case $state in + (consensus) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-consensus-command-$line[1]:" + case $line[1] in + (set-attester-committee) +_arguments "${_arguments_options[@]}" : \ +&& 
ret=0 +;; +(get-attester-committee) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wait-for-registry) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(update) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(markdown) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +} + +(( $+functions[_zkstack_commands] )) || +_zkstack_commands() { + local commands; commands=( +'autocomplete:Create shell autocompletion files' \ +'ecosystem:Ecosystem related commands' \ +'chain:Chain related commands' \ +'dev:Supervisor related commands' \ +'prover:Prover related commands' \ +'server:Run server' \ +'external-node:External Node related commands' \ +'containers:Run containers for local development' \ +'contract-verifier:Run contract verifier' \ +'portal:Run dapp-portal' \ +'explorer:Run block-explorer' \ +'consensus:Consensus utilities' \ +'update:Update ZKsync' \ +'markdown:Print markdown help' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack commands' commands "$@" +} +(( $+functions[_zkstack__autocomplete_commands] )) || +_zkstack__autocomplete_commands() { + local commands; commands=() + _describe -t commands 'zkstack autocomplete commands' commands "$@" +} +(( $+functions[_zkstack__chain_commands] )) || +_zkstack__chain_commands() { + local commands; commands=( +'create:Create a new chain, setting the necessary configurations for later initialization' \ +'build-transactions:Create unsigned transactions for chain deployment' \ +'init:Initialize chain, deploying necessary contracts and performing on-chain operations' \ +'genesis:Run server genesis' \ +'register-chain:Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note\: After completion, L2 governor can accept ownership by running \`accept-chain-ownership\`' \ +'deploy-l2-contracts:Deploy all L2 contracts (executed by L1 governor)' \ +'accept-chain-ownership:Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after \`register-chain\` to accept ownership of newly created DiamondProxy contract' \ +'initialize-bridges:Initialize bridges on L2' \ +'deploy-consensus-registry:Deploy L2 consensus registry' \ +'deploy-multicall3:Deploy L2 multicall3' \ +'deploy-timestamp-asserter:Deploy L2 TimestampAsserter' \ +'deploy-upgrader:Deploy Default Upgrader' \ +'deploy-paymaster:Deploy paymaster smart contract' \ +'update-token-multiplier-setter:Update Token Multiplier Setter address on L1' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain commands' commands "$@" +} +(( $+functions[_zkstack__chain__accept-chain-ownership_commands] )) || +_zkstack__chain__accept-chain-ownership_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain accept-chain-ownership commands' commands "$@" +} +(( $+functions[_zkstack__chain__build-transactions_commands] )) || +_zkstack__chain__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__chain__create_commands] )) || +_zkstack__chain__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain create commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-consensus-registry_commands] )) || +_zkstack__chain__deploy-consensus-registry_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-consensus-registry commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-l2-contracts_commands] )) || +_zkstack__chain__deploy-l2-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-l2-contracts commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-multicall3_commands] )) || +_zkstack__chain__deploy-multicall3_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-multicall3 commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-paymaster_commands] )) || +_zkstack__chain__deploy-paymaster_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-paymaster commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-timestamp-asserter_commands] )) || +_zkstack__chain__deploy-timestamp-asserter_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-timestamp-asserter commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-upgrader_commands] )) || +_zkstack__chain__deploy-upgrader_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-upgrader commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis_commands] )) || +_zkstack__chain__genesis_commands() { + local commands; commands=( +'init-database:Initialize databases' \ +'server:Runs server genesis' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain genesis commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__help_commands] )) || +_zkstack__chain__genesis__help_commands() { + local commands; commands=( +'init-database:Initialize databases' \ +'server:Runs server genesis' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain genesis help commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__help__help_commands] )) || 
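+# $+functions[name] expands to 1 once "name" is defined, so each
+# "(( $+functions[...] )) || name() { ... }" guard below defines its helper only
+# on first load and makes re-sourcing this generated file a no-op.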
+_zkstack__chain__genesis__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis help help commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__help__init-database_commands] )) || +_zkstack__chain__genesis__help__init-database_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis help init-database commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__help__server_commands] )) || +_zkstack__chain__genesis__help__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis help server commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__init-database_commands] )) || +_zkstack__chain__genesis__init-database_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis init-database commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__server_commands] )) || +_zkstack__chain__genesis__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis server commands' commands "$@" +} +(( $+functions[_zkstack__chain__help_commands] )) || +_zkstack__chain__help_commands() { + local commands; commands=( +'create:Create a new chain, setting the necessary configurations for later initialization' \ +'build-transactions:Create unsigned transactions for chain deployment' \ +'init:Initialize chain, deploying necessary contracts and performing on-chain operations' \ +'genesis:Run server genesis' \ +'register-chain:Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note\: After completion, L2 governor can accept ownership by running \`accept-chain-ownership\`' \ +'deploy-l2-contracts:Deploy all L2 contracts (executed by L1 governor)' \ +'accept-chain-ownership:Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after \`register-chain\` to accept ownership of newly created DiamondProxy contract' \ +'initialize-bridges:Initialize bridges on L2' \ +'deploy-consensus-registry:Deploy L2 consensus registry' \ +'deploy-multicall3:Deploy L2 multicall3' \ +'deploy-timestamp-asserter:Deploy L2 TimestampAsserter' \ +'deploy-upgrader:Deploy Default Upgrader' \ +'deploy-paymaster:Deploy paymaster smart contract' \ +'update-token-multiplier-setter:Update Token Multiplier Setter address on L1' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain help commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__accept-chain-ownership_commands] )) || +_zkstack__chain__help__accept-chain-ownership_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help accept-chain-ownership commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__build-transactions_commands] )) || +_zkstack__chain__help__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__create_commands] )) || +_zkstack__chain__help__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help create commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-consensus-registry_commands] )) || +_zkstack__chain__help__deploy-consensus-registry_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-consensus-registry commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-l2-contracts_commands] )) || +_zkstack__chain__help__deploy-l2-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-l2-contracts commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-multicall3_commands] )) || +_zkstack__chain__help__deploy-multicall3_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-multicall3 commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-paymaster_commands] )) || +_zkstack__chain__help__deploy-paymaster_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-paymaster commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-timestamp-asserter_commands] )) || +_zkstack__chain__help__deploy-timestamp-asserter_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-timestamp-asserter commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-upgrader_commands] )) || +_zkstack__chain__help__deploy-upgrader_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-upgrader commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__genesis_commands] )) || +_zkstack__chain__help__genesis_commands() { + local commands; commands=( +'init-database:Initialize databases' \ +'server:Runs server genesis' \ + ) + _describe -t commands 'zkstack chain help genesis commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__genesis__init-database_commands] )) || +_zkstack__chain__help__genesis__init-database_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help genesis init-database commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__genesis__server_commands] )) || 
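+# Leaf helpers such as the ones below declare an empty commands array, so
+# _describe offers no candidates: the corresponding command has no further
+# subcommands.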
+_zkstack__chain__help__genesis__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help genesis server commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__help_commands] )) || +_zkstack__chain__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help help commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__init_commands] )) || +_zkstack__chain__help__init_commands() { + local commands; commands=( +'configs:Initialize chain configs' \ + ) + _describe -t commands 'zkstack chain help init commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__init__configs_commands] )) || +_zkstack__chain__help__init__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help init configs commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__initialize-bridges_commands] )) || +_zkstack__chain__help__initialize-bridges_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help initialize-bridges commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__register-chain_commands] )) || +_zkstack__chain__help__register-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help register-chain commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__update-token-multiplier-setter_commands] )) || +_zkstack__chain__help__update-token-multiplier-setter_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help update-token-multiplier-setter commands' commands "$@" +} +(( $+functions[_zkstack__chain__init_commands] )) || +_zkstack__chain__init_commands() { + local commands; commands=( +'configs:Initialize chain configs' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain init commands' commands "$@" +} +(( $+functions[_zkstack__chain__init__configs_commands] )) || +_zkstack__chain__init__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain init configs commands' commands "$@" +} +(( $+functions[_zkstack__chain__init__help_commands] )) || +_zkstack__chain__init__help_commands() { + local commands; commands=( +'configs:Initialize chain configs' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain init help commands' commands "$@" +} +(( $+functions[_zkstack__chain__init__help__configs_commands] )) || +_zkstack__chain__init__help__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain init help configs commands' commands "$@" +} +(( $+functions[_zkstack__chain__init__help__help_commands] )) || +_zkstack__chain__init__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain init help help commands' commands "$@" +} +(( $+functions[_zkstack__chain__initialize-bridges_commands] )) || +_zkstack__chain__initialize-bridges_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain initialize-bridges commands' commands "$@" +} +(( $+functions[_zkstack__chain__register-chain_commands] )) || +_zkstack__chain__register-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain register-chain commands' commands "$@" +} +(( $+functions[_zkstack__chain__update-token-multiplier-setter_commands] )) || +_zkstack__chain__update-token-multiplier-setter_commands() { + local commands; commands=() + 
_describe -t commands 'zkstack chain update-token-multiplier-setter commands' commands "$@" +} +(( $+functions[_zkstack__consensus_commands] )) || +_zkstack__consensus_commands() { + local commands; commands=( +'set-attester-committee:Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml' \ +'get-attester-committee:Fetches the attester committee from the consensus registry contract' \ +'wait-for-registry:Wait until the consensus registry contract is deployed to L2' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack consensus commands' commands "$@" +} +(( $+functions[_zkstack__consensus__get-attester-committee_commands] )) || +_zkstack__consensus__get-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus get-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help_commands] )) || +_zkstack__consensus__help_commands() { + local commands; commands=( +'set-attester-committee:Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml' \ +'get-attester-committee:Fetches the attester committee from the consensus registry contract' \ +'wait-for-registry:Wait until the consensus registry contract is deployed to L2' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack consensus help commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help__get-attester-committee_commands] )) || +_zkstack__consensus__help__get-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus help get-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help__help_commands] )) || +_zkstack__consensus__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus help help commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help__set-attester-committee_commands] )) || +_zkstack__consensus__help__set-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus help set-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help__wait-for-registry_commands] )) || +_zkstack__consensus__help__wait-for-registry_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus help wait-for-registry commands' commands "$@" +} +(( $+functions[_zkstack__consensus__set-attester-committee_commands] )) || +_zkstack__consensus__set-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus set-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__consensus__wait-for-registry_commands] )) || +_zkstack__consensus__wait-for-registry_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus wait-for-registry commands' commands "$@" +} +(( $+functions[_zkstack__containers_commands] )) || +_zkstack__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack containers commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier_commands] )) || +_zkstack__contract-verifier_commands() { + local commands; commands=( +'build:Build contract verifier binary' \ +'run:Run contract verifier' \ +'wait:Wait for contract verifier to start' \ +'init:Download required binaries for contract 
verifier' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack contract-verifier commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__build_commands] )) || +_zkstack__contract-verifier__build_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier build commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help_commands] )) || +_zkstack__contract-verifier__help_commands() { + local commands; commands=( +'build:Build contract verifier binary' \ +'run:Run contract verifier' \ +'wait:Wait for contract verifier to start' \ +'init:Download required binaries for contract verifier' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack contract-verifier help commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help__build_commands] )) || +_zkstack__contract-verifier__help__build_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier help build commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help__help_commands] )) || +_zkstack__contract-verifier__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier help help commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help__init_commands] )) || +_zkstack__contract-verifier__help__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier help init commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help__run_commands] )) || +_zkstack__contract-verifier__help__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier help run commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help__wait_commands] )) || +_zkstack__contract-verifier__help__wait_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier help wait commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__init_commands] )) || +_zkstack__contract-verifier__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier init commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__run_commands] )) || +_zkstack__contract-verifier__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier run commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__wait_commands] )) || +_zkstack__contract-verifier__wait_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier wait commands' commands "$@" +} +(( $+functions[_zkstack__dev_commands] )) || +_zkstack__dev_commands() { + local commands; commands=( +'database:Database related commands' \ +'test:Run tests' \ +'clean:Clean artifacts' \ +'snapshot:Snapshots creator' \ +'lint:Lint code' \ +'fmt:Format code' \ +'prover:Protocol version used by provers' \ +'contracts:Build contracts' \ +'config-writer:Overwrite general config' \ +'send-transactions:Send transactions from file' \ +'status:Get status of the server' \ +'generate-genesis:Generate new genesis file based on current contracts' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean_commands] )) || +_zkstack__dev__clean_commands() { + local commands; 
commands=( +'all:Remove containers and contracts cache' \ +'containers:Remove containers and docker volumes' \ +'contracts-cache:Remove contracts caches' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev clean commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__all_commands] )) || +_zkstack__dev__clean__all_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean all commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__containers_commands] )) || +_zkstack__dev__clean__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean containers commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__contracts-cache_commands] )) || +_zkstack__dev__clean__contracts-cache_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean contracts-cache commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help_commands] )) || +_zkstack__dev__clean__help_commands() { + local commands; commands=( +'all:Remove containers and contracts cache' \ +'containers:Remove containers and docker volumes' \ +'contracts-cache:Remove contracts caches' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev clean help commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__all_commands] )) || +_zkstack__dev__clean__help__all_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help all commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__containers_commands] )) || +_zkstack__dev__clean__help__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help containers commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__contracts-cache_commands] )) || +_zkstack__dev__clean__help__contracts-cache_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help contracts-cache commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__help_commands] )) || +_zkstack__dev__clean__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__config-writer_commands] )) || +_zkstack__dev__config-writer_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev config-writer commands' commands "$@" +} +(( $+functions[_zkstack__dev__contracts_commands] )) || +_zkstack__dev__contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev contracts commands' commands "$@" +} +(( $+functions[_zkstack__dev__database_commands] )) || +_zkstack__dev__database_commands() { + local commands; commands=( +'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \ +'drop:Drop databases. If no databases are selected, all databases will be dropped.' \ +'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \ +'new-migration:Create new migration' \ +'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \ +'reset:Reset databases. If no databases are selected, all databases will be reset.' \ +'setup:Set up databases. If no databases are selected, all databases will be set up.'
\ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev database commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__check-sqlx-data_commands] )) || +_zkstack__dev__database__check-sqlx-data_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database check-sqlx-data commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__drop_commands] )) || +_zkstack__dev__database__drop_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database drop commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help_commands] )) || +_zkstack__dev__database__help_commands() { + local commands; commands=( +'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \ +'drop:Drop databases. If no databases are selected, all databases will be dropped.' \ +'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \ +'new-migration:Create new migration' \ +'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \ +'reset:Reset databases. If no databases are selected, all databases will be reset.' \ +'setup:Set up databases. If no databases are selected, all databases will be set up.' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev database help commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__check-sqlx-data_commands] )) || +_zkstack__dev__database__help__check-sqlx-data_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help check-sqlx-data commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__drop_commands] )) || +_zkstack__dev__database__help__drop_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help drop commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__help_commands] )) || +_zkstack__dev__database__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__migrate_commands] )) || +_zkstack__dev__database__help__migrate_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help migrate commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__new-migration_commands] )) || +_zkstack__dev__database__help__new-migration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help new-migration commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__prepare_commands] )) || +_zkstack__dev__database__help__prepare_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help prepare commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__reset_commands] )) || +_zkstack__dev__database__help__reset_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help reset commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__setup_commands] )) || +_zkstack__dev__database__help__setup_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help setup commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__migrate_commands] )) || +_zkstack__dev__database__migrate_commands() { +
local commands; commands=()
+    _describe -t commands 'zkstack dev database migrate commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__database__new-migration_commands] )) ||
+_zkstack__dev__database__new-migration_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev database new-migration commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__database__prepare_commands] )) ||
+_zkstack__dev__database__prepare_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev database prepare commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__database__reset_commands] )) ||
+_zkstack__dev__database__reset_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev database reset commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__database__setup_commands] )) ||
+_zkstack__dev__database__setup_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev database setup commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__fmt_commands] )) ||
+_zkstack__dev__fmt_commands() {
+    local commands; commands=(
+'rustfmt:' \
+'contract:' \
+'prettier:' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack dev fmt commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__fmt__contract_commands] )) ||
+_zkstack__dev__fmt__contract_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev fmt contract commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__fmt__help_commands] )) ||
+_zkstack__dev__fmt__help_commands() {
+    local commands; commands=(
+'rustfmt:' \
+'contract:' \
+'prettier:' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack dev fmt help commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__fmt__help__contract_commands] )) ||
+_zkstack__dev__fmt__help__contract_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev fmt help contract commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__fmt__help__help_commands] )) ||
+_zkstack__dev__fmt__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev fmt help help commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__fmt__help__prettier_commands] )) ||
+_zkstack__dev__fmt__help__prettier_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev fmt help prettier commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__fmt__help__rustfmt_commands] )) ||
+_zkstack__dev__fmt__help__rustfmt_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev fmt help rustfmt commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__fmt__prettier_commands] )) ||
+_zkstack__dev__fmt__prettier_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev fmt prettier commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__fmt__rustfmt_commands] )) ||
+_zkstack__dev__fmt__rustfmt_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev fmt rustfmt commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__generate-genesis_commands] )) ||
+_zkstack__dev__generate-genesis_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev generate-genesis commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help_commands] )) ||
+_zkstack__dev__help_commands() {
+    local commands; commands=(
+'database:Database related commands' \
+'test:Run tests' \
+'clean:Clean artifacts' \
+'snapshot:Snapshots creator' \
+'lint:Lint code' \
+'fmt:Format code' \
+'prover:Protocol version used by provers' \
+'contracts:Build contracts' \
+'config-writer:Overwrite general config' \
+'send-transactions:Send transactions from file' \
+'status:Get status of the server' \
+'generate-genesis:Generate new genesis file based on current contracts' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack dev help commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__clean_commands] )) ||
+_zkstack__dev__help__clean_commands() {
+    local commands; commands=(
+'all:Remove containers and contracts cache' \
+'containers:Remove containers and docker volumes' \
+'contracts-cache:Remove contracts caches' \
+    )
+    _describe -t commands 'zkstack dev help clean commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__clean__all_commands] )) ||
+_zkstack__dev__help__clean__all_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help clean all commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__clean__containers_commands] )) ||
+_zkstack__dev__help__clean__containers_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help clean containers commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__clean__contracts-cache_commands] )) ||
+_zkstack__dev__help__clean__contracts-cache_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help clean contracts-cache commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__config-writer_commands] )) ||
+_zkstack__dev__help__config-writer_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help config-writer commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__contracts_commands] )) ||
+_zkstack__dev__help__contracts_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help contracts commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__database_commands] )) ||
+_zkstack__dev__help__database_commands() {
+    local commands; commands=(
+'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \
+'drop:Drop databases. If no databases are selected, all databases will be dropped.' \
+'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \
+'new-migration:Create new migration' \
+'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \
+'reset:Reset databases. If no databases are selected, all databases will be reset.' \
+'setup:Setup databases. If no databases are selected, all databases will be setup.' \
+    )
+    _describe -t commands 'zkstack dev help database commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__database__check-sqlx-data_commands] )) ||
+_zkstack__dev__help__database__check-sqlx-data_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help database check-sqlx-data commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__database__drop_commands] )) ||
+_zkstack__dev__help__database__drop_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help database drop commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__database__migrate_commands] )) ||
+_zkstack__dev__help__database__migrate_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help database migrate commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__database__new-migration_commands] )) ||
+_zkstack__dev__help__database__new-migration_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help database new-migration commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__database__prepare_commands] )) ||
+_zkstack__dev__help__database__prepare_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help database prepare commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__database__reset_commands] )) ||
+_zkstack__dev__help__database__reset_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help database reset commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__database__setup_commands] )) ||
+_zkstack__dev__help__database__setup_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help database setup commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__fmt_commands] )) ||
+_zkstack__dev__help__fmt_commands() {
+    local commands; commands=(
+'rustfmt:' \
+'contract:' \
+'prettier:' \
+    )
+    _describe -t commands 'zkstack dev help fmt commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__fmt__contract_commands] )) ||
+_zkstack__dev__help__fmt__contract_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help fmt contract commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__fmt__prettier_commands] )) ||
+_zkstack__dev__help__fmt__prettier_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help fmt prettier commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__fmt__rustfmt_commands] )) ||
+_zkstack__dev__help__fmt__rustfmt_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help fmt rustfmt commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__generate-genesis_commands] )) ||
+_zkstack__dev__help__generate-genesis_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help generate-genesis commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__help_commands] )) ||
+_zkstack__dev__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help help commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__lint_commands] )) ||
+_zkstack__dev__help__lint_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help lint commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__prover_commands] )) ||
+_zkstack__dev__help__prover_commands() {
+    local commands; commands=(
+'info:' \
+'insert-batch:' \
+'insert-version:' \
+    )
+    _describe -t commands 'zkstack dev help prover commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__prover__info_commands] )) ||
+_zkstack__dev__help__prover__info_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help prover info commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__prover__insert-batch_commands] )) ||
+_zkstack__dev__help__prover__insert-batch_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help prover insert-batch commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__prover__insert-version_commands] )) ||
+_zkstack__dev__help__prover__insert-version_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help prover insert-version commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__send-transactions_commands] )) ||
+_zkstack__dev__help__send-transactions_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help send-transactions commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__snapshot_commands] )) ||
+_zkstack__dev__help__snapshot_commands() {
+    local commands; commands=(
+'create:' \
+    )
+    _describe -t commands 'zkstack dev help snapshot commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__snapshot__create_commands] )) ||
+_zkstack__dev__help__snapshot__create_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help snapshot create commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__status_commands] )) ||
+_zkstack__dev__help__status_commands() {
+    local commands; commands=(
+'ports:Show used ports' \
+    )
+    _describe -t commands 'zkstack dev help status commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__status__ports_commands] )) ||
+_zkstack__dev__help__status__ports_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help status ports commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__test_commands] )) ||
+_zkstack__dev__help__test_commands() {
+    local commands; commands=(
+'integration:Run integration tests' \
+'fees:Run fees test' \
+'revert:Run revert tests' \
+'recovery:Run recovery tests' \
+'upgrade:Run upgrade tests' \
+'build:Build all test dependencies' \
+'rust:Run unit-tests, accepts optional cargo test flags' \
+'l1-contracts:Run L1 contracts tests' \
+'prover:Run prover tests' \
+'wallet:Print test wallets information' \
+'loadtest:Run loadtest' \
+    )
+    _describe -t commands 'zkstack dev help test commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__test__build_commands] )) ||
+_zkstack__dev__help__test__build_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help test build commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__test__fees_commands] )) ||
+_zkstack__dev__help__test__fees_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help test fees commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__test__integration_commands] )) ||
+_zkstack__dev__help__test__integration_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help test integration commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__test__l1-contracts_commands] )) ||
+_zkstack__dev__help__test__l1-contracts_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help test l1-contracts commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__test__loadtest_commands] )) ||
+_zkstack__dev__help__test__loadtest_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help test loadtest commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__test__prover_commands] )) ||
+_zkstack__dev__help__test__prover_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help test prover commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__test__recovery_commands] )) ||
+_zkstack__dev__help__test__recovery_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help test recovery commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__test__revert_commands] )) ||
+_zkstack__dev__help__test__revert_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help test revert commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__test__rust_commands] )) ||
+_zkstack__dev__help__test__rust_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help test rust commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__test__upgrade_commands] )) ||
+_zkstack__dev__help__test__upgrade_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help test upgrade commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__help__test__wallet_commands] )) ||
+_zkstack__dev__help__test__wallet_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev help test wallet commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__lint_commands] )) ||
+_zkstack__dev__lint_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev lint commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__prover_commands] )) ||
+_zkstack__dev__prover_commands() {
+    local commands; commands=(
+'info:' \
+'insert-batch:' \
+'insert-version:' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack dev prover commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__prover__help_commands] )) ||
+_zkstack__dev__prover__help_commands() {
+    local commands; commands=(
+'info:' \
+'insert-batch:' \
+'insert-version:' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack dev prover help commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__prover__help__help_commands] )) ||
+_zkstack__dev__prover__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev prover help help commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__prover__help__info_commands] )) ||
+_zkstack__dev__prover__help__info_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev prover help info commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__prover__help__insert-batch_commands] )) ||
+_zkstack__dev__prover__help__insert-batch_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev prover help insert-batch commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__prover__help__insert-version_commands] )) ||
+_zkstack__dev__prover__help__insert-version_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev prover help insert-version commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__prover__info_commands] )) ||
+_zkstack__dev__prover__info_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev prover info commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__prover__insert-batch_commands] )) ||
+_zkstack__dev__prover__insert-batch_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev prover insert-batch commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__prover__insert-version_commands] )) ||
+_zkstack__dev__prover__insert-version_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev prover insert-version commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__send-transactions_commands] )) ||
+_zkstack__dev__send-transactions_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev send-transactions commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__snapshot_commands] )) ||
+_zkstack__dev__snapshot_commands() {
+    local commands; commands=(
+'create:' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack dev snapshot commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__snapshot__create_commands] )) ||
+_zkstack__dev__snapshot__create_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev snapshot create commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__snapshot__help_commands] )) ||
+_zkstack__dev__snapshot__help_commands() {
+    local commands; commands=(
+'create:' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack dev snapshot help commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__snapshot__help__create_commands] )) ||
+_zkstack__dev__snapshot__help__create_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev snapshot help create commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__snapshot__help__help_commands] )) ||
+_zkstack__dev__snapshot__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev snapshot help help commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__status_commands] )) ||
+_zkstack__dev__status_commands() {
+    local commands; commands=(
+'ports:Show used ports' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack dev status commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__status__help_commands] )) ||
+_zkstack__dev__status__help_commands() {
+    local commands; commands=(
+'ports:Show used ports' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack dev status help commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__status__help__help_commands] )) ||
+_zkstack__dev__status__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev status help help commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__status__help__ports_commands] )) ||
+_zkstack__dev__status__help__ports_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev status help ports commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__status__ports_commands] )) ||
+_zkstack__dev__status__ports_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev status ports commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test_commands] )) ||
+_zkstack__dev__test_commands() {
+    local commands; commands=(
+'integration:Run integration tests' \
+'fees:Run fees test' \
+'revert:Run revert tests' \
+'recovery:Run recovery tests' \
+'upgrade:Run upgrade tests' \
+'build:Build all test dependencies' \
+'rust:Run unit-tests, accepts optional cargo test flags' \
+'l1-contracts:Run L1 contracts tests' \
+'prover:Run prover tests' \
+'wallet:Print test wallets information' \
+'loadtest:Run loadtest' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack dev test commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__build_commands] )) ||
+_zkstack__dev__test__build_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test build commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__fees_commands] )) ||
+_zkstack__dev__test__fees_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test fees commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__help_commands] )) ||
+_zkstack__dev__test__help_commands() {
+    local commands; commands=(
+'integration:Run integration tests' \
+'fees:Run fees test' \
+'revert:Run revert tests' \
+'recovery:Run recovery tests' \
+'upgrade:Run upgrade tests' \
+'build:Build all test dependencies' \
+'rust:Run unit-tests, accepts optional cargo test flags' \
+'l1-contracts:Run L1 contracts tests' \
+'prover:Run prover tests' \
+'wallet:Print test wallets information' \
+'loadtest:Run loadtest' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack dev test help commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__help__build_commands] )) ||
+_zkstack__dev__test__help__build_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test help build commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__help__fees_commands] )) ||
+_zkstack__dev__test__help__fees_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test help fees commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__help__help_commands] )) ||
+_zkstack__dev__test__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test help help commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__help__integration_commands] )) ||
+_zkstack__dev__test__help__integration_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test help integration commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__help__l1-contracts_commands] )) ||
+_zkstack__dev__test__help__l1-contracts_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test help l1-contracts commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__help__loadtest_commands] )) ||
+_zkstack__dev__test__help__loadtest_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test help loadtest commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__help__prover_commands] )) ||
+_zkstack__dev__test__help__prover_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test help prover commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__help__recovery_commands] )) ||
+_zkstack__dev__test__help__recovery_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test help recovery commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__help__revert_commands] )) ||
+_zkstack__dev__test__help__revert_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test help revert commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__help__rust_commands] )) ||
+_zkstack__dev__test__help__rust_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test help rust commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__help__upgrade_commands] )) ||
+_zkstack__dev__test__help__upgrade_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test help upgrade commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__help__wallet_commands] )) ||
+_zkstack__dev__test__help__wallet_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test help wallet commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__integration_commands] )) ||
+_zkstack__dev__test__integration_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test integration commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__l1-contracts_commands] )) ||
+_zkstack__dev__test__l1-contracts_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test l1-contracts commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__loadtest_commands] )) ||
+_zkstack__dev__test__loadtest_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test loadtest commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__prover_commands] )) ||
+_zkstack__dev__test__prover_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test prover commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__recovery_commands] )) ||
+_zkstack__dev__test__recovery_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test recovery commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__revert_commands] )) ||
+_zkstack__dev__test__revert_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test revert commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__rust_commands] )) ||
+_zkstack__dev__test__rust_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test rust commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__upgrade_commands] )) ||
+_zkstack__dev__test__upgrade_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test upgrade commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__wallet_commands] )) ||
+_zkstack__dev__test__wallet_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test wallet commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem_commands] )) ||
+_zkstack__ecosystem_commands() {
+    local commands; commands=(
+'create:Create a new ecosystem and chain, setting necessary configurations for later initialization' \
+'build-transactions:Create transactions to build ecosystem contracts' \
+'init:Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' \
+'change-default-chain:Change the default chain' \
+'setup-observability:Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack ecosystem commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__build-transactions_commands] )) ||
+_zkstack__ecosystem__build-transactions_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem build-transactions commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__change-default-chain_commands] )) ||
+_zkstack__ecosystem__change-default-chain_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem change-default-chain commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__create_commands] )) ||
+_zkstack__ecosystem__create_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem create commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__help_commands] )) ||
+_zkstack__ecosystem__help_commands() {
+    local commands; commands=(
+'create:Create a new ecosystem and chain, setting necessary configurations for later initialization' \
+'build-transactions:Create transactions to build ecosystem contracts' \
+'init:Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' \
+'change-default-chain:Change the default chain' \
+'setup-observability:Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack ecosystem help commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__help__build-transactions_commands] )) ||
+_zkstack__ecosystem__help__build-transactions_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem help build-transactions commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__help__change-default-chain_commands] )) ||
+_zkstack__ecosystem__help__change-default-chain_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem help change-default-chain commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__help__create_commands] )) ||
+_zkstack__ecosystem__help__create_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem help create commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__help__help_commands] )) ||
+_zkstack__ecosystem__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem help help commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__help__init_commands] )) ||
+_zkstack__ecosystem__help__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem help init commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__help__setup-observability_commands] )) ||
+_zkstack__ecosystem__help__setup-observability_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem help setup-observability commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__init_commands] )) ||
+_zkstack__ecosystem__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem init commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__setup-observability_commands] )) ||
+_zkstack__ecosystem__setup-observability_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem setup-observability commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer_commands] )) ||
+_zkstack__explorer_commands() {
+    local commands; commands=(
+'init:Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' \
+'run-backend:Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' \
+'run:Run explorer app' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack explorer commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__help_commands] )) ||
+_zkstack__explorer__help_commands() {
+    local commands; commands=(
+'init:Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' \
+'run-backend:Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' \
+'run:Run explorer app' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack explorer help commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__help__help_commands] )) ||
+_zkstack__explorer__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack explorer help help commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__help__init_commands] )) ||
+_zkstack__explorer__help__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack explorer help init commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__help__run_commands] )) ||
+_zkstack__explorer__help__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack explorer help run commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__help__run-backend_commands] )) ||
+_zkstack__explorer__help__run-backend_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack explorer help run-backend commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__init_commands] )) ||
+_zkstack__explorer__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack explorer init commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__run_commands] )) ||
+_zkstack__explorer__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack explorer run commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__run-backend_commands] )) ||
+_zkstack__explorer__run-backend_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack explorer run-backend commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node_commands] )) ||
+_zkstack__external-node_commands() {
+    local commands; commands=(
+'configs:Prepare configs for EN' \
+'init:Init databases' \
+'build:Build external node' \
+'run:Run external node' \
+'wait:Wait for external node to start' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack external-node commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__build_commands] )) ||
+_zkstack__external-node__build_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node build commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__configs_commands] )) ||
+_zkstack__external-node__configs_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node configs commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__help_commands] )) ||
+_zkstack__external-node__help_commands() {
+    local commands; commands=(
+'configs:Prepare configs for EN' \
+'init:Init databases' \
+'build:Build external node' \
+'run:Run external node' \
+'wait:Wait for external node to start' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack external-node help commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__help__build_commands] )) ||
+_zkstack__external-node__help__build_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node help build commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__help__configs_commands] )) ||
+_zkstack__external-node__help__configs_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node help configs commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__help__help_commands] )) ||
+_zkstack__external-node__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node help help commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__help__init_commands] )) ||
+_zkstack__external-node__help__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node help init commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__help__run_commands] )) ||
+_zkstack__external-node__help__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node help run commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__help__wait_commands] )) ||
+_zkstack__external-node__help__wait_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node help wait commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__init_commands] )) ||
+_zkstack__external-node__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node init commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__run_commands] )) ||
+_zkstack__external-node__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node run commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__wait_commands] )) ||
+_zkstack__external-node__wait_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node wait commands' commands "$@"
+}
+(( $+functions[_zkstack__help_commands] )) ||
+_zkstack__help_commands() {
+    local commands; commands=(
+'autocomplete:Create shell autocompletion files' \
+'ecosystem:Ecosystem related commands' \
+'chain:Chain related commands' \
+'dev:Supervisor related commands' \
+'prover:Prover related commands' \
+'server:Run server' \
+'external-node:External Node related commands' \
+'containers:Run containers for local development' \
+'contract-verifier:Run contract verifier' \
+'portal:Run dapp-portal' \
+'explorer:Run block-explorer' \
+'consensus:Consensus utilities' \
+'update:Update ZKsync' \
+'markdown:Print markdown help' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack help commands' commands "$@"
+}
+(( $+functions[_zkstack__help__autocomplete_commands] )) ||
+_zkstack__help__autocomplete_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help autocomplete commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain_commands] )) ||
+_zkstack__help__chain_commands() {
+    local commands; commands=(
+'create:Create a new chain, setting the necessary configurations for later initialization' \
+'build-transactions:Create unsigned transactions for chain deployment' \
+'init:Initialize chain, deploying necessary contracts and performing on-chain operations' \
+'genesis:Run server genesis' \
+'register-chain:Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note\: After completion, L2 governor can accept ownership by running \`accept-chain-ownership\`' \
+'deploy-l2-contracts:Deploy all L2 contracts (executed by L1 governor)' \
+'accept-chain-ownership:Accept ownership of L2 chain (executed by L2 governor). This command should be run after \`register-chain\` to accept ownership of newly created DiamondProxy contract' \
+'initialize-bridges:Initialize bridges on L2' \
+'deploy-consensus-registry:Deploy L2 consensus registry' \
+'deploy-multicall3:Deploy L2 multicall3' \
+'deploy-timestamp-asserter:Deploy L2 TimestampAsserter' \
+'deploy-upgrader:Deploy Default Upgrader' \
+'deploy-paymaster:Deploy paymaster smart contract' \
+'update-token-multiplier-setter:Update Token Multiplier Setter address on L1' \
+    )
+    _describe -t commands 'zkstack help chain commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__accept-chain-ownership_commands] )) ||
+_zkstack__help__chain__accept-chain-ownership_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain accept-chain-ownership commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__build-transactions_commands] )) ||
+_zkstack__help__chain__build-transactions_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain build-transactions commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__create_commands] )) ||
+_zkstack__help__chain__create_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain create commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__deploy-consensus-registry_commands] )) ||
+_zkstack__help__chain__deploy-consensus-registry_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain deploy-consensus-registry commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__deploy-l2-contracts_commands] )) ||
+_zkstack__help__chain__deploy-l2-contracts_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain deploy-l2-contracts commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__deploy-multicall3_commands] )) ||
+_zkstack__help__chain__deploy-multicall3_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain deploy-multicall3 commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__deploy-paymaster_commands] )) ||
+_zkstack__help__chain__deploy-paymaster_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain deploy-paymaster commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__deploy-timestamp-asserter_commands] )) ||
+_zkstack__help__chain__deploy-timestamp-asserter_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain deploy-timestamp-asserter commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__deploy-upgrader_commands] )) ||
+_zkstack__help__chain__deploy-upgrader_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain deploy-upgrader commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__genesis_commands] )) ||
+_zkstack__help__chain__genesis_commands() {
+    local commands; commands=(
+'init-database:Initialize databases' \
+'server:Runs server genesis' \
+    )
+    _describe -t commands 'zkstack help chain genesis commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__genesis__init-database_commands] )) ||
+_zkstack__help__chain__genesis__init-database_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain genesis init-database commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__genesis__server_commands] )) ||
+_zkstack__help__chain__genesis__server_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain genesis server commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__init_commands] )) ||
+_zkstack__help__chain__init_commands() {
+    local commands; commands=(
+'configs:Initialize chain configs' \
+    )
+    _describe -t commands 'zkstack help chain init commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__init__configs_commands] )) ||
+_zkstack__help__chain__init__configs_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain init configs commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__initialize-bridges_commands] )) ||
+_zkstack__help__chain__initialize-bridges_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain initialize-bridges commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__register-chain_commands] )) ||
+_zkstack__help__chain__register-chain_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain register-chain commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__update-token-multiplier-setter_commands] )) ||
+_zkstack__help__chain__update-token-multiplier-setter_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain update-token-multiplier-setter commands' commands "$@"
+}
+(( $+functions[_zkstack__help__consensus_commands] )) ||
+_zkstack__help__consensus_commands() {
+    local commands; commands=(
+'set-attester-committee:Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml' \
+'get-attester-committee:Fetches the attester committee from the consensus registry contract' \
+'wait-for-registry:Wait until the consensus registry contract is deployed to L2' \
+    )
+    _describe -t commands 'zkstack help consensus commands' commands "$@"
+}
+(( $+functions[_zkstack__help__consensus__get-attester-committee_commands] )) ||
+_zkstack__help__consensus__get-attester-committee_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help consensus get-attester-committee commands' commands "$@"
+}
+(( $+functions[_zkstack__help__consensus__set-attester-committee_commands] )) ||
+_zkstack__help__consensus__set-attester-committee_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help consensus set-attester-committee commands' commands "$@"
+}
+(( $+functions[_zkstack__help__consensus__wait-for-registry_commands] )) ||
+_zkstack__help__consensus__wait-for-registry_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help consensus wait-for-registry commands' commands "$@"
+}
+(( $+functions[_zkstack__help__containers_commands] )) ||
+_zkstack__help__containers_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help containers commands' commands "$@"
+}
+(( $+functions[_zkstack__help__contract-verifier_commands] )) ||
+_zkstack__help__contract-verifier_commands() {
+    local commands; commands=(
+'build:Build contract verifier binary' \
+'run:Run contract verifier' \
+'wait:Wait for contract verifier to start' \
+'init:Download required binaries for contract verifier' \
+    )
+    _describe -t commands 'zkstack help contract-verifier commands' commands "$@"
+}
+(( $+functions[_zkstack__help__contract-verifier__build_commands] )) ||
+_zkstack__help__contract-verifier__build_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help contract-verifier build commands' commands "$@"
+}
+(( $+functions[_zkstack__help__contract-verifier__init_commands] )) ||
+_zkstack__help__contract-verifier__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help contract-verifier init commands' commands "$@"
+}
+(( $+functions[_zkstack__help__contract-verifier__run_commands] )) ||
+_zkstack__help__contract-verifier__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help contract-verifier run commands' commands "$@"
+}
+(( $+functions[_zkstack__help__contract-verifier__wait_commands] )) ||
+_zkstack__help__contract-verifier__wait_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help contract-verifier wait commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev_commands] )) ||
+_zkstack__help__dev_commands() {
+    local commands; commands=(
+'database:Database related commands' \
+'test:Run tests' \
+'clean:Clean artifacts' \
+'snapshot:Snapshots creator' \
+'lint:Lint code' \
+'fmt:Format code' \
+'prover:Protocol version used by provers' \
+'contracts:Build contracts' \
+'config-writer:Overwrite general config' \
+'send-transactions:Send transactions from file' \
+'status:Get status of the server' \
+'generate-genesis:Generate new genesis file based on current contracts' \
+    )
+    _describe -t commands 'zkstack help dev commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__clean_commands] )) ||
+_zkstack__help__dev__clean_commands() {
+    local commands; commands=(
+'all:Remove containers and contracts cache' \
+'containers:Remove containers and docker volumes' \
+'contracts-cache:Remove contracts caches' \
+    )
+    _describe -t commands 'zkstack help dev clean commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__clean__all_commands] )) ||
+_zkstack__help__dev__clean__all_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev clean all commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__clean__containers_commands] )) ||
+_zkstack__help__dev__clean__containers_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev clean containers commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__clean__contracts-cache_commands] )) ||
+_zkstack__help__dev__clean__contracts-cache_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev clean contracts-cache commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__config-writer_commands] )) ||
+_zkstack__help__dev__config-writer_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev config-writer commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__contracts_commands] )) ||
+_zkstack__help__dev__contracts_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev contracts commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database_commands] )) ||
+_zkstack__help__dev__database_commands() {
+    local commands; commands=(
+'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \
+'drop:Drop databases. If no databases are selected, all databases will be dropped.' \
+'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \
+'new-migration:Create new migration' \
+'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \
+'reset:Reset databases. If no databases are selected, all databases will be reset.' \
+'setup:Setup databases. If no databases are selected, all databases will be setup.' \
+    )
+    _describe -t commands 'zkstack help dev database commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database__check-sqlx-data_commands] )) ||
+_zkstack__help__dev__database__check-sqlx-data_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev database check-sqlx-data commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database__drop_commands] )) ||
+_zkstack__help__dev__database__drop_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev database drop commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database__migrate_commands] )) ||
+_zkstack__help__dev__database__migrate_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev database migrate commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database__new-migration_commands] )) ||
+_zkstack__help__dev__database__new-migration_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev database new-migration commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database__prepare_commands] )) ||
+_zkstack__help__dev__database__prepare_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev database prepare commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database__reset_commands] )) ||
+_zkstack__help__dev__database__reset_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev database reset commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database__setup_commands] )) ||
+_zkstack__help__dev__database__setup_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev database setup commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__fmt_commands] )) ||
+_zkstack__help__dev__fmt_commands() {
+    local commands; commands=(
+'rustfmt:' \
+'contract:' \
+'prettier:' \
+    )
+    _describe -t commands 'zkstack help dev fmt commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__fmt__contract_commands] )) ||
+_zkstack__help__dev__fmt__contract_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev fmt contract commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__fmt__prettier_commands] )) ||
+_zkstack__help__dev__fmt__prettier_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev fmt prettier commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__fmt__rustfmt_commands] )) ||
+_zkstack__help__dev__fmt__rustfmt_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev fmt rustfmt commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__generate-genesis_commands] )) ||
+_zkstack__help__dev__generate-genesis_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev generate-genesis commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__lint_commands] )) ||
+_zkstack__help__dev__lint_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev lint commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__prover_commands] )) ||
+_zkstack__help__dev__prover_commands() {
+    local commands; commands=(
+'info:' \
+'insert-batch:' \
+'insert-version:' \
+    )
+    _describe -t commands 'zkstack help dev prover commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__prover__info_commands] )) ||
+_zkstack__help__dev__prover__info_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev prover info commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__prover__insert-batch_commands] )) ||
+_zkstack__help__dev__prover__insert-batch_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev prover insert-batch commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__prover__insert-version_commands] )) ||
+_zkstack__help__dev__prover__insert-version_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev prover insert-version commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__send-transactions_commands] )) ||
+_zkstack__help__dev__send-transactions_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev send-transactions commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__snapshot_commands] )) ||
+_zkstack__help__dev__snapshot_commands() {
+    local commands; commands=(
+'create:' \
+    )
+    _describe -t commands 'zkstack help dev snapshot commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__snapshot__create_commands] )) ||
+_zkstack__help__dev__snapshot__create_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev snapshot create commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__status_commands] )) ||
+_zkstack__help__dev__status_commands() {
+    local commands; commands=(
+'ports:Show used ports' \
+    )
+    _describe -t commands 'zkstack help dev status commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__status__ports_commands] )) ||
+_zkstack__help__dev__status__ports_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev status ports commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test_commands] )) ||
+_zkstack__help__dev__test_commands() {
+    local commands; commands=(
+'integration:Run integration tests' \
+'fees:Run fees test' \
+'revert:Run revert tests' \
+'recovery:Run recovery tests' \
+'upgrade:Run upgrade tests' \
+'build:Build all test dependencies' \
+'rust:Run unit-tests, accepts optional cargo test flags' \
+'l1-contracts:Run L1 contracts tests' \
+'prover:Run prover tests' \
+'wallet:Print test wallets information' \
+'loadtest:Run loadtest' \
+    )
+    _describe -t commands 'zkstack help dev test commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__build_commands] )) ||
+_zkstack__help__dev__test__build_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test build commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__fees_commands] )) ||
+_zkstack__help__dev__test__fees_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test fees commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__integration_commands] )) ||
+_zkstack__help__dev__test__integration_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test integration commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__l1-contracts_commands] )) ||
+_zkstack__help__dev__test__l1-contracts_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test l1-contracts commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__loadtest_commands] )) ||
+_zkstack__help__dev__test__loadtest_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test loadtest commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__prover_commands] )) ||
+_zkstack__help__dev__test__prover_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test prover commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__recovery_commands] )) ||
+_zkstack__help__dev__test__recovery_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test recovery commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__revert_commands] )) ||
+_zkstack__help__dev__test__revert_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test revert commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__rust_commands] )) ||
+_zkstack__help__dev__test__rust_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test rust commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__upgrade_commands] )) ||
+_zkstack__help__dev__test__upgrade_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test upgrade commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__wallet_commands] )) ||
+_zkstack__help__dev__test__wallet_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test wallet commands' commands "$@"
+}
+(( $+functions[_zkstack__help__ecosystem_commands] )) ||
+_zkstack__help__ecosystem_commands() {
+    local commands; commands=(
+'create:Create a new ecosystem and chain, setting necessary configurations for later initialization' \
+'build-transactions:Create transactions to build ecosystem contracts' \
+'init:Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' \
+'change-default-chain:Change the default chain' \
+'setup-observability:Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' \
+    )
+    _describe -t commands 'zkstack help ecosystem commands' commands "$@"
+}
+(( $+functions[_zkstack__help__ecosystem__build-transactions_commands] )) ||
+_zkstack__help__ecosystem__build-transactions_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help ecosystem build-transactions commands' commands "$@"
+}
+(( $+functions[_zkstack__help__ecosystem__change-default-chain_commands] )) ||
+_zkstack__help__ecosystem__change-default-chain_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help ecosystem change-default-chain commands' commands "$@"
+}
+(( $+functions[_zkstack__help__ecosystem__create_commands] )) ||
+_zkstack__help__ecosystem__create_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help ecosystem create commands' commands "$@"
+}
+(( $+functions[_zkstack__help__ecosystem__init_commands] )) ||
+_zkstack__help__ecosystem__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help ecosystem init commands' commands "$@"
+}
+(( $+functions[_zkstack__help__ecosystem__setup-observability_commands] )) ||
+_zkstack__help__ecosystem__setup-observability_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help ecosystem setup-observability commands' commands "$@"
+}
+(( $+functions[_zkstack__help__explorer_commands] )) ||
+_zkstack__help__explorer_commands() {
+    local commands; commands=(
+'init:Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' \
+'run-backend:Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' \
+'run:Run explorer app' \
+    )
+    _describe -t commands 'zkstack help explorer commands' commands "$@"
+}
+(( $+functions[_zkstack__help__explorer__init_commands] )) ||
+_zkstack__help__explorer__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help explorer init commands' commands "$@"
+}
+(( $+functions[_zkstack__help__explorer__run_commands] )) ||
+_zkstack__help__explorer__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help explorer run commands' commands "$@"
+}
+(( $+functions[_zkstack__help__explorer__run-backend_commands] )) ||
+_zkstack__help__explorer__run-backend_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help explorer run-backend commands' commands "$@"
+}
+(( $+functions[_zkstack__help__external-node_commands] )) ||
+_zkstack__help__external-node_commands() {
+    local commands; commands=(
+'configs:Prepare configs for EN' \
+'init:Init databases' \
+'build:Build external node' \
+'run:Run external node' \
+'wait:Wait for external node to start' \
+    )
+    _describe -t commands 'zkstack help external-node commands' commands "$@"
+}
+(( $+functions[_zkstack__help__external-node__build_commands] )) ||
+_zkstack__help__external-node__build_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help external-node build commands' commands "$@"
+}
+(( $+functions[_zkstack__help__external-node__configs_commands] )) ||
+_zkstack__help__external-node__configs_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help external-node configs commands' commands "$@"
+}
+(( $+functions[_zkstack__help__external-node__init_commands] )) ||
+_zkstack__help__external-node__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help external-node init commands' commands "$@"
+}
+(( $+functions[_zkstack__help__external-node__run_commands] )) ||
+_zkstack__help__external-node__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help external-node run commands' commands "$@"
+}
+(( $+functions[_zkstack__help__external-node__wait_commands] )) ||
+_zkstack__help__external-node__wait_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help external-node wait commands' commands "$@"
+}
+(( $+functions[_zkstack__help__help_commands] )) ||
+_zkstack__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help help commands' commands "$@"
+}
+(( $+functions[_zkstack__help__markdown_commands] )) ||
+_zkstack__help__markdown_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help markdown commands' commands "$@"
+}
+(( $+functions[_zkstack__help__portal_commands] )) ||
+_zkstack__help__portal_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help portal commands' commands "$@"
+}
+(( $+functions[_zkstack__help__prover_commands] )) ||
+_zkstack__help__prover_commands() {
+    local commands; commands=(
+'init:Initialize prover' \
+'setup-keys:Generate setup keys' \
+'run:Run prover' \
+'init-bellman-cuda:Initialize bellman-cuda' \
+'compressor-keys:Download compressor keys' \
+    )
+    _describe -t commands 'zkstack help prover commands' commands "$@"
+}
+(( $+functions[_zkstack__help__prover__compressor-keys_commands] )) ||
+_zkstack__help__prover__compressor-keys_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help prover compressor-keys commands' commands "$@"
+}
+(( $+functions[_zkstack__help__prover__init_commands] )) ||
+_zkstack__help__prover__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help prover init commands' commands "$@"
+}
+(( $+functions[_zkstack__help__prover__init-bellman-cuda_commands] )) ||
+_zkstack__help__prover__init-bellman-cuda_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help prover init-bellman-cuda commands' commands "$@"
+}
+(( $+functions[_zkstack__help__prover__run_commands] )) ||
+_zkstack__help__prover__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help prover run commands' commands "$@"
+}
+(( $+functions[_zkstack__help__prover__setup-keys_commands] )) ||
+_zkstack__help__prover__setup-keys_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help prover setup-keys commands' commands "$@"
+}
+(( $+functions[_zkstack__help__server_commands] )) ||
+_zkstack__help__server_commands() {
+    local commands; commands=(
+'build:Builds server' \
+'run:Runs server' \
+'wait:Waits for server to start' \
+    )
+    _describe -t commands 'zkstack help server commands' commands "$@"
+}
+(( $+functions[_zkstack__help__server__build_commands] )) ||
+_zkstack__help__server__build_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help server build commands' commands "$@"
+}
+(( $+functions[_zkstack__help__server__run_commands] )) ||
+_zkstack__help__server__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help server run commands' commands "$@"
+}
+(( $+functions[_zkstack__help__server__wait_commands] )) ||
+_zkstack__help__server__wait_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help server wait commands' commands "$@"
+}
+(( $+functions[_zkstack__help__update_commands] )) ||
+_zkstack__help__update_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help update commands' commands "$@"
+}
+(( $+functions[_zkstack__markdown_commands] )) ||
+_zkstack__markdown_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack markdown commands' commands "$@"
+}
+(( $+functions[_zkstack__portal_commands] )) ||
+_zkstack__portal_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack portal commands' commands "$@"
+}
+(( $+functions[_zkstack__prover_commands] )) ||
+_zkstack__prover_commands() {
+    local commands; commands=(
+'init:Initialize prover' \
+'setup-keys:Generate setup keys' \
+'run:Run prover' \
+'init-bellman-cuda:Initialize bellman-cuda' \
+'compressor-keys:Download compressor keys' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack prover commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__compressor-keys_commands] )) ||
+_zkstack__prover__compressor-keys_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover compressor-keys commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__help_commands] )) ||
+_zkstack__prover__help_commands() {
+    local commands; commands=(
+'init:Initialize prover' \
+'setup-keys:Generate setup keys' \
+'run:Run prover' \
+'init-bellman-cuda:Initialize bellman-cuda' \
+'compressor-keys:Download compressor keys' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack prover help commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__help__compressor-keys_commands] )) ||
+_zkstack__prover__help__compressor-keys_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover help compressor-keys commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__help__help_commands] )) ||
+_zkstack__prover__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover help help commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__help__init_commands] )) ||
+_zkstack__prover__help__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover help init commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__help__init-bellman-cuda_commands] )) ||
+_zkstack__prover__help__init-bellman-cuda_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover help init-bellman-cuda commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__help__run_commands] )) ||
+_zkstack__prover__help__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover help run commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__help__setup-keys_commands] )) ||
+_zkstack__prover__help__setup-keys_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover help setup-keys commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__init_commands] )) ||
+_zkstack__prover__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover init commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__init-bellman-cuda_commands] )) ||
+_zkstack__prover__init-bellman-cuda_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover init-bellman-cuda commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__run_commands] )) ||
+_zkstack__prover__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover run commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__setup-keys_commands] )) ||
+_zkstack__prover__setup-keys_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover setup-keys commands' commands "$@"
+}
+(( $+functions[_zkstack__server_commands] )) ||
+_zkstack__server_commands() {
+    local commands; commands=(
+'build:Builds server' \
+'run:Runs server' \
+'wait:Waits for server to start' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack server commands' commands "$@"
+}
+(( $+functions[_zkstack__server__build_commands] )) ||
+_zkstack__server__build_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack server build commands' commands "$@"
+}
+(( $+functions[_zkstack__server__help_commands] )) ||
+_zkstack__server__help_commands() {
+    local commands; commands=(
+'build:Builds server' \
+'run:Runs server' \
+'wait:Waits for server to start' \
+'help:Print this message or the help
of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack server help commands' commands "$@" +} +(( $+functions[_zkstack__server__help__build_commands] )) || +_zkstack__server__help__build_commands() { + local commands; commands=() + _describe -t commands 'zkstack server help build commands' commands "$@" +} +(( $+functions[_zkstack__server__help__help_commands] )) || +_zkstack__server__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack server help help commands' commands "$@" +} +(( $+functions[_zkstack__server__help__run_commands] )) || +_zkstack__server__help__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack server help run commands' commands "$@" +} +(( $+functions[_zkstack__server__help__wait_commands] )) || +_zkstack__server__help__wait_commands() { + local commands; commands=() + _describe -t commands 'zkstack server help wait commands' commands "$@" +} +(( $+functions[_zkstack__server__run_commands] )) || +_zkstack__server__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack server run commands' commands "$@" +} +(( $+functions[_zkstack__server__wait_commands] )) || +_zkstack__server__wait_commands() { + local commands; commands=() + _describe -t commands 'zkstack server wait commands' commands "$@" +} +(( $+functions[_zkstack__update_commands] )) || +_zkstack__update_commands() { + local commands; commands=() + _describe -t commands 'zkstack update commands' commands "$@" +} + +if [ "$funcstack[1]" = "_zkstack" ]; then + _zkstack "$@" +else + compdef _zkstack zkstack +fi diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.fish b/zkstack_cli/crates/zkstack/completion/zkstack.fish new file mode 100644 index 000000000000..8a5b338fcda2 --- /dev/null +++ b/zkstack_cli/crates/zkstack/completion/zkstack.fish @@ -0,0 +1,784 @@ +# Print an optspec for argparse to handle cmd's options that are independent of any subcommand. +function __fish_zkstack_global_optspecs + string join \n v/verbose chain= ignore-prerequisites h/help V/version +end + +function __fish_zkstack_needs_command + # Figure out if the current invocation already has a command. + set -l cmd (commandline -opc) + set -e cmd[1] + argparse -s (__fish_zkstack_global_optspecs) -- $cmd 2>/dev/null + or return + if set -q argv[1] + # Also print the command, so this can be used to figure out what it is. 
+        echo $argv[1]
+        return 1
+    end
+    return 0
+end
+
+function __fish_zkstack_using_subcommand
+    set -l cmd (__fish_zkstack_needs_command)
+    test -z "$cmd"
+    and return 1
+    contains -- $cmd[1] $argv
+end
+
+complete -c zkstack -n "__fish_zkstack_needs_command" -l chain -d 'Chain to use' -r
+complete -c zkstack -n "__fish_zkstack_needs_command" -s v -l verbose -d 'Verbose mode'
+complete -c zkstack -n "__fish_zkstack_needs_command" -l ignore-prerequisites -d 'Ignores prerequisites checks'
+complete -c zkstack -n "__fish_zkstack_needs_command" -s h -l help -d 'Print help'
+complete -c zkstack -n "__fish_zkstack_needs_command" -s V -l version -d 'Print version'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "autocomplete" -d 'Create shell autocompletion files'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "ecosystem" -d 'Ecosystem related commands'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "chain" -d 'Chain related commands'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "dev" -d 'Supervisor related commands'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "prover" -d 'Prover related commands'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "server" -d 'Run server'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "external-node" -d 'External Node related commands'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "containers" -d 'Run containers for local development'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "contract-verifier" -d 'Run contract verifier'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "portal" -d 'Run dapp-portal'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "explorer" -d 'Run block-explorer'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "consensus" -d 'Consensus utilities'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "update" -d 'Update ZKsync'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "markdown" -d 'Print markdown help'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "help" -d 'Print this message or the help of the given subcommand(s)'
+complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -l generate -d 'The shell to generate the autocomplete script for' -r -f -a "{bash\t'',elvish\t'',fish\t'',powershell\t'',zsh\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -s o -l out -d 'The out directory to write the autocomplete script to' -r -F
+complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -l chain -d 'Chain to use' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -s v -l verbose -d 'Verbose mode'
+complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -l ignore-prerequisites -d 'Ignores prerequisites checks'
+complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -s h -l help -d 'Print help'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -l chain -d 'Chain to use' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -s v -l verbose -d 'Verbose mode'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -l ignore-prerequisites -d 'Ignores prerequisites checks'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -s h -l help -d 'Print help'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "create" -d 'Create a new ecosystem and chain, setting necessary configurations for later initialization'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "build-transactions" -d 'Create transactions to build ecosystem contracts'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "init" -d 'Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "change-default-chain" -d 'Change the default chain'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "setup-observability" -d 'Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l ecosystem-name -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l l1-network -d 'L1 Network' -r -f -a "{localhost\t'',sepolia\t'',holesky\t'',mainnet\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l link-to-code -d 'Code link' -r -f -a "(__fish_complete_directories)"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l chain-name -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l chain-id -d 'Chain ID' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l prover-mode -d 'Prover options' -r -f -a "{no-proofs\t'',gpu\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l wallet-creation -d 'Wallet options' -r -f -a "{localhost\t'Load wallets from localhost mnemonic, they are funded for localhost env',random\t'Generate random wallets',empty\t'Generate placeholder wallets',in-file\t'Specify file with wallets'}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l wallet-path -d 'Wallet path' -r -F
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l l1-batch-commit-data-generator-mode -d 'Commit data generation mode' -r -f -a "{rollup\t'',validium\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-address -d 'Base token address' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-price-nominator -d 'Base token nominator' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-price-denominator -d 'Base token denominator' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l set-as-default -d 'Set as default chain' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l evm-emulator -d 'Enable EVM emulator' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l start-containers -d 'Start reth and postgres containers after creation' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l chain -d 'Chain to use' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l legacy-bridge
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -s v -l verbose -d 'Verbose mode'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l ignore-prerequisites -d 'Ignores prerequisites checks'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -s h -l help -d 'Print help (see more with \'--help\')'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l sender -d 'Address of the transaction sender' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l l1-rpc-url -d 'L1 RPC URL' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s o -l out -d 'Output directory for the generated files' -r -F
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verifier-url -d 'Verifier URL, if using a custom provider' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verifier-api-key -d 'Verifier API key' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l chain -d 'Chain to use' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l resume
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s v -l verbose -d 'Verbose mode'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l ignore-prerequisites -d 'Ignores prerequisites checks'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s h -l help -d 'Print help (see more with \'--help\')'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l deploy-erc20 -d 'Deploy ERC20 contracts' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l deploy-ecosystem -d 'Deploy ecosystem contracts' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l ecosystem-contracts-path -d 'Path to ecosystem contracts' -r -F
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l l1-rpc-url -d 'L1 RPC URL' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verifier-url -d 'Verifier URL, if using a custom provider' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verifier-api-key -d 'Verifier API key' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l deploy-paymaster -d 'Deploy Paymaster contract' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l server-db-url -d 'Server database url without database name' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l server-db-name -d 'Server database name' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s o -l observability -d 'Enable Grafana' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l resume
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s d -l dont-drop
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l ecosystem-only -d 'Initialize ecosystem only and skip chain initialization (chain can be initialized later with `chain init` subcommand)'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l dev -d 'Use defaults for all options and flags. Suitable for local development' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l no-port-reallocation -d 'Do not reallocate ports' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "create" -d 'Create a new ecosystem and chain, setting necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "build-transactions" -d 'Create transactions to build ecosystem contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "init" -d 'Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "change-default-chain" -d 'Change the default chain' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "setup-observability" -d 'Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' +complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry 
deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "create" -d 'Create a new chain, setting the necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "build-transactions" -d 'Create unsigned transactions for chain deployment' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "init" -d 'Initialize chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "genesis" -d 'Run server genesis' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "register-chain" -d 'Register a new chain on L1 (executed by L1 governor). 
This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note: After completion, L2 governor can accept ownership by running `accept-chain-ownership`' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "deploy-l2-contracts" -d 'Deploy all L2 contracts (executed by L1 governor)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "accept-chain-ownership" -d 'Accept ownership of L2 chain (executed by L2 governor). This command should be run after `register-chain` to accept ownership of newly created DiamondProxy contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "initialize-bridges" -d 'Initialize bridges on L2' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "deploy-consensus-registry" -d 'Deploy L2 consensus registry' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "deploy-multicall3" -d 'Deploy L2 multicall3' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "deploy-timestamp-asserter" -d 'Deploy L2 TimestampAsserter' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "deploy-upgrader" -d 'Deploy Default Upgrader' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges 
deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "deploy-paymaster" -d 'Deploy paymaster smart contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "update-token-multiplier-setter" -d 'Update Token Multiplier Setter address on L1' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l chain-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l chain-id -d 'Chain ID' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l prover-mode -d 'Prover options' -r -f -a "{no-proofs\t'',gpu\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l wallet-creation -d 'Wallet options' -r -f -a "{localhost\t'Load wallets from localhost mnemonic, they are funded for localhost env',random\t'Generate random wallets',empty\t'Generate placeholder wallets',in-file\t'Specify file with wallets'}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l wallet-path -d 'Wallet path' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l l1-batch-commit-data-generator-mode -d 'Commit data generation mode' -r -f -a "{rollup\t'',validium\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-address -d 'Base token address' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-price-nominator -d 'Base token nominator' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-price-denominator -d 'Base token denominator' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l set-as-default -d 'Set as default chain' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l evm-emulator -d 'Enable EVM emulator' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l legacy-bridge +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l 
ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s o -l out -d 'Output directory for the generated files' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l l1-rpc-url -d 'L1 RPC URL' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l server-db-url -d 'Server database url without database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l server-db-name -d 'Server database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and 
__fish_seen_subcommand_from init" -l deploy-paymaster -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l l1-rpc-url -d 'L1 RPC URL' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s d -l dont-drop +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l no-port-reallocation -d 'Do not reallocate ports' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l dev -d 'Use defaults for all options and flags. Suitable for local development' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -f -a "configs" -d 'Initialize chain configs' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l server-db-url -d 'Server database url without database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l server-db-name -d 'Server database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s d -l dev -d 'Use default database urls and names' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s d -l dont-drop +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -f -a "init-database" -d 'Initialize databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -f -a "server" -d 'Runs server genesis' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete 
-c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verifier -d 
'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verifier -d 'Verifier to use' -r -f -a 
"{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-timestamp-asserter" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-timestamp-asserter" -l verifier -d 'Verifier to use' -r -f -a 
"{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-timestamp-asserter" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-timestamp-asserter" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-timestamp-asserter" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-timestamp-asserter" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-timestamp-asserter" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-timestamp-asserter" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-timestamp-asserter" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-timestamp-asserter" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n 
"__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "create" -d 'Create a new chain, setting the necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "build-transactions" -d 'Create unsigned transactions for chain deployment' +complete -c zkstack -n 
"__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "init" -d 'Initialize chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "genesis" -d 'Run server genesis' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "register-chain" -d 'Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note: After completion, L2 governor can accept ownership by running `accept-chain-ownership`' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-l2-contracts" -d 'Deploy all L2 contracts (executed by L1 governor)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "accept-chain-ownership" -d 'Accept ownership of L2 chain (executed by L2 governor). This command should be run after `register-chain` to accept ownership of newly created DiamondProxy contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "initialize-bridges" -d 'Initialize bridges on L2' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-consensus-registry" -d 'Deploy L2 consensus registry' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-multicall3" -d 'Deploy L2 multicall3' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-timestamp-asserter" -d 'Deploy L2 TimestampAsserter' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-upgrader" -d 'Deploy Default Upgrader' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-paymaster" -d 'Deploy paymaster smart contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "update-token-multiplier-setter" -d 'Update Token Multiplier Setter address on L1' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status 
generate-genesis help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "database" -d 'Database related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "test" -d 'Run tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "clean" -d 'Clean artifacts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "snapshot" -d 'Snapshots creator' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "lint" -d 'Lint code' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "fmt" -d 'Format code' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "prover" -d 'Protocol version used by provers' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "contracts" -d 'Build contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "config-writer" -d 'Overwrite general config' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "send-transactions" -d 'Send transactions from file' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "status" -d 'Get status of the server' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "generate-genesis" -d 'Generate new genesis file based on current contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and 
__fish_seen_subcommand_from database" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "check-sqlx-data" -d 'Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "drop" -d 'Drop databases. If no databases are selected, all databases will be dropped.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "migrate" -d 'Migrate databases. If no databases are selected, all databases will be migrated.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "new-migration" -d 'Create new migration' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "prepare" -d 'Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "reset" -d 'Reset databases. If no databases are selected, all databases will be reset.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "setup" -d 'Setup databases. If no databases are selected, all databases will be setup.' 
+complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "integration" -d 'Run integration tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "fees" -d 'Run fees test' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "revert" -d 'Run revert tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "recovery" -d 'Run recovery tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "upgrade" -d 'Run upgrade tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "build" -d 'Build all test dependencies' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "rust" -d 'Run unit-tests, accepts optional cargo test flags' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "l1-contracts" -d 'Run L1 contracts tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "prover" -d 'Run prover tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "wallet" -d 'Print test wallets information' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "loadtest" -d 'Run loadtest' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "all" -d 'Remove containers and contracts cache' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "containers" -d 'Remove containers and docker volumes' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "contracts-cache" -d 'Remove contracts caches' +complete -c zkstack -n 
"__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -f -a "create" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s t -l targets -r -f -a "{md\t'',sol\t'',js\t'',ts\t'',rs\t'',contracts\t'',autocompletion\t'',rust-toolchain\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s c -l check +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -s c -l check +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "rustfmt" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "contract" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "prettier" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n 
"__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "info" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "insert-batch" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "insert-version" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l l1-contracts -d 'Build L1 contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l l2-contracts -d 'Build L2 contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l system-contracts -d 'Build system contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -s p -l path -d 'Path to the config file to override' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l file -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l private-key -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l l1-rpc-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l confirmations -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; 
and __fish_seen_subcommand_from send-transactions" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -s u -l url -d 'URL of the health check endpoint' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -f -a "ports" -d 'Show used ports' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "database" -d 'Database related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "test" -d 'Run tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "clean" -d 'Clean artifacts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "snapshot" -d 'Snapshots creator' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "lint" -d 'Lint code' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "fmt" -d 'Format code' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "prover" -d 'Protocol version used by provers' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "contracts" -d 'Build contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "config-writer" -d 'Overwrite general config' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "send-transactions" -d 'Send transactions from file' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "status" -d 'Get status of the server' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "generate-genesis" -d 'Generate new genesis file based on current contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a 
"help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "init" -d 'Initialize prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "setup-keys" -d 'Generate setup keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "run" -d 'Run prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "init-bellman-cuda" -d 'Initialize bellman-cuda' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "compressor-keys" -d 'Download compressor keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l proof-store-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bucket-base-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l credentials-file -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bucket-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l location -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l project-id -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l shall-save-to-public-bucket -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-store-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-bucket-base-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-credentials-file -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-bucket-name -r +complete 
-c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-location -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-project-id -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bellman-cuda-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bellman-cuda -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l setup-compressor-key -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l path -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l region -r -f -a "{us\t'',europe\t'',asia\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l mode -r -f -a "{download\t'',generate\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l setup-keys -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l setup-database -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l prover-db-url -d 'Prover database url without database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l prover-db-name -d 'Prover database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -s u -l use-default -d 'Use default database urls and names' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -s d -l dont-drop -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l cloud-type -r -f -a "{gcp\t'',local\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l dev +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l clone +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l region -r -f -a "{us\t'',europe\t'',asia\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l mode -r -f -a "{download\t'',generate\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from 
setup-keys" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l component -r -f -a "{gateway\t'',witness-generator\t'',witness-vector-generator\t'',prover\t'',circuit-prover\t'',compressor\t'',prover-job-monitor\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l round -r -f -a "{all-rounds\t'',basic-circuits\t'',leaf-aggregation\t'',node-aggregation\t'',recursion-tip\t'',scheduler\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l threads -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l max-allocation -d 'Memory allocation limit in bytes (for prover component)' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -s l -l light-wvg-count -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -s h -l heavy-wvg-count -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -s m -l max-allocation -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l docker -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l tag -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l bellman-cuda-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l clone +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -l path -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand 
prover; and __fish_seen_subcommand_from compressor-keys" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "init" -d 'Initialize prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "setup-keys" -d 'Generate setup keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "init-bellman-cuda" -d 'Initialize bellman-cuda' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "compressor-keys" -d 'Download compressor keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and not __fish_seen_subcommand_from build run wait help" -l components -d 'Components of server to run' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and not __fish_seen_subcommand_from build run wait help" -s a -l additional-args -d 'Additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and not __fish_seen_subcommand_from build run wait help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and not __fish_seen_subcommand_from build run wait help" -l genesis -d 'Run server in genesis mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and not __fish_seen_subcommand_from build run wait help" -l uring -d 'Enables uring support for RocksDB' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and not __fish_seen_subcommand_from build run wait help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and not __fish_seen_subcommand_from build run wait help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and not __fish_seen_subcommand_from build run wait help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and not __fish_seen_subcommand_from build run wait help" -f -a "build" -d 'Builds server' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and not __fish_seen_subcommand_from build run wait help" -f -a "run" -d 'Runs server' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and not __fish_seen_subcommand_from build run wait help" -f -a "wait" -d 'Waits for server to start' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and not __fish_seen_subcommand_from build run wait help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from build" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and 
__fish_seen_subcommand_from build" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from build" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from build" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from run" -l components -d 'Components of server to run' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from run" -s a -l additional-args -d 'Additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from run" -l genesis -d 'Run server in genesis mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from run" -l uring -d 'Enables uring support for RocksDB' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from run" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from wait" -s t -l timeout -d 'Wait timeout in seconds' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from wait" -l poll-interval -d 'Poll interval in milliseconds' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from wait" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from wait" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from wait" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from wait" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from help" -f -a "build" -d 'Builds server' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from help" -f -a "run" -d 'Runs server' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from help" -f -a "wait" -d 'Waits for server to start' +complete -c zkstack -n "__fish_zkstack_using_subcommand server; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init build run wait help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init build run wait help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init build run wait help" -l ignore-prerequisites -d 'Ignores 
prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init build run wait help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init build run wait help" -f -a "configs" -d 'Prepare configs for EN' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init build run wait help" -f -a "init" -d 'Init databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init build run wait help" -f -a "build" -d 'Build external node' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init build run wait help" -f -a "run" -d 'Run external node' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init build run wait help" -f -a "wait" -d 'Wait for external node to start' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init build run wait help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l db-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l db-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l l1-rpc-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -s u -l use-default -d 'Use default database urls and names' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from build" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from build" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from build" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n 
"__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from build" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l components -d 'Components of server to run' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l enable-consensus -d 'Enable consensus' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -s a -l additional-args -d 'Additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l reinit +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from wait" -s t -l timeout -d 'Wait timeout in seconds' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from wait" -l poll-interval -d 'Poll interval in milliseconds' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from wait" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from wait" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from wait" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from wait" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "configs" -d 'Prepare configs for EN' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "init" -d 'Init databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "build" -d 'Build external node' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run external node' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "wait" -d 'Wait for external node to start' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -s o -l observability -d 'Enable Grafana' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -s v -l verbose -d 'Verbose 
mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from build run wait init help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from build run wait init help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from build run wait init help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from build run wait init help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from build run wait init help" -f -a "build" -d 'Build contract verifier binary' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from build run wait init help" -f -a "run" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from build run wait init help" -f -a "wait" -d 'Wait for contract verifier to start' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from build run wait init help" -f -a "init" -d 'Download required binaries for contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from build run wait init help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from build" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from build" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from build" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from build" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from run" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from wait" -s t -l timeout -d 'Wait timeout in seconds' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from wait" -l poll-interval -d 'Poll interval in milliseconds' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and 
__fish_seen_subcommand_from wait" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from wait" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from wait" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from wait" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l zksolc-version -d 'Version of zksolc to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l zkvyper-version -d 'Version of zkvyper to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l solc-version -d 'Version of solc to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l era-vm-solc-version -d 'Version of era vm solc to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l vyper-version -d 'Version of vyper to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l only -d 'Install only provided compilers' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from help" -f -a "build" -d 'Build contract verifier binary' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from help" -f -a "wait" -d 'Wait for contract verifier to start' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from help" -f -a "init" -d 'Download required binaries for contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -l chain -d 
'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "init" -d 'Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "run-backend" -d 'Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "run" -d 'Run explorer app' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "init" -d 'Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). 
Runs for all chains, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "run-backend" -d 'Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run explorer app' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee wait-for-registry help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee wait-for-registry help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee wait-for-registry help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee wait-for-registry help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee wait-for-registry help" -f -a "set-attester-committee" -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee wait-for-registry help" -f -a "get-attester-committee" -d 'Fetches the attester committee from the consensus registry contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee wait-for-registry help" -f -a "wait-for-registry" -d 'Wait until the consensus registry contract is deployed to L2' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee wait-for-registry help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l from-file -d 'Sets the attester committee in the consensus registry contract to the committee in the yaml file. 
File format is defined in `commands/consensus/proto/mod.proto`' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l from-genesis -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from wait-for-registry" -s t -l timeout -d 'Wait timeout in seconds' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from wait-for-registry" -l poll-interval -d 'Poll interval in milliseconds' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from wait-for-registry" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from wait-for-registry" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from wait-for-registry" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from wait-for-registry" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from help" -f -a "set-attester-committee" -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from help" -f -a "get-attester-committee" -d 'Fetches the attester committee from the consensus registry contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from help" -f -a "wait-for-registry" -d 'Wait until the consensus registry contract is deployed to L2' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -l chain -d 'Chain to use' -r +complete -c
zkstack -n "__fish_zkstack_using_subcommand update" -s c -l only-config -d 'Update only the config files' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "autocomplete" -d 'Create shell autocompletion files' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "ecosystem" -d 'Ecosystem related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "chain" -d 'Chain related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "dev" -d 'Supervisor related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "prover" -d 'Prover related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "server" -d 'Run server' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "external-node" -d 'External Node related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "containers" -d 'Run containers for local development' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "contract-verifier" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev 
prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "portal" -d 'Run dapp-portal' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "explorer" -d 'Run block-explorer' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "consensus" -d 'Consensus utilities' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "update" -d 'Update ZKsync' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "markdown" -d 'Print markdown help' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "create" -d 'Create a new ecosystem and chain, setting necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "build-transactions" -d 'Create transactions to build ecosystem contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "init" -d 'Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "change-default-chain" -d 'Change the default chain' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "setup-observability" -d 'Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "create" -d 'Create a new chain, setting the necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "build-transactions" -d 'Create unsigned transactions for chain deployment' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "init" -d 'Initialize chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "genesis" -d 'Run server genesis' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "register-chain" -d 'Register a new chain on L1 (executed by L1 governor). 
This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note: After completion, L2 governor can accept ownership by running `accept-chain-ownership`' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-l2-contracts" -d 'Deploy all L2 contracts (executed by L1 governor)' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "accept-chain-ownership" -d 'Accept ownership of L2 chain (executed by L2 governor). This command should be run after `register-chain` to accept ownership of newly created DiamondProxy contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "initialize-bridges" -d 'Initialize bridges on L2' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-consensus-registry" -d 'Deploy L2 consensus registry' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-multicall3" -d 'Deploy L2 multicall3' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-timestamp-asserter" -d 'Deploy L2 TimestampAsserter' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-upgrader" -d 'Deploy Default Upgrader' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-paymaster" -d 'Deploy paymaster smart contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "update-token-multiplier-setter" -d 'Update Token Multiplier Setter address on L1' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "database" -d 'Database related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "test" -d 'Run tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "clean" -d 'Clean artifacts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "snapshot" -d 'Snapshots creator' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "lint" -d 'Lint code' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "fmt" -d 'Format code' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "prover" -d 'Protocol version used by provers' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "contracts" -d 'Build contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "config-writer" -d 'Overwrite general config' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "send-transactions" -d 'Send transactions from file' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "status" -d 'Get status of the server' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from 
dev" -f -a "generate-genesis" -d 'Generate new genesis file based on current contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "init" -d 'Initialize prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "setup-keys" -d 'Generate setup keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "run" -d 'Run prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "init-bellman-cuda" -d 'Initialize bellman-cuda' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "compressor-keys" -d 'Download compressor keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from server" -f -a "build" -d 'Builds server' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from server" -f -a "run" -d 'Runs server' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from server" -f -a "wait" -d 'Waits for server to start' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from external-node" -f -a "configs" -d 'Prepare configs for EN' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from external-node" -f -a "init" -d 'Init databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from external-node" -f -a "build" -d 'Build external node' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from external-node" -f -a "run" -d 'Run external node' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from external-node" -f -a "wait" -d 'Wait for external node to start' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from contract-verifier" -f -a "build" -d 'Build contract verifier binary' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from contract-verifier" -f -a "run" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from contract-verifier" -f -a "wait" -d 'Wait for contract verifier to start' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from contract-verifier" -f -a "init" -d 'Download required binaries for contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from explorer" -f -a "init" -d 'Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from explorer" -f -a "run-backend" -d 'Start explorer backend services (api, data_fetcher, worker) for a given chain. 
Uses default chain, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from explorer" -f -a "run" -d 'Run explorer app' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from consensus" -f -a "set-attester-committee" -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from consensus" -f -a "get-attester-committee" -d 'Fetches the attester committee from the consensus registry contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from consensus" -f -a "wait-for-registry" -d 'Wait until the consensus registry contract is deployed to L2' diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.sh b/zkstack_cli/crates/zkstack/completion/zkstack.sh new file mode 100644 index 000000000000..bb373c3f63eb --- /dev/null +++ b/zkstack_cli/crates/zkstack/completion/zkstack.sh @@ -0,0 +1,7631 @@ +_zkstack() { + local i cur prev opts cmd + COMPREPLY=() + cur="${COMP_WORDS[COMP_CWORD]}" + prev="${COMP_WORDS[COMP_CWORD-1]}" + cmd="" + opts="" + + for i in ${COMP_WORDS[@]} + do + case "${cmd},${i}" in + ",$1") + cmd="zkstack" + ;; + zkstack,autocomplete) + cmd="zkstack__autocomplete" + ;; + zkstack,chain) + cmd="zkstack__chain" + ;; + zkstack,consensus) + cmd="zkstack__consensus" + ;; + zkstack,containers) + cmd="zkstack__containers" + ;; + zkstack,contract-verifier) + cmd="zkstack__contract__verifier" + ;; + zkstack,dev) + cmd="zkstack__dev" + ;; + zkstack,ecosystem) + cmd="zkstack__ecosystem" + ;; + zkstack,explorer) + cmd="zkstack__explorer" + ;; + zkstack,external-node) + cmd="zkstack__external__node" + ;; + zkstack,help) + cmd="zkstack__help" + ;; + zkstack,markdown) + cmd="zkstack__markdown" + ;; + zkstack,portal) + cmd="zkstack__portal" + ;; + zkstack,prover) + cmd="zkstack__prover" + ;; + zkstack,server) + cmd="zkstack__server" + ;; + zkstack,update) + cmd="zkstack__update" + ;; + zkstack__chain,accept-chain-ownership) + cmd="zkstack__chain__accept__chain__ownership" + ;; + zkstack__chain,build-transactions) + cmd="zkstack__chain__build__transactions" + ;; + zkstack__chain,create) + cmd="zkstack__chain__create" + ;; + zkstack__chain,deploy-consensus-registry) + cmd="zkstack__chain__deploy__consensus__registry" + ;; + zkstack__chain,deploy-l2-contracts) + cmd="zkstack__chain__deploy__l2__contracts" + ;; + zkstack__chain,deploy-multicall3) + cmd="zkstack__chain__deploy__multicall3" + ;; + zkstack__chain,deploy-paymaster) + cmd="zkstack__chain__deploy__paymaster" + ;; + zkstack__chain,deploy-timestamp-asserter) + cmd="zkstack__chain__deploy__timestamp__asserter" + ;; + zkstack__chain,deploy-upgrader) + cmd="zkstack__chain__deploy__upgrader" + ;; + zkstack__chain,genesis) + cmd="zkstack__chain__genesis" + ;; + zkstack__chain,help) + cmd="zkstack__chain__help" + ;; + zkstack__chain,init) + cmd="zkstack__chain__init" + ;; + zkstack__chain,initialize-bridges) + cmd="zkstack__chain__initialize__bridges" + ;; + zkstack__chain,register-chain) + cmd="zkstack__chain__register__chain" + ;; + zkstack__chain,update-token-multiplier-setter) + cmd="zkstack__chain__update__token__multiplier__setter" + ;; + zkstack__chain__genesis,help) + cmd="zkstack__chain__genesis__help" + ;; + zkstack__chain__genesis,init-database) + cmd="zkstack__chain__genesis__init__database" + ;; + zkstack__chain__genesis,server) + 
cmd="zkstack__chain__genesis__server" + ;; + zkstack__chain__genesis__help,help) + cmd="zkstack__chain__genesis__help__help" + ;; + zkstack__chain__genesis__help,init-database) + cmd="zkstack__chain__genesis__help__init__database" + ;; + zkstack__chain__genesis__help,server) + cmd="zkstack__chain__genesis__help__server" + ;; + zkstack__chain__help,accept-chain-ownership) + cmd="zkstack__chain__help__accept__chain__ownership" + ;; + zkstack__chain__help,build-transactions) + cmd="zkstack__chain__help__build__transactions" + ;; + zkstack__chain__help,create) + cmd="zkstack__chain__help__create" + ;; + zkstack__chain__help,deploy-consensus-registry) + cmd="zkstack__chain__help__deploy__consensus__registry" + ;; + zkstack__chain__help,deploy-l2-contracts) + cmd="zkstack__chain__help__deploy__l2__contracts" + ;; + zkstack__chain__help,deploy-multicall3) + cmd="zkstack__chain__help__deploy__multicall3" + ;; + zkstack__chain__help,deploy-paymaster) + cmd="zkstack__chain__help__deploy__paymaster" + ;; + zkstack__chain__help,deploy-timestamp-asserter) + cmd="zkstack__chain__help__deploy__timestamp__asserter" + ;; + zkstack__chain__help,deploy-upgrader) + cmd="zkstack__chain__help__deploy__upgrader" + ;; + zkstack__chain__help,genesis) + cmd="zkstack__chain__help__genesis" + ;; + zkstack__chain__help,help) + cmd="zkstack__chain__help__help" + ;; + zkstack__chain__help,init) + cmd="zkstack__chain__help__init" + ;; + zkstack__chain__help,initialize-bridges) + cmd="zkstack__chain__help__initialize__bridges" + ;; + zkstack__chain__help,register-chain) + cmd="zkstack__chain__help__register__chain" + ;; + zkstack__chain__help,update-token-multiplier-setter) + cmd="zkstack__chain__help__update__token__multiplier__setter" + ;; + zkstack__chain__help__genesis,init-database) + cmd="zkstack__chain__help__genesis__init__database" + ;; + zkstack__chain__help__genesis,server) + cmd="zkstack__chain__help__genesis__server" + ;; + zkstack__chain__help__init,configs) + cmd="zkstack__chain__help__init__configs" + ;; + zkstack__chain__init,configs) + cmd="zkstack__chain__init__configs" + ;; + zkstack__chain__init,help) + cmd="zkstack__chain__init__help" + ;; + zkstack__chain__init__help,configs) + cmd="zkstack__chain__init__help__configs" + ;; + zkstack__chain__init__help,help) + cmd="zkstack__chain__init__help__help" + ;; + zkstack__consensus,get-attester-committee) + cmd="zkstack__consensus__get__attester__committee" + ;; + zkstack__consensus,help) + cmd="zkstack__consensus__help" + ;; + zkstack__consensus,set-attester-committee) + cmd="zkstack__consensus__set__attester__committee" + ;; + zkstack__consensus,wait-for-registry) + cmd="zkstack__consensus__wait__for__registry" + ;; + zkstack__consensus__help,get-attester-committee) + cmd="zkstack__consensus__help__get__attester__committee" + ;; + zkstack__consensus__help,help) + cmd="zkstack__consensus__help__help" + ;; + zkstack__consensus__help,set-attester-committee) + cmd="zkstack__consensus__help__set__attester__committee" + ;; + zkstack__consensus__help,wait-for-registry) + cmd="zkstack__consensus__help__wait__for__registry" + ;; + zkstack__contract__verifier,build) + cmd="zkstack__contract__verifier__build" + ;; + zkstack__contract__verifier,help) + cmd="zkstack__contract__verifier__help" + ;; + zkstack__contract__verifier,init) + cmd="zkstack__contract__verifier__init" + ;; + zkstack__contract__verifier,run) + cmd="zkstack__contract__verifier__run" + ;; + zkstack__contract__verifier,wait) + cmd="zkstack__contract__verifier__wait" + ;; + 
zkstack__contract__verifier__help,build) + cmd="zkstack__contract__verifier__help__build" + ;; + zkstack__contract__verifier__help,help) + cmd="zkstack__contract__verifier__help__help" + ;; + zkstack__contract__verifier__help,init) + cmd="zkstack__contract__verifier__help__init" + ;; + zkstack__contract__verifier__help,run) + cmd="zkstack__contract__verifier__help__run" + ;; + zkstack__contract__verifier__help,wait) + cmd="zkstack__contract__verifier__help__wait" + ;; + zkstack__dev,clean) + cmd="zkstack__dev__clean" + ;; + zkstack__dev,config-writer) + cmd="zkstack__dev__config__writer" + ;; + zkstack__dev,contracts) + cmd="zkstack__dev__contracts" + ;; + zkstack__dev,database) + cmd="zkstack__dev__database" + ;; + zkstack__dev,fmt) + cmd="zkstack__dev__fmt" + ;; + zkstack__dev,generate-genesis) + cmd="zkstack__dev__generate__genesis" + ;; + zkstack__dev,help) + cmd="zkstack__dev__help" + ;; + zkstack__dev,lint) + cmd="zkstack__dev__lint" + ;; + zkstack__dev,prover) + cmd="zkstack__dev__prover" + ;; + zkstack__dev,send-transactions) + cmd="zkstack__dev__send__transactions" + ;; + zkstack__dev,snapshot) + cmd="zkstack__dev__snapshot" + ;; + zkstack__dev,status) + cmd="zkstack__dev__status" + ;; + zkstack__dev,test) + cmd="zkstack__dev__test" + ;; + zkstack__dev__clean,all) + cmd="zkstack__dev__clean__all" + ;; + zkstack__dev__clean,containers) + cmd="zkstack__dev__clean__containers" + ;; + zkstack__dev__clean,contracts-cache) + cmd="zkstack__dev__clean__contracts__cache" + ;; + zkstack__dev__clean,help) + cmd="zkstack__dev__clean__help" + ;; + zkstack__dev__clean__help,all) + cmd="zkstack__dev__clean__help__all" + ;; + zkstack__dev__clean__help,containers) + cmd="zkstack__dev__clean__help__containers" + ;; + zkstack__dev__clean__help,contracts-cache) + cmd="zkstack__dev__clean__help__contracts__cache" + ;; + zkstack__dev__clean__help,help) + cmd="zkstack__dev__clean__help__help" + ;; + zkstack__dev__database,check-sqlx-data) + cmd="zkstack__dev__database__check__sqlx__data" + ;; + zkstack__dev__database,drop) + cmd="zkstack__dev__database__drop" + ;; + zkstack__dev__database,help) + cmd="zkstack__dev__database__help" + ;; + zkstack__dev__database,migrate) + cmd="zkstack__dev__database__migrate" + ;; + zkstack__dev__database,new-migration) + cmd="zkstack__dev__database__new__migration" + ;; + zkstack__dev__database,prepare) + cmd="zkstack__dev__database__prepare" + ;; + zkstack__dev__database,reset) + cmd="zkstack__dev__database__reset" + ;; + zkstack__dev__database,setup) + cmd="zkstack__dev__database__setup" + ;; + zkstack__dev__database__help,check-sqlx-data) + cmd="zkstack__dev__database__help__check__sqlx__data" + ;; + zkstack__dev__database__help,drop) + cmd="zkstack__dev__database__help__drop" + ;; + zkstack__dev__database__help,help) + cmd="zkstack__dev__database__help__help" + ;; + zkstack__dev__database__help,migrate) + cmd="zkstack__dev__database__help__migrate" + ;; + zkstack__dev__database__help,new-migration) + cmd="zkstack__dev__database__help__new__migration" + ;; + zkstack__dev__database__help,prepare) + cmd="zkstack__dev__database__help__prepare" + ;; + zkstack__dev__database__help,reset) + cmd="zkstack__dev__database__help__reset" + ;; + zkstack__dev__database__help,setup) + cmd="zkstack__dev__database__help__setup" + ;; + zkstack__dev__fmt,contract) + cmd="zkstack__dev__fmt__contract" + ;; + zkstack__dev__fmt,help) + cmd="zkstack__dev__fmt__help" + ;; + zkstack__dev__fmt,prettier) + cmd="zkstack__dev__fmt__prettier" + ;; + zkstack__dev__fmt,rustfmt) + 
cmd="zkstack__dev__fmt__rustfmt" + ;; + zkstack__dev__fmt__help,contract) + cmd="zkstack__dev__fmt__help__contract" + ;; + zkstack__dev__fmt__help,help) + cmd="zkstack__dev__fmt__help__help" + ;; + zkstack__dev__fmt__help,prettier) + cmd="zkstack__dev__fmt__help__prettier" + ;; + zkstack__dev__fmt__help,rustfmt) + cmd="zkstack__dev__fmt__help__rustfmt" + ;; + zkstack__dev__help,clean) + cmd="zkstack__dev__help__clean" + ;; + zkstack__dev__help,config-writer) + cmd="zkstack__dev__help__config__writer" + ;; + zkstack__dev__help,contracts) + cmd="zkstack__dev__help__contracts" + ;; + zkstack__dev__help,database) + cmd="zkstack__dev__help__database" + ;; + zkstack__dev__help,fmt) + cmd="zkstack__dev__help__fmt" + ;; + zkstack__dev__help,generate-genesis) + cmd="zkstack__dev__help__generate__genesis" + ;; + zkstack__dev__help,help) + cmd="zkstack__dev__help__help" + ;; + zkstack__dev__help,lint) + cmd="zkstack__dev__help__lint" + ;; + zkstack__dev__help,prover) + cmd="zkstack__dev__help__prover" + ;; + zkstack__dev__help,send-transactions) + cmd="zkstack__dev__help__send__transactions" + ;; + zkstack__dev__help,snapshot) + cmd="zkstack__dev__help__snapshot" + ;; + zkstack__dev__help,status) + cmd="zkstack__dev__help__status" + ;; + zkstack__dev__help,test) + cmd="zkstack__dev__help__test" + ;; + zkstack__dev__help__clean,all) + cmd="zkstack__dev__help__clean__all" + ;; + zkstack__dev__help__clean,containers) + cmd="zkstack__dev__help__clean__containers" + ;; + zkstack__dev__help__clean,contracts-cache) + cmd="zkstack__dev__help__clean__contracts__cache" + ;; + zkstack__dev__help__database,check-sqlx-data) + cmd="zkstack__dev__help__database__check__sqlx__data" + ;; + zkstack__dev__help__database,drop) + cmd="zkstack__dev__help__database__drop" + ;; + zkstack__dev__help__database,migrate) + cmd="zkstack__dev__help__database__migrate" + ;; + zkstack__dev__help__database,new-migration) + cmd="zkstack__dev__help__database__new__migration" + ;; + zkstack__dev__help__database,prepare) + cmd="zkstack__dev__help__database__prepare" + ;; + zkstack__dev__help__database,reset) + cmd="zkstack__dev__help__database__reset" + ;; + zkstack__dev__help__database,setup) + cmd="zkstack__dev__help__database__setup" + ;; + zkstack__dev__help__fmt,contract) + cmd="zkstack__dev__help__fmt__contract" + ;; + zkstack__dev__help__fmt,prettier) + cmd="zkstack__dev__help__fmt__prettier" + ;; + zkstack__dev__help__fmt,rustfmt) + cmd="zkstack__dev__help__fmt__rustfmt" + ;; + zkstack__dev__help__prover,info) + cmd="zkstack__dev__help__prover__info" + ;; + zkstack__dev__help__prover,insert-batch) + cmd="zkstack__dev__help__prover__insert__batch" + ;; + zkstack__dev__help__prover,insert-version) + cmd="zkstack__dev__help__prover__insert__version" + ;; + zkstack__dev__help__snapshot,create) + cmd="zkstack__dev__help__snapshot__create" + ;; + zkstack__dev__help__status,ports) + cmd="zkstack__dev__help__status__ports" + ;; + zkstack__dev__help__test,build) + cmd="zkstack__dev__help__test__build" + ;; + zkstack__dev__help__test,fees) + cmd="zkstack__dev__help__test__fees" + ;; + zkstack__dev__help__test,integration) + cmd="zkstack__dev__help__test__integration" + ;; + zkstack__dev__help__test,l1-contracts) + cmd="zkstack__dev__help__test__l1__contracts" + ;; + zkstack__dev__help__test,loadtest) + cmd="zkstack__dev__help__test__loadtest" + ;; + zkstack__dev__help__test,prover) + cmd="zkstack__dev__help__test__prover" + ;; + zkstack__dev__help__test,recovery) + cmd="zkstack__dev__help__test__recovery" + ;; + 
zkstack__dev__help__test,revert) + cmd="zkstack__dev__help__test__revert" + ;; + zkstack__dev__help__test,rust) + cmd="zkstack__dev__help__test__rust" + ;; + zkstack__dev__help__test,upgrade) + cmd="zkstack__dev__help__test__upgrade" + ;; + zkstack__dev__help__test,wallet) + cmd="zkstack__dev__help__test__wallet" + ;; + zkstack__dev__prover,help) + cmd="zkstack__dev__prover__help" + ;; + zkstack__dev__prover,info) + cmd="zkstack__dev__prover__info" + ;; + zkstack__dev__prover,insert-batch) + cmd="zkstack__dev__prover__insert__batch" + ;; + zkstack__dev__prover,insert-version) + cmd="zkstack__dev__prover__insert__version" + ;; + zkstack__dev__prover__help,help) + cmd="zkstack__dev__prover__help__help" + ;; + zkstack__dev__prover__help,info) + cmd="zkstack__dev__prover__help__info" + ;; + zkstack__dev__prover__help,insert-batch) + cmd="zkstack__dev__prover__help__insert__batch" + ;; + zkstack__dev__prover__help,insert-version) + cmd="zkstack__dev__prover__help__insert__version" + ;; + zkstack__dev__snapshot,create) + cmd="zkstack__dev__snapshot__create" + ;; + zkstack__dev__snapshot,help) + cmd="zkstack__dev__snapshot__help" + ;; + zkstack__dev__snapshot__help,create) + cmd="zkstack__dev__snapshot__help__create" + ;; + zkstack__dev__snapshot__help,help) + cmd="zkstack__dev__snapshot__help__help" + ;; + zkstack__dev__status,help) + cmd="zkstack__dev__status__help" + ;; + zkstack__dev__status,ports) + cmd="zkstack__dev__status__ports" + ;; + zkstack__dev__status__help,help) + cmd="zkstack__dev__status__help__help" + ;; + zkstack__dev__status__help,ports) + cmd="zkstack__dev__status__help__ports" + ;; + zkstack__dev__test,build) + cmd="zkstack__dev__test__build" + ;; + zkstack__dev__test,fees) + cmd="zkstack__dev__test__fees" + ;; + zkstack__dev__test,help) + cmd="zkstack__dev__test__help" + ;; + zkstack__dev__test,integration) + cmd="zkstack__dev__test__integration" + ;; + zkstack__dev__test,l1-contracts) + cmd="zkstack__dev__test__l1__contracts" + ;; + zkstack__dev__test,loadtest) + cmd="zkstack__dev__test__loadtest" + ;; + zkstack__dev__test,prover) + cmd="zkstack__dev__test__prover" + ;; + zkstack__dev__test,recovery) + cmd="zkstack__dev__test__recovery" + ;; + zkstack__dev__test,revert) + cmd="zkstack__dev__test__revert" + ;; + zkstack__dev__test,rust) + cmd="zkstack__dev__test__rust" + ;; + zkstack__dev__test,upgrade) + cmd="zkstack__dev__test__upgrade" + ;; + zkstack__dev__test,wallet) + cmd="zkstack__dev__test__wallet" + ;; + zkstack__dev__test__help,build) + cmd="zkstack__dev__test__help__build" + ;; + zkstack__dev__test__help,fees) + cmd="zkstack__dev__test__help__fees" + ;; + zkstack__dev__test__help,help) + cmd="zkstack__dev__test__help__help" + ;; + zkstack__dev__test__help,integration) + cmd="zkstack__dev__test__help__integration" + ;; + zkstack__dev__test__help,l1-contracts) + cmd="zkstack__dev__test__help__l1__contracts" + ;; + zkstack__dev__test__help,loadtest) + cmd="zkstack__dev__test__help__loadtest" + ;; + zkstack__dev__test__help,prover) + cmd="zkstack__dev__test__help__prover" + ;; + zkstack__dev__test__help,recovery) + cmd="zkstack__dev__test__help__recovery" + ;; + zkstack__dev__test__help,revert) + cmd="zkstack__dev__test__help__revert" + ;; + zkstack__dev__test__help,rust) + cmd="zkstack__dev__test__help__rust" + ;; + zkstack__dev__test__help,upgrade) + cmd="zkstack__dev__test__help__upgrade" + ;; + zkstack__dev__test__help,wallet) + cmd="zkstack__dev__test__help__wallet" + ;; + zkstack__ecosystem,build-transactions) + cmd="zkstack__ecosystem__build__transactions" + 
;; + zkstack__ecosystem,change-default-chain) + cmd="zkstack__ecosystem__change__default__chain" + ;; + zkstack__ecosystem,create) + cmd="zkstack__ecosystem__create" + ;; + zkstack__ecosystem,help) + cmd="zkstack__ecosystem__help" + ;; + zkstack__ecosystem,init) + cmd="zkstack__ecosystem__init" + ;; + zkstack__ecosystem,setup-observability) + cmd="zkstack__ecosystem__setup__observability" + ;; + zkstack__ecosystem__help,build-transactions) + cmd="zkstack__ecosystem__help__build__transactions" + ;; + zkstack__ecosystem__help,change-default-chain) + cmd="zkstack__ecosystem__help__change__default__chain" + ;; + zkstack__ecosystem__help,create) + cmd="zkstack__ecosystem__help__create" + ;; + zkstack__ecosystem__help,help) + cmd="zkstack__ecosystem__help__help" + ;; + zkstack__ecosystem__help,init) + cmd="zkstack__ecosystem__help__init" + ;; + zkstack__ecosystem__help,setup-observability) + cmd="zkstack__ecosystem__help__setup__observability" + ;; + zkstack__explorer,help) + cmd="zkstack__explorer__help" + ;; + zkstack__explorer,init) + cmd="zkstack__explorer__init" + ;; + zkstack__explorer,run) + cmd="zkstack__explorer__run" + ;; + zkstack__explorer,run-backend) + cmd="zkstack__explorer__run__backend" + ;; + zkstack__explorer__help,help) + cmd="zkstack__explorer__help__help" + ;; + zkstack__explorer__help,init) + cmd="zkstack__explorer__help__init" + ;; + zkstack__explorer__help,run) + cmd="zkstack__explorer__help__run" + ;; + zkstack__explorer__help,run-backend) + cmd="zkstack__explorer__help__run__backend" + ;; + zkstack__external__node,build) + cmd="zkstack__external__node__build" + ;; + zkstack__external__node,configs) + cmd="zkstack__external__node__configs" + ;; + zkstack__external__node,help) + cmd="zkstack__external__node__help" + ;; + zkstack__external__node,init) + cmd="zkstack__external__node__init" + ;; + zkstack__external__node,run) + cmd="zkstack__external__node__run" + ;; + zkstack__external__node,wait) + cmd="zkstack__external__node__wait" + ;; + zkstack__external__node__help,build) + cmd="zkstack__external__node__help__build" + ;; + zkstack__external__node__help,configs) + cmd="zkstack__external__node__help__configs" + ;; + zkstack__external__node__help,help) + cmd="zkstack__external__node__help__help" + ;; + zkstack__external__node__help,init) + cmd="zkstack__external__node__help__init" + ;; + zkstack__external__node__help,run) + cmd="zkstack__external__node__help__run" + ;; + zkstack__external__node__help,wait) + cmd="zkstack__external__node__help__wait" + ;; + zkstack__help,autocomplete) + cmd="zkstack__help__autocomplete" + ;; + zkstack__help,chain) + cmd="zkstack__help__chain" + ;; + zkstack__help,consensus) + cmd="zkstack__help__consensus" + ;; + zkstack__help,containers) + cmd="zkstack__help__containers" + ;; + zkstack__help,contract-verifier) + cmd="zkstack__help__contract__verifier" + ;; + zkstack__help,dev) + cmd="zkstack__help__dev" + ;; + zkstack__help,ecosystem) + cmd="zkstack__help__ecosystem" + ;; + zkstack__help,explorer) + cmd="zkstack__help__explorer" + ;; + zkstack__help,external-node) + cmd="zkstack__help__external__node" + ;; + zkstack__help,help) + cmd="zkstack__help__help" + ;; + zkstack__help,markdown) + cmd="zkstack__help__markdown" + ;; + zkstack__help,portal) + cmd="zkstack__help__portal" + ;; + zkstack__help,prover) + cmd="zkstack__help__prover" + ;; + zkstack__help,server) + cmd="zkstack__help__server" + ;; + zkstack__help,update) + cmd="zkstack__help__update" + ;; + zkstack__help__chain,accept-chain-ownership) + 
cmd="zkstack__help__chain__accept__chain__ownership" + ;; + zkstack__help__chain,build-transactions) + cmd="zkstack__help__chain__build__transactions" + ;; + zkstack__help__chain,create) + cmd="zkstack__help__chain__create" + ;; + zkstack__help__chain,deploy-consensus-registry) + cmd="zkstack__help__chain__deploy__consensus__registry" + ;; + zkstack__help__chain,deploy-l2-contracts) + cmd="zkstack__help__chain__deploy__l2__contracts" + ;; + zkstack__help__chain,deploy-multicall3) + cmd="zkstack__help__chain__deploy__multicall3" + ;; + zkstack__help__chain,deploy-paymaster) + cmd="zkstack__help__chain__deploy__paymaster" + ;; + zkstack__help__chain,deploy-timestamp-asserter) + cmd="zkstack__help__chain__deploy__timestamp__asserter" + ;; + zkstack__help__chain,deploy-upgrader) + cmd="zkstack__help__chain__deploy__upgrader" + ;; + zkstack__help__chain,genesis) + cmd="zkstack__help__chain__genesis" + ;; + zkstack__help__chain,init) + cmd="zkstack__help__chain__init" + ;; + zkstack__help__chain,initialize-bridges) + cmd="zkstack__help__chain__initialize__bridges" + ;; + zkstack__help__chain,register-chain) + cmd="zkstack__help__chain__register__chain" + ;; + zkstack__help__chain,update-token-multiplier-setter) + cmd="zkstack__help__chain__update__token__multiplier__setter" + ;; + zkstack__help__chain__genesis,init-database) + cmd="zkstack__help__chain__genesis__init__database" + ;; + zkstack__help__chain__genesis,server) + cmd="zkstack__help__chain__genesis__server" + ;; + zkstack__help__chain__init,configs) + cmd="zkstack__help__chain__init__configs" + ;; + zkstack__help__consensus,get-attester-committee) + cmd="zkstack__help__consensus__get__attester__committee" + ;; + zkstack__help__consensus,set-attester-committee) + cmd="zkstack__help__consensus__set__attester__committee" + ;; + zkstack__help__consensus,wait-for-registry) + cmd="zkstack__help__consensus__wait__for__registry" + ;; + zkstack__help__contract__verifier,build) + cmd="zkstack__help__contract__verifier__build" + ;; + zkstack__help__contract__verifier,init) + cmd="zkstack__help__contract__verifier__init" + ;; + zkstack__help__contract__verifier,run) + cmd="zkstack__help__contract__verifier__run" + ;; + zkstack__help__contract__verifier,wait) + cmd="zkstack__help__contract__verifier__wait" + ;; + zkstack__help__dev,clean) + cmd="zkstack__help__dev__clean" + ;; + zkstack__help__dev,config-writer) + cmd="zkstack__help__dev__config__writer" + ;; + zkstack__help__dev,contracts) + cmd="zkstack__help__dev__contracts" + ;; + zkstack__help__dev,database) + cmd="zkstack__help__dev__database" + ;; + zkstack__help__dev,fmt) + cmd="zkstack__help__dev__fmt" + ;; + zkstack__help__dev,generate-genesis) + cmd="zkstack__help__dev__generate__genesis" + ;; + zkstack__help__dev,lint) + cmd="zkstack__help__dev__lint" + ;; + zkstack__help__dev,prover) + cmd="zkstack__help__dev__prover" + ;; + zkstack__help__dev,send-transactions) + cmd="zkstack__help__dev__send__transactions" + ;; + zkstack__help__dev,snapshot) + cmd="zkstack__help__dev__snapshot" + ;; + zkstack__help__dev,status) + cmd="zkstack__help__dev__status" + ;; + zkstack__help__dev,test) + cmd="zkstack__help__dev__test" + ;; + zkstack__help__dev__clean,all) + cmd="zkstack__help__dev__clean__all" + ;; + zkstack__help__dev__clean,containers) + cmd="zkstack__help__dev__clean__containers" + ;; + zkstack__help__dev__clean,contracts-cache) + cmd="zkstack__help__dev__clean__contracts__cache" + ;; + zkstack__help__dev__database,check-sqlx-data) + cmd="zkstack__help__dev__database__check__sqlx__data" + 
;; + zkstack__help__dev__database,drop) + cmd="zkstack__help__dev__database__drop" + ;; + zkstack__help__dev__database,migrate) + cmd="zkstack__help__dev__database__migrate" + ;; + zkstack__help__dev__database,new-migration) + cmd="zkstack__help__dev__database__new__migration" + ;; + zkstack__help__dev__database,prepare) + cmd="zkstack__help__dev__database__prepare" + ;; + zkstack__help__dev__database,reset) + cmd="zkstack__help__dev__database__reset" + ;; + zkstack__help__dev__database,setup) + cmd="zkstack__help__dev__database__setup" + ;; + zkstack__help__dev__fmt,contract) + cmd="zkstack__help__dev__fmt__contract" + ;; + zkstack__help__dev__fmt,prettier) + cmd="zkstack__help__dev__fmt__prettier" + ;; + zkstack__help__dev__fmt,rustfmt) + cmd="zkstack__help__dev__fmt__rustfmt" + ;; + zkstack__help__dev__prover,info) + cmd="zkstack__help__dev__prover__info" + ;; + zkstack__help__dev__prover,insert-batch) + cmd="zkstack__help__dev__prover__insert__batch" + ;; + zkstack__help__dev__prover,insert-version) + cmd="zkstack__help__dev__prover__insert__version" + ;; + zkstack__help__dev__snapshot,create) + cmd="zkstack__help__dev__snapshot__create" + ;; + zkstack__help__dev__status,ports) + cmd="zkstack__help__dev__status__ports" + ;; + zkstack__help__dev__test,build) + cmd="zkstack__help__dev__test__build" + ;; + zkstack__help__dev__test,fees) + cmd="zkstack__help__dev__test__fees" + ;; + zkstack__help__dev__test,integration) + cmd="zkstack__help__dev__test__integration" + ;; + zkstack__help__dev__test,l1-contracts) + cmd="zkstack__help__dev__test__l1__contracts" + ;; + zkstack__help__dev__test,loadtest) + cmd="zkstack__help__dev__test__loadtest" + ;; + zkstack__help__dev__test,prover) + cmd="zkstack__help__dev__test__prover" + ;; + zkstack__help__dev__test,recovery) + cmd="zkstack__help__dev__test__recovery" + ;; + zkstack__help__dev__test,revert) + cmd="zkstack__help__dev__test__revert" + ;; + zkstack__help__dev__test,rust) + cmd="zkstack__help__dev__test__rust" + ;; + zkstack__help__dev__test,upgrade) + cmd="zkstack__help__dev__test__upgrade" + ;; + zkstack__help__dev__test,wallet) + cmd="zkstack__help__dev__test__wallet" + ;; + zkstack__help__ecosystem,build-transactions) + cmd="zkstack__help__ecosystem__build__transactions" + ;; + zkstack__help__ecosystem,change-default-chain) + cmd="zkstack__help__ecosystem__change__default__chain" + ;; + zkstack__help__ecosystem,create) + cmd="zkstack__help__ecosystem__create" + ;; + zkstack__help__ecosystem,init) + cmd="zkstack__help__ecosystem__init" + ;; + zkstack__help__ecosystem,setup-observability) + cmd="zkstack__help__ecosystem__setup__observability" + ;; + zkstack__help__explorer,init) + cmd="zkstack__help__explorer__init" + ;; + zkstack__help__explorer,run) + cmd="zkstack__help__explorer__run" + ;; + zkstack__help__explorer,run-backend) + cmd="zkstack__help__explorer__run__backend" + ;; + zkstack__help__external__node,build) + cmd="zkstack__help__external__node__build" + ;; + zkstack__help__external__node,configs) + cmd="zkstack__help__external__node__configs" + ;; + zkstack__help__external__node,init) + cmd="zkstack__help__external__node__init" + ;; + zkstack__help__external__node,run) + cmd="zkstack__help__external__node__run" + ;; + zkstack__help__external__node,wait) + cmd="zkstack__help__external__node__wait" + ;; + zkstack__help__prover,compressor-keys) + cmd="zkstack__help__prover__compressor__keys" + ;; + zkstack__help__prover,init) + cmd="zkstack__help__prover__init" + ;; + zkstack__help__prover,init-bellman-cuda) + 
cmd="zkstack__help__prover__init__bellman__cuda" + ;; + zkstack__help__prover,run) + cmd="zkstack__help__prover__run" + ;; + zkstack__help__prover,setup-keys) + cmd="zkstack__help__prover__setup__keys" + ;; + zkstack__help__server,build) + cmd="zkstack__help__server__build" + ;; + zkstack__help__server,run) + cmd="zkstack__help__server__run" + ;; + zkstack__help__server,wait) + cmd="zkstack__help__server__wait" + ;; + zkstack__prover,compressor-keys) + cmd="zkstack__prover__compressor__keys" + ;; + zkstack__prover,help) + cmd="zkstack__prover__help" + ;; + zkstack__prover,init) + cmd="zkstack__prover__init" + ;; + zkstack__prover,init-bellman-cuda) + cmd="zkstack__prover__init__bellman__cuda" + ;; + zkstack__prover,run) + cmd="zkstack__prover__run" + ;; + zkstack__prover,setup-keys) + cmd="zkstack__prover__setup__keys" + ;; + zkstack__prover__help,compressor-keys) + cmd="zkstack__prover__help__compressor__keys" + ;; + zkstack__prover__help,help) + cmd="zkstack__prover__help__help" + ;; + zkstack__prover__help,init) + cmd="zkstack__prover__help__init" + ;; + zkstack__prover__help,init-bellman-cuda) + cmd="zkstack__prover__help__init__bellman__cuda" + ;; + zkstack__prover__help,run) + cmd="zkstack__prover__help__run" + ;; + zkstack__prover__help,setup-keys) + cmd="zkstack__prover__help__setup__keys" + ;; + zkstack__server,build) + cmd="zkstack__server__build" + ;; + zkstack__server,help) + cmd="zkstack__server__help" + ;; + zkstack__server,run) + cmd="zkstack__server__run" + ;; + zkstack__server,wait) + cmd="zkstack__server__wait" + ;; + zkstack__server__help,build) + cmd="zkstack__server__help__build" + ;; + zkstack__server__help,help) + cmd="zkstack__server__help__help" + ;; + zkstack__server__help,run) + cmd="zkstack__server__help__run" + ;; + zkstack__server__help,wait) + cmd="zkstack__server__help__wait" + ;; + *) + ;; + esac + done + + case "${cmd}" in + zkstack) + opts="-v -h -V --verbose --chain --ignore-prerequisites --help --version autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 1 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__autocomplete) + opts="-o -v -h --generate --out --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --generate) + COMPREPLY=($(compgen -W "bash elvish fish powershell zsh" -- "${cur}")) + return 0 + ;; + --out) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain) + opts="-v -h --verbose --chain --ignore-prerequisites --help create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + 
COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__accept__chain__ownership) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__build__transactions) + opts="-o -a -v -h --out --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --l1-rpc-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --out) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__create) + opts="-v -h --chain-name --chain-id --prover-mode --wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --evm-emulator --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --prover-mode) + COMPREPLY=($(compgen -W "no-proofs gpu" -- "${cur}")) + return 0 + ;; + --wallet-creation) + COMPREPLY=($(compgen -W "localhost random empty in-file" -- "${cur}")) + return 0 + ;; + --wallet-path) + local oldifs + if [ -n "${IFS+x}" ]; then + oldifs="$IFS" + fi + IFS=$'\n' + COMPREPLY=($(compgen -f "${cur}")) + if [ -n "${oldifs+x}" ]; then + IFS="$oldifs" + fi + if [[ "${BASH_VERSINFO[0]}" -ge 4 ]]; then + compopt -o filenames + fi + return 0 + ;; + --l1-batch-commit-data-generator-mode) + COMPREPLY=($(compgen -W "rollup validium" -- "${cur}")) + return 0 + ;; + --base-token-address) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + 
;; + --base-token-price-nominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --base-token-price-denominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --set-as-default) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --evm-emulator) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__consensus__registry) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__l2__contracts) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__multicall3) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__paymaster) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose 
--chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__timestamp__asserter) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__upgrader) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis) + opts="-d -d -v -h --server-db-url --server-db-name --dev --dont-drop --verbose --chain --ignore-prerequisites --help init-database server help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help) + opts="init-database server help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi 
+ case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help__init__database) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help__server) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__init__database) + opts="-d -d -v -h --server-db-url --server-db-name --dev --dont-drop --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__server) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help) + opts="create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__accept__chain__ownership) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__consensus__registry) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + 
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__l2__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__multicall3) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__paymaster) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__timestamp__asserter) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__upgrader) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__genesis) + opts="init-database server" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__genesis__init__database) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__genesis__server) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__init) + opts="configs" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__init__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__initialize__bridges) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + 
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__register__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__update__token__multiplier__setter) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init) + opts="-a -d -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --server-db-url --server-db-name --dont-drop --deploy-paymaster --l1-rpc-url --no-port-reallocation --dev --verbose --chain --ignore-prerequisites --help configs help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --deploy-paymaster) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__configs) + opts="-d -d -v -h --server-db-url --server-db-name --dev --dont-drop --l1-rpc-url --no-port-reallocation --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__help) + opts="configs help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__help__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; 
then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__initialize__bridges) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__register__chain) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__update__token__multiplier__setter) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus) + opts="-v -h --verbose --chain --ignore-prerequisites --help set-attester-committee get-attester-committee wait-for-registry help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__consensus__get__attester__committee) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help) + opts="set-attester-committee get-attester-committee wait-for-registry help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help__get__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help__set__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help__wait__for__registry) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__set__attester__committee) + opts="-v -h --from-genesis --from-file --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --from-file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__wait__for__registry) + opts="-t -v -h --timeout --poll-interval --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --timeout) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -t) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --poll-interval) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__containers) + opts="-o -v -h --observability --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --observability) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + 
COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier) + opts="-v -h --verbose --chain --ignore-prerequisites --help build run wait init help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__build) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help) + opts="build run wait init help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help__wait) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__init) + opts="-v -h --zksolc-version --zkvyper-version --solc-version --era-vm-solc-version --vyper-version --only --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --zksolc-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --zkvyper-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --solc-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --era-vm-solc-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --vyper-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + 
COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__run) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__wait) + opts="-t -v -h --timeout --poll-interval --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --timeout) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -t) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --poll-interval) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev) + opts="-v -h --verbose --chain --ignore-prerequisites --help database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean) + opts="-v -h --verbose --chain --ignore-prerequisites --help all containers contracts-cache help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__all) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__containers) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__contracts__cache) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help) + opts="all containers contracts-cache help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" 
-- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__all) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__contracts__cache) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__config__writer) + opts="-p -v -h --path --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__contracts) + opts="-v -h --l1-contracts --l2-contracts --system-contracts --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --l1-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --l2-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --system-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database) + opts="-v -h --verbose --chain --ignore-prerequisites --help check-sqlx-data drop migrate new-migration prepare reset setup help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__check__sqlx__data) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- 
"${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__drop) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help) + opts="check-sqlx-data drop migrate new-migration prepare reset setup help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__check__sqlx__data) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__drop) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__migrate) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__new__migration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__prepare) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__reset) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 
0 + ;; + zkstack__dev__database__help__setup) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__migrate) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__new__migration) + opts="-v -h --database --name --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --database) + COMPREPLY=($(compgen -W "prover core" -- "${cur}")) + return 0 + ;; + --name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__prepare) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__reset) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 
+ ;; + zkstack__dev__database__setup) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt) + opts="-c -v -h --check --verbose --chain --ignore-prerequisites --help rustfmt contract prettier help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__contract) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help) + opts="rustfmt contract prettier help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__contract) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__prettier) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__rustfmt) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__prettier) + opts="-t -v -h --targets --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --targets) + COMPREPLY=($(compgen -W "md sol js ts rs contracts autocompletion rust-toolchain" -- "${cur}")) + 
return 0 + ;; + -t) + COMPREPLY=($(compgen -W "md sol js ts rs contracts autocompletion rust-toolchain" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__rustfmt) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__generate__genesis) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help) + opts="database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean) + opts="all containers contracts-cache" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean__all) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean__contracts__cache) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__config__writer) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database) + opts="check-sqlx-data drop migrate new-migration prepare reset setup" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( 
$(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__check__sqlx__data) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__drop) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__migrate) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__new__migration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__prepare) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__reset) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__setup) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt) + opts="rustfmt contract prettier" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt__contract) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt__prettier) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt__rustfmt) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__generate__genesis) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- 
"${cur}") ) + return 0 + ;; + zkstack__dev__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__lint) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__prover) + opts="info insert-batch insert-version" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__prover__info) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__prover__insert__batch) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__prover__insert__version) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__send__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__snapshot) + opts="create" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__snapshot__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__status) + opts="ports" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__status__ports) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test) + opts="integration fees revert recovery upgrade build rust l1-contracts prover wallet loadtest" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( 
$(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__fees) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__integration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__l1__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__loadtest) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__prover) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__recovery) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__revert) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__rust) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__upgrade) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__wallet) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__lint) + opts="-c -t -v -h --check --targets --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --targets) + COMPREPLY=($(compgen -W "md sol js ts rs contracts 
autocompletion rust-toolchain" -- "${cur}")) + return 0 + ;; + -t) + COMPREPLY=($(compgen -W "md sol js ts rs contracts autocompletion rust-toolchain" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover) + opts="-v -h --verbose --chain --ignore-prerequisites --help info insert-batch insert-version help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help) + opts="info insert-batch insert-version help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help__info) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help__insert__batch) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help__insert__version) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__info) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__insert__batch) + opts="-v -h --number --default --version --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --number) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__insert__version) + opts="-v -h --default --version --snark-wrapper --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --version) + COMPREPLY=($(compgen -f 
"${cur}")) + return 0 + ;; + --snark-wrapper) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__send__transactions) + opts="-v -h --file --private-key --l1-rpc-url --confirmations --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --private-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --confirmations) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot) + opts="-v -h --verbose --chain --ignore-prerequisites --help create help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__create) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__help) + opts="create help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__help__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status) + opts="-u -v -h --url --verbose --chain --ignore-prerequisites --help ports help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -u) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__help) + opts="ports help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__help__help) + opts="" + 
if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__help__ports) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__ports) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test) + opts="-v -h --verbose --chain --ignore-prerequisites --help integration fees revert recovery upgrade build rust l1-contracts prover wallet loadtest help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__build) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__fees) + opts="-n -v -h --no-deps --no-kill --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help) + opts="integration fees revert recovery upgrade build rust l1-contracts prover wallet loadtest help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__fees) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__integration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 
5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__l1__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__loadtest) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__prover) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__recovery) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__revert) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__rust) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__upgrade) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__wallet) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__integration) + opts="-e -n -t -v -h --external-node --no-deps --test-pattern --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --test-pattern) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -t) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__l1__contracts) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__loadtest) + opts="-v -h 
--verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__prover) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__recovery) + opts="-s -n -v -h --snapshot --no-deps --no-kill --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__revert) + opts="-e -n -v -h --enable-consensus --external-node --no-deps --no-kill --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__rust) + opts="-v -h --options --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --options) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__upgrade) + opts="-n -v -h --no-deps --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__wallet) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem) + opts="-v -h --verbose --chain --ignore-prerequisites --help create build-transactions init change-default-chain setup-observability help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__build__transactions) + opts="-o -a -v -h --sender --l1-rpc-url --out --verify --verifier --verifier-url --verifier-api-key 
--resume --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --sender) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --out) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__change__default__chain) + opts="-v -h --verbose --chain --ignore-prerequisites --help [NAME]" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__create) + opts="-v -h --ecosystem-name --l1-network --link-to-code --chain-name --chain-id --prover-mode --wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --evm-emulator --start-containers --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --ecosystem-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-network) + COMPREPLY=($(compgen -W "localhost sepolia holesky mainnet" -- "${cur}")) + return 0 + ;; + --link-to-code) + COMPREPLY=() + if [[ "${BASH_VERSINFO[0]}" -ge 4 ]]; then + compopt -o plusdirs + fi + return 0 + ;; + --chain-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --prover-mode) + COMPREPLY=($(compgen -W "no-proofs gpu" -- "${cur}")) + return 0 + ;; + --wallet-creation) + COMPREPLY=($(compgen -W "localhost random empty in-file" -- "${cur}")) + return 0 + ;; + --wallet-path) + local oldifs + if [ -n "${IFS+x}" ]; then + oldifs="$IFS" + fi + IFS=$'\n' + COMPREPLY=($(compgen -f "${cur}")) + if [ -n "${oldifs+x}" ]; then + IFS="$oldifs" + fi + if [[ "${BASH_VERSINFO[0]}" -ge 4 ]]; then + compopt -o filenames + fi + return 0 + ;; + --l1-batch-commit-data-generator-mode) + COMPREPLY=($(compgen -W "rollup validium" -- "${cur}")) + return 0 + ;; + --base-token-address) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --base-token-price-nominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --base-token-price-denominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --set-as-default) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --evm-emulator) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --start-containers) + 
COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help) + opts="create build-transactions init change-default-chain setup-observability help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__change__default__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__setup__observability) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__init) + opts="-a -d -o -v -h --deploy-erc20 --deploy-ecosystem --ecosystem-contracts-path --l1-rpc-url --verify --verifier --verifier-url --verifier-api-key --resume --additional-args --deploy-paymaster --server-db-url --server-db-name --dont-drop --ecosystem-only --dev --observability --no-port-reallocation --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --deploy-erc20) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --deploy-ecosystem) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --ecosystem-contracts-path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + 
--additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --deploy-paymaster) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --observability) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__setup__observability) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer) + opts="-v -h --verbose --chain --ignore-prerequisites --help init run-backend run help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help) + opts="init run-backend run help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__run__backend) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__init) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__run) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- 
"${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__run__backend) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node) + opts="-v -h --verbose --chain --ignore-prerequisites --help configs init build run wait help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__build) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__configs) + opts="-u -v -h --db-url --db-name --l1-rpc-url --use-default --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help) + opts="configs init build run wait help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W 
"${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__wait) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__init) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__run) + opts="-a -v -h --reinit --components --enable-consensus --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --components) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --enable-consensus) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__wait) + opts="-t -v -h --timeout --poll-interval --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --timeout) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -t) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --poll-interval) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help) + opts="autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__autocomplete) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain) + opts="create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-timestamp-asserter deploy-upgrader deploy-paymaster update-token-multiplier-setter" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + 
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__accept__chain__ownership) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__consensus__registry) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__l2__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__multicall3) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__paymaster) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__timestamp__asserter) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__upgrader) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__genesis) + opts="init-database server" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__genesis__init__database) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__genesis__server) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + 
COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__init) + opts="configs" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__init__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__initialize__bridges) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__register__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__update__token__multiplier__setter) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__consensus) + opts="set-attester-committee get-attester-committee wait-for-registry" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__consensus__get__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__consensus__set__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__consensus__wait__for__registry) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__contract__verifier) + opts="build run wait init" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__contract__verifier__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W 
"${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__contract__verifier__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__contract__verifier__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__contract__verifier__wait) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev) + opts="database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean) + opts="all containers contracts-cache" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean__all) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean__contracts__cache) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__config__writer) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database) + opts="check-sqlx-data drop migrate new-migration prepare reset setup" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__help__dev__database__check__sqlx__data) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__drop) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__migrate) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__new__migration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__prepare) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__reset) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__setup) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt) + opts="rustfmt contract prettier" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt__contract) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt__prettier) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt__rustfmt) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__generate__genesis) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__lint) + 
opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__prover) + opts="info insert-batch insert-version" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__prover__info) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__prover__insert__batch) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__prover__insert__version) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__send__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__snapshot) + opts="create" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__snapshot__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__status) + opts="ports" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__status__ports) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test) + opts="integration fees revert recovery upgrade build rust l1-contracts prover wallet loadtest" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; 
+ zkstack__help__dev__test__fees) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__integration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__l1__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__loadtest) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__prover) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__recovery) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__revert) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__rust) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__upgrade) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__wallet) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem) + opts="create build-transactions init change-default-chain setup-observability" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__help__ecosystem__change__default__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__setup__observability) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer) + opts="init run-backend run" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer__run__backend) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node) + opts="configs init build run wait" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__help__external__node__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node__wait) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__markdown) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__portal) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover) + opts="init setup-keys run init-bellman-cuda compressor-keys" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__compressor__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__init__bellman__cuda) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__setup__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__server) + opts="build run wait" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__server__build) + opts="" + if [[ ${cur} == -* || 
${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__server__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__server__wait) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__update) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__markdown) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__portal) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover) + opts="-v -h --verbose --chain --ignore-prerequisites --help init setup-keys run init-bellman-cuda compressor-keys help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__compressor__keys) + opts="-v -h --path --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help) + opts="init setup-keys run init-bellman-cuda compressor-keys help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__compressor__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi 
+ case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__init__bellman__cuda) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__setup__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__init) + opts="-u -d -v -h --dev --proof-store-dir --bucket-base-url --credentials-file --bucket-name --location --project-id --shall-save-to-public-bucket --public-store-dir --public-bucket-base-url --public-credentials-file --public-bucket-name --public-location --public-project-id --clone --bellman-cuda-dir --bellman-cuda --setup-compressor-key --path --region --mode --setup-keys --setup-database --prover-db-url --prover-db-name --use-default --dont-drop --cloud-type --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --proof-store-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bucket-base-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --credentials-file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bucket-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --location) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --project-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --shall-save-to-public-bucket) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --public-store-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-bucket-base-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-credentials-file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-bucket-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-location) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-project-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bellman-cuda-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bellman-cuda) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --setup-compressor-key) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --region) + COMPREPLY=($(compgen -W "us europe asia" -- "${cur}")) + return 0 + ;; + --mode) + COMPREPLY=($(compgen -W "download generate" -- "${cur}")) + return 0 + ;; + --setup-keys) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --setup-database) + 
COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --prover-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --use-default) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -u) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --dont-drop) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -d) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --cloud-type) + COMPREPLY=($(compgen -W "gcp local" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__init__bellman__cuda) + opts="-v -h --clone --bellman-cuda-dir --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --bellman-cuda-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__run) + opts="-l -h -m -v -h --component --round --threads --max-allocation --light-wvg-count --heavy-wvg-count --max-allocation --docker --tag --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --component) + COMPREPLY=($(compgen -W "gateway witness-generator witness-vector-generator prover circuit-prover compressor prover-job-monitor" -- "${cur}")) + return 0 + ;; + --round) + COMPREPLY=($(compgen -W "all-rounds basic-circuits leaf-aggregation node-aggregation recursion-tip scheduler" -- "${cur}")) + return 0 + ;; + --threads) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --max-allocation) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --light-wvg-count) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -l) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --heavy-wvg-count) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -h) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --max-allocation) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -m) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --docker) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --tag) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__setup__keys) + opts="-v -h --region --mode --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --region) + COMPREPLY=($(compgen -W "us europe asia" -- "${cur}")) + return 0 + ;; + --mode) + COMPREPLY=($(compgen -W "download generate" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__server) + opts="-a -v -h --components --genesis --additional-args --uring --verbose --chain --ignore-prerequisites --help build run wait 
help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --components) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__server__build) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__server__help) + opts="build run wait help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__server__help__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__server__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__server__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__server__help__wait) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__server__run) + opts="-a -v -h --components --genesis --additional-args --uring --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --components) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__server__wait) + opts="-t -v -h --timeout --poll-interval --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --timeout) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -t) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --poll-interval) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( 
$(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__update) + opts="-c -v -h --only-config --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + esac +} + +if [[ "${BASH_VERSINFO[0]}" -eq 4 && "${BASH_VERSINFO[1]}" -ge 4 || "${BASH_VERSINFO[0]}" -gt 4 ]]; then + complete -F _zkstack -o nosort -o bashdefault -o default zkstack +else + complete -F _zkstack -o bashdefault -o default zkstack +fi diff --git a/zkstack_cli/crates/zkstack/src/commands/args/autocomplete.rs b/zkstack_cli/crates/zkstack/src/commands/args/autocomplete.rs new file mode 100644 index 000000000000..8e44d644f39a --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/args/autocomplete.rs @@ -0,0 +1,13 @@ +use std::path::PathBuf; + +use clap::Parser; + +#[derive(Debug, Parser)] +pub struct AutocompleteArgs { + /// The shell to generate the autocomplete script for + #[arg(long = "generate", value_enum)] + pub generator: clap_complete::Shell, + /// The out directory to write the autocomplete script to + #[arg(short, long, default_value = "./")] + pub out: PathBuf, +} diff --git a/zkstack_cli/crates/zkstack/src/commands/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/args/mod.rs index d18b05c910e5..477f3a6ae9af 100644 --- a/zkstack_cli/crates/zkstack/src/commands/args/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/args/mod.rs @@ -1,7 +1,7 @@ -pub use containers::*; -pub use run_server::*; -pub use update::*; +pub use self::{autocomplete::*, containers::*, run_server::*, update::*, wait::*}; +mod autocomplete; mod containers; mod run_server; mod update; +mod wait; diff --git a/zkstack_cli/crates/zkstack/src/commands/args/run_server.rs b/zkstack_cli/crates/zkstack/src/commands/args/run_server.rs index d090c0de03f9..40344c90ad05 100644 --- a/zkstack_cli/crates/zkstack/src/commands/args/run_server.rs +++ b/zkstack_cli/crates/zkstack/src/commands/args/run_server.rs @@ -1,22 +1,53 @@ -use clap::Parser; +use clap::{Parser, Subcommand}; use serde::{Deserialize, Serialize}; -use crate::messages::{ - MSG_SERVER_ADDITIONAL_ARGS_HELP, MSG_SERVER_BUILD_HELP, MSG_SERVER_COMPONENTS_HELP, - MSG_SERVER_GENESIS_HELP, MSG_SERVER_URING_HELP, +use crate::{ + commands::args::WaitArgs, + messages::{ + MSG_SERVER_ADDITIONAL_ARGS_HELP, MSG_SERVER_COMPONENTS_HELP, MSG_SERVER_GENESIS_HELP, + MSG_SERVER_URING_HELP, + }, }; +#[derive(Debug, Parser)] +#[command(args_conflicts_with_subcommands = true, flatten_help = true)] +pub struct ServerArgs { + #[command(subcommand)] + command: Option, + #[command(flatten)] + run: RunServerArgs, +} + +#[derive(Debug, Subcommand)] +pub enum ServerCommand { + /// Builds server + Build, + /// Runs server + Run(RunServerArgs), + /// Waits for server to start + Wait(WaitArgs), +} + +impl From for ServerCommand { + fn from(args: ServerArgs) -> Self { + args.command.unwrap_or(ServerCommand::Run(args.run)) + } +} + #[derive(Debug, Serialize, Deserialize, Parser)] pub struct RunServerArgs { - #[clap(long, help = MSG_SERVER_COMPONENTS_HELP)] + #[arg(long, help = MSG_SERVER_COMPONENTS_HELP)] pub components: Option>, - #[clap(long, help = MSG_SERVER_GENESIS_HELP)] + #[arg(long, help = MSG_SERVER_GENESIS_HELP)] pub genesis: bool, - #[clap(long, short)] - #[arg(trailing_var_arg = true, allow_hyphen_values = 
true, hide = false, help = MSG_SERVER_ADDITIONAL_ARGS_HELP)] + #[arg( + long, short, + trailing_var_arg = true, + allow_hyphen_values = true, + hide = false, + help = MSG_SERVER_ADDITIONAL_ARGS_HELP + )] additional_args: Vec, - #[clap(long, help = MSG_SERVER_BUILD_HELP)] - pub build: bool, - #[clap(help=MSG_SERVER_URING_HELP, long, default_missing_value = "true")] + #[clap(help = MSG_SERVER_URING_HELP, long, default_missing_value = "true")] pub uring: bool, } diff --git a/zkstack_cli/crates/zkstack/src/commands/args/wait.rs b/zkstack_cli/crates/zkstack/src/commands/args/wait.rs new file mode 100644 index 000000000000..a3a7e32ae8b4 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/args/wait.rs @@ -0,0 +1,130 @@ +use std::{fmt, future::Future, time::Duration}; + +use anyhow::Context as _; +use clap::Parser; +use common::logger; +use reqwest::StatusCode; +use serde::{Deserialize, Serialize}; +use tokio::time::MissedTickBehavior; + +use crate::messages::{ + msg_wait_connect_err, msg_wait_non_successful_response, msg_wait_not_healthy, + msg_wait_starting_polling, msg_wait_timeout, MSG_WAIT_POLL_INTERVAL_HELP, + MSG_WAIT_TIMEOUT_HELP, +}; + +#[derive(Debug, Clone, Copy)] +enum PolledComponent { + Prometheus, + HealthCheck, +} + +impl fmt::Display for PolledComponent { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str(match self { + Self::Prometheus => "Prometheus", + Self::HealthCheck => "health check", + }) + } +} + +#[derive(Debug, Parser, Serialize, Deserialize)] +pub struct WaitArgs { + #[arg(long, short = 't', value_name = "SECONDS", help = MSG_WAIT_TIMEOUT_HELP)] + timeout: Option, + #[arg(long, value_name = "MILLIS", help = MSG_WAIT_POLL_INTERVAL_HELP, default_value_t = 100)] + poll_interval: u64, +} + +impl WaitArgs { + pub fn poll_interval(&self) -> Duration { + Duration::from_millis(self.poll_interval) + } + + pub async fn poll_prometheus(&self, port: u16, verbose: bool) -> anyhow::Result<()> { + let component = PolledComponent::Prometheus; + let url = format!("http://127.0.0.1:{port}/metrics"); + self.poll_with_timeout(component, self.poll_inner(component, &url, verbose)) + .await + } + + pub async fn poll_health_check(&self, port: u16, verbose: bool) -> anyhow::Result<()> { + let component = PolledComponent::HealthCheck; + let url = format!("http://127.0.0.1:{port}/health"); + self.poll_with_timeout(component, self.poll_inner(component, &url, verbose)) + .await + } + + pub async fn poll_with_timeout( + &self, + component: impl fmt::Display, + action: impl Future>, + ) -> anyhow::Result<()> { + match self.timeout { + None => action.await, + Some(timeout) => tokio::time::timeout(Duration::from_secs(timeout), action) + .await + .map_err(|_| anyhow::Error::msg(msg_wait_timeout(&component)))?, + } + } + + async fn poll_inner( + &self, + component: PolledComponent, + url: &str, + verbose: bool, + ) -> anyhow::Result<()> { + let poll_interval = Duration::from_millis(self.poll_interval); + let mut interval = tokio::time::interval(poll_interval); + interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + + if verbose { + logger::debug(msg_wait_starting_polling(&component, url, poll_interval)); + } + + let client = reqwest::Client::builder() + .connect_timeout(poll_interval) + .build() + .context("failed to build reqwest::Client")?; + + loop { + interval.tick().await; + + let response = match client.get(url).send().await { + Ok(response) => response, + Err(err) if err.is_connect() || err.is_timeout() => { + continue; + } + Err(err) => { 
+                    return Err(
+                        anyhow::Error::new(err).context(msg_wait_connect_err(&component, url))
+                    )
+                }
+            };
+
+            match component {
+                PolledComponent::Prometheus => {
+                    response
+                        .error_for_status()
+                        .with_context(|| msg_wait_non_successful_response(&component))?;
+                    return Ok(());
+                }
+                PolledComponent::HealthCheck => {
+                    if response.status().is_success() {
+                        return Ok(());
+                    }
+
+                    if response.status() == StatusCode::SERVICE_UNAVAILABLE {
+                        if verbose {
+                            logger::debug(msg_wait_not_healthy(url));
+                        }
+                    } else {
+                        response
+                            .error_for_status()
+                            .with_context(|| msg_wait_non_successful_response(&component))?;
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/zkstack_cli/crates/zkstack/src/commands/autocomplete.rs b/zkstack_cli/crates/zkstack/src/commands/autocomplete.rs
new file mode 100644
index 000000000000..0f2105cd5efa
--- /dev/null
+++ b/zkstack_cli/crates/zkstack/src/commands/autocomplete.rs
@@ -0,0 +1,52 @@
+use std::{
+    fs::File,
+    io::{BufWriter, Write},
+};
+
+use anyhow::Context;
+use clap::CommandFactory;
+use clap_complete::{generate, Generator};
+use common::logger;
+
+use super::args::AutocompleteArgs;
+use crate::{
+    messages::{msg_generate_autocomplete_file, MSG_OUTRO_AUTOCOMPLETE_GENERATION},
+    ZkStack,
+};
+
+pub fn run(args: AutocompleteArgs) -> anyhow::Result<()> {
+    let filename = autocomplete_file_name(&args.generator);
+    let path = args.out.join(filename);
+
+    logger::info(msg_generate_autocomplete_file(
+        path.to_str()
+            .context("the output file path is an invalid UTF8 string")?,
+    ));
+
+    let file = File::create(path).context("Failed to create file")?;
+    let mut writer = BufWriter::new(file);
+
+    generate_completions(args.generator, &mut writer)?;
+
+    logger::outro(MSG_OUTRO_AUTOCOMPLETE_GENERATION);
+
+    Ok(())
+}
+
+pub fn generate_completions<G: Generator>(gen: G, buf: &mut dyn Write) -> anyhow::Result<()> {
+    let mut cmd = ZkStack::command();
+    let cmd_name = cmd.get_name().to_string();
+
+    generate(gen, &mut cmd, cmd_name, buf);
+
+    Ok(())
+}
+
+pub fn autocomplete_file_name(shell: &clap_complete::Shell) -> &'static str {
+    match shell {
+        clap_complete::Shell::Bash => "zkstack.sh",
+        clap_complete::Shell::Fish => "zkstack.fish",
+        clap_complete::Shell::Zsh => "_zkstack.zsh",
+        _ => todo!(),
+    }
+}
diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs
index 5fc46c1b227a..ec37f9ba0304 100644
--- a/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs
@@ -1,7 +1,7 @@
 use std::{path::PathBuf, str::FromStr};
 
 use anyhow::{bail, Context};
-use clap::{Parser, ValueEnum};
+use clap::{Parser, ValueEnum, ValueHint};
 use common::{Prompt, PromptConfirm, PromptSelect};
 use config::forge_interface::deploy_ecosystem::output::Erc20Token;
 use serde::{Deserialize, Serialize};
@@ -18,6 +18,7 @@ use crate::{
         MSG_BASE_TOKEN_PRICE_DENOMINATOR_PROMPT, MSG_BASE_TOKEN_PRICE_NOMINATOR_HELP,
         MSG_BASE_TOKEN_PRICE_NOMINATOR_PROMPT, MSG_BASE_TOKEN_SELECTION_PROMPT, MSG_CHAIN_ID_HELP,
         MSG_CHAIN_ID_PROMPT, MSG_CHAIN_ID_VALIDATOR_ERR, MSG_CHAIN_NAME_PROMPT,
+        MSG_EVM_EMULATOR_HELP, MSG_EVM_EMULATOR_PROMPT,
         MSG_L1_BATCH_COMMIT_DATA_GENERATOR_MODE_PROMPT, MSG_L1_COMMIT_DATA_GENERATOR_MODE_HELP,
         MSG_NUMBER_VALIDATOR_GREATHER_THAN_ZERO_ERR, MSG_NUMBER_VALIDATOR_NOT_ZERO_ERR,
         MSG_PROVER_MODE_HELP, MSG_PROVER_VERSION_PROMPT, MSG_SET_AS_DEFAULT_HELP,
@@ -53,7 +54,7 @@ pub struct ChainCreateArgs {
     prover_mode: Option<ProverMode>,
     #[clap(long, help = MSG_WALLET_CREATION_HELP, value_enum)]
     wallet_creation: Option<WalletCreation>,
-    #[clap(long, help = MSG_WALLET_PATH_HELP)]
+    #[clap(long, help = MSG_WALLET_PATH_HELP, value_hint = ValueHint::FilePath)]
     wallet_path: Option<PathBuf>,
     #[clap(long, help = MSG_L1_COMMIT_DATA_GENERATOR_MODE_HELP)]
     l1_batch_commit_data_generator_mode: Option<L1BatchCommitmentMode>,
@@ -67,6 +68,8 @@ pub struct ChainCreateArgs {
     pub(crate) set_as_default: Option<bool>,
     #[clap(long, default_value = "false")]
     pub(crate) legacy_bridge: bool,
+    #[arg(long, help = MSG_EVM_EMULATOR_HELP, default_missing_value = "true", num_args = 0..=1)]
+    evm_emulator: Option<bool>,
 }
 
 impl ChainCreateArgs {
@@ -75,6 +78,7 @@ impl ChainCreateArgs {
         number_of_chains: u32,
         l1_network: &L1Network,
         possible_erc20: Vec<Erc20Token>,
+        link_to_code: String,
     ) -> anyhow::Result<ChainCreateArgsFinal> {
         let mut chain_name = self
             .chain_name
@@ -211,6 +215,12 @@ impl ChainCreateArgs {
             }
         };
 
+        let evm_emulator = self.evm_emulator.unwrap_or_else(|| {
+            PromptConfirm::new(MSG_EVM_EMULATOR_PROMPT)
+                .default(false)
+                .ask()
+        });
+
         let set_as_default = self.set_as_default.unwrap_or_else(|| {
             PromptConfirm::new(MSG_SET_AS_DEFAULT_PROMPT)
                 .default(true)
@@ -227,6 +237,8 @@ impl ChainCreateArgs {
             base_token,
             set_as_default,
             legacy_bridge: self.legacy_bridge,
+            evm_emulator,
+            link_to_code,
         })
     }
 }
@@ -242,6 +254,8 @@ pub struct ChainCreateArgsFinal {
     pub base_token: BaseToken,
     pub set_as_default: bool,
     pub legacy_bridge: bool,
+    pub evm_emulator: bool,
+    pub link_to_code: String,
 }
 
 #[derive(Debug, Clone, EnumIter, Display, PartialEq, Eq)]
diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs
index aaf995985a36..f990cbfd77da 100644
--- a/zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs
@@ -21,7 +21,7 @@ pub struct GenesisArgs {
     #[clap(long, help = MSG_SERVER_DB_NAME_HELP)]
     pub server_db_name: Option<String>,
     #[clap(long, short, help = MSG_USE_DEFAULT_DATABASES_HELP)]
-    pub use_default: bool,
+    pub dev: bool,
     #[clap(long, short, action)]
     pub dont_drop: bool,
 }
@@ -30,7 +30,7 @@ impl GenesisArgs {
     pub fn fill_values_with_prompt(self, config: &ChainConfig) -> GenesisArgsFinal {
         let DBNames { server_name, .. } = generate_db_names(config);
         let chain_name = config.name.clone();
-        if self.use_default {
+        if self.dev {
             GenesisArgsFinal {
                 server_db: DatabaseConfig::new(DATABASE_SERVER_URL.clone(), server_name),
                 dont_drop: self.dont_drop,
diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs
index c26ad6475247..b34809643cf5 100644
--- a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs
@@ -24,7 +24,7 @@ pub struct InitConfigsArgs {
     pub genesis_args: GenesisArgs,
     #[clap(long, help = MSG_L1_RPC_URL_HELP)]
     pub l1_rpc_url: Option<String>,
-    #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP, default_value = "false", default_missing_value = "true", num_args = 0..=1)]
+    #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP)]
     pub no_port_reallocation: bool,
 }
diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs
index be4d28202b80..a5c7a6890ca1 100644
--- a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs
@@ -9,8 +9,9 @@ use crate::{
     commands::chain::args::genesis::{GenesisArgs, GenesisArgsFinal},
     defaults::LOCAL_RPC_URL,
     messages::{
-        MSG_DEPLOY_PAYMASTER_PROMPT, MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP,
+        MSG_DEPLOY_PAYMASTER_PROMPT, MSG_DEV_ARG_HELP, MSG_L1_RPC_URL_HELP,
         MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP,
+        MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP,
     },
 };
@@ -22,45 +23,70 @@ pub struct InitArgs {
     #[clap(flatten)]
     #[serde(flatten)]
     pub forge_args: ForgeScriptArgs,
-    #[clap(flatten, next_help_heading = MSG_GENESIS_ARGS_HELP)]
-    #[serde(flatten)]
-    pub genesis_args: GenesisArgs,
+    #[clap(long, help = MSG_SERVER_DB_URL_HELP)]
+    pub server_db_url: Option<String>,
+    #[clap(long, help = MSG_SERVER_DB_NAME_HELP)]
+    pub server_db_name: Option<String>,
+    #[clap(long, short, action)]
+    pub dont_drop: bool,
     #[clap(long, default_missing_value = "true", num_args = 0..=1)]
     pub deploy_paymaster: Option<bool>,
     #[clap(long, help = MSG_L1_RPC_URL_HELP)]
     pub l1_rpc_url: Option<String>,
-    #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP, default_value = "false", default_missing_value = "true", num_args = 0..=1)]
+    #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP)]
     pub no_port_reallocation: bool,
+    #[clap(long, help = MSG_DEV_ARG_HELP)]
+    pub dev: bool,
 }
 
 impl InitArgs {
+    pub fn get_genesis_args(&self) -> GenesisArgs {
+        GenesisArgs {
+            server_db_url: self.server_db_url.clone(),
+            server_db_name: self.server_db_name.clone(),
+            dev: self.dev,
+            dont_drop: self.dont_drop,
+        }
+    }
+
     pub fn fill_values_with_prompt(self, config: &ChainConfig) -> InitArgsFinal {
-        let deploy_paymaster = self.deploy_paymaster.unwrap_or_else(|| {
-            common::PromptConfirm::new(MSG_DEPLOY_PAYMASTER_PROMPT)
-                .default(true)
-                .ask()
-        });
+        let genesis = self.get_genesis_args();
+
+        let deploy_paymaster = if self.dev {
+            true
+        } else {
+            self.deploy_paymaster.unwrap_or_else(|| {
+                common::PromptConfirm::new(MSG_DEPLOY_PAYMASTER_PROMPT)
+                    .default(true)
+                    .ask()
+            })
+        };
 
-        let l1_rpc_url = self.l1_rpc_url.unwrap_or_else(|| {
-            let mut prompt = Prompt::new(MSG_L1_RPC_URL_PROMPT);
-            if config.l1_network == L1Network::Localhost {
-                prompt = prompt.default(LOCAL_RPC_URL);
-            }
-            prompt
-                .validate_with(|val: &String| -> Result<(), String> {
-                    Url::parse(val)
-                        .map(|_| ())
-                        .map_err(|_| 
MSG_L1_RPC_URL_INVALID_ERR.to_string()) - }) - .ask() - }); + let l1_rpc_url = if self.dev { + LOCAL_RPC_URL.to_string() + } else { + self.l1_rpc_url.unwrap_or_else(|| { + let mut prompt = Prompt::new(MSG_L1_RPC_URL_PROMPT); + if config.l1_network == L1Network::Localhost { + prompt = prompt.default(LOCAL_RPC_URL); + } + prompt + .validate_with(|val: &String| -> Result<(), String> { + Url::parse(val) + .map(|_| ()) + .map_err(|_| MSG_L1_RPC_URL_INVALID_ERR.to_string()) + }) + .ask() + }) + }; InitArgsFinal { forge_args: self.forge_args, - genesis_args: self.genesis_args.fill_values_with_prompt(config), + genesis_args: genesis.fill_values_with_prompt(config), deploy_paymaster, l1_rpc_url, no_port_reallocation: self.no_port_reallocation, + dev: self.dev, } } } @@ -72,4 +98,5 @@ pub struct InitArgsFinal { pub deploy_paymaster: bool, pub l1_rpc_url: String, pub no_port_reallocation: bool, + pub dev: bool, } diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs index 5f1be15231bf..d3953c656596 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs @@ -42,7 +42,7 @@ pub(crate) async fn run(args: BuildTransactionsArgs, shell: &Shell) -> anyhow::R logger::note(MSG_SELECTED_CONFIG, logger::object_to_string(&chain_config)); let mut genesis_config = chain_config.get_genesis_config()?; - update_from_chain_config(&mut genesis_config, &chain_config); + update_from_chain_config(&mut genesis_config, &chain_config)?; // Copy ecosystem contracts let mut contracts_config = config diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/common.rs b/zkstack_cli/crates/zkstack/src/commands/chain/common.rs index e0aa0b4e0470..0c35b3ee4fe0 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/common.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/common.rs @@ -27,6 +27,9 @@ pub async fn distribute_eth( if let Some(deployer) = chain_wallets.deployer { addresses.push(deployer.address) } + if let Some(setter) = chain_wallets.token_multiplier_setter { + addresses.push(setter.address) + } common::ethereum::distribute_eth( wallets.operator, addresses, diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/create.rs b/zkstack_cli/crates/zkstack/src/commands/chain/create.rs index 48a320ec27e0..730c1df8d3f2 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/create.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/create.rs @@ -3,8 +3,9 @@ use std::cell::OnceCell; use anyhow::Context; use common::{logger, spinner::Spinner}; use config::{ - create_local_configs_dir, create_wallets, traits::SaveConfigWithBasePath, ChainConfig, - EcosystemConfig, + create_local_configs_dir, create_wallets, + traits::{ReadConfigWithBasePath, SaveConfigWithBasePath}, + ChainConfig, EcosystemConfig, GenesisConfig, }; use xshell::Shell; use zksync_basic_types::L2ChainId; @@ -13,8 +14,10 @@ use crate::{ commands::chain::args::create::{ChainCreateArgs, ChainCreateArgsFinal}, messages::{ MSG_ARGS_VALIDATOR_ERR, MSG_CHAIN_CREATED, MSG_CREATING_CHAIN, - MSG_CREATING_CHAIN_CONFIGURATIONS_SPINNER, MSG_SELECTED_CONFIG, + MSG_CREATING_CHAIN_CONFIGURATIONS_SPINNER, MSG_EVM_EMULATOR_HASH_MISSING_ERR, + MSG_SELECTED_CONFIG, }, + utils::link_to_code::resolve_link_to_code, }; pub fn run(args: ChainCreateArgs, shell: &Shell) -> anyhow::Result<()> { @@ -33,6 +36,7 @@ fn create( ecosystem_config.list_of_chains().len() as u32, 
&ecosystem_config.l1_network, tokens, + ecosystem_config.link_to_code.clone().display().to_string(), ) .context(MSG_ARGS_VALIDATOR_ERR)?; @@ -72,6 +76,15 @@ pub(crate) fn create_chain_inner( (L2ChainId::from(args.chain_id), None) }; let internal_id = ecosystem_config.list_of_chains().len() as u32; + let link_to_code = resolve_link_to_code(shell, chain_path.clone(), args.link_to_code.clone())?; + let default_genesis_config = GenesisConfig::read_with_base_path( + shell, + EcosystemConfig::default_configs_path(&link_to_code), + )?; + let has_evm_emulation_support = default_genesis_config.evm_emulator_hash.is_some(); + if args.evm_emulator && !has_evm_emulation_support { + anyhow::bail!(MSG_EVM_EMULATOR_HASH_MISSING_ERR); + } let chain_config = ChainConfig { id: internal_id, @@ -89,6 +102,7 @@ pub(crate) fn create_chain_inner( wallet_creation: args.wallet_creation, shell: OnceCell::from(shell.clone()), legacy_bridge, + evm_emulator: args.evm_emulator, }; create_wallets( diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs index 8dbd5c371c88..31cfc7f83977 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs @@ -12,7 +12,7 @@ use config::{ input::DeployL2ContractsInput, output::{ ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput, - Multicall3Output, + Multicall3Output, TimestampAsserterOutput, }, }, script_params::DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS, @@ -36,6 +36,7 @@ pub enum Deploy2ContractsOption { InitiailizeBridges, ConsensusRegistry, Multicall3, + TimestampAsserter, } pub async fn run( @@ -93,6 +94,16 @@ pub async fn run( ) .await?; } + Deploy2ContractsOption::TimestampAsserter => { + deploy_timestamp_asserter( + shell, + &chain_config, + &ecosystem_config, + &mut contracts, + args, + ) + .await?; + } Deploy2ContractsOption::InitiailizeBridges => { initialize_bridges( shell, @@ -213,6 +224,27 @@ pub async fn deploy_multicall3( .await } +pub async fn deploy_timestamp_asserter( + shell: &Shell, + chain_config: &ChainConfig, + ecosystem_config: &EcosystemConfig, + contracts_config: &mut ContractsConfig, + forge_args: ForgeScriptArgs, +) -> anyhow::Result<()> { + build_and_deploy( + shell, + chain_config, + ecosystem_config, + forge_args, + Some("runDeployTimestampAsserter"), + |shell, out| { + contracts_config + .set_timestamp_asserter_addr(&TimestampAsserterOutput::read(shell, out)?) 
+ }, + ) + .await +} + pub async fn deploy_l2_contracts( shell: &Shell, chain_config: &ChainConfig, @@ -236,6 +268,8 @@ pub async fn deploy_l2_contracts( contracts_config.set_default_l2_upgrade(&DefaultL2UpgradeOutput::read(shell, out)?)?; contracts_config.set_consensus_registry(&ConsensusRegistryOutput::read(shell, out)?)?; contracts_config.set_multicall3(&Multicall3Output::read(shell, out)?)?; + contracts_config + .set_timestamp_asserter_addr(&TimestampAsserterOutput::read(shell, out)?)?; Ok(()) }, ) diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs index 82986d9b41ae..31c5c681e7d3 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs @@ -81,7 +81,7 @@ pub async fn init_configs( // Initialize genesis config let mut genesis_config = chain_config.get_genesis_config()?; - update_from_chain_config(&mut genesis_config, chain_config); + update_from_chain_config(&mut genesis_config, chain_config)?; genesis_config.save_with_base_path(shell, &chain_config.configs)?; // Initialize contracts config diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/mod.rs index c9a47616486d..82b8656154ab 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/mod.rs @@ -56,6 +56,9 @@ pub enum ChainCommands { /// Deploy L2 multicall3 #[command(alias = "multicall3")] DeployMulticall3(ForgeScriptArgs), + /// Deploy L2 TimestampAsserter + #[command(alias = "timestamp-asserter")] + DeployTimestampAsserter(ForgeScriptArgs), /// Deploy Default Upgrader #[command(alias = "upgrader")] DeployUpgrader(ForgeScriptArgs), @@ -83,6 +86,9 @@ pub(crate) async fn run(shell: &Shell, args: ChainCommands) -> anyhow::Result<() ChainCommands::DeployMulticall3(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Multicall3).await } + ChainCommands::DeployTimestampAsserter(args) => { + deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::TimestampAsserter).await + } ChainCommands::DeployUpgrader(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Upgrader).await } diff --git a/zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs index 1855a5943dc7..7a998efedbf2 100644 --- a/zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs @@ -3,22 +3,23 @@ use std::{borrow::Borrow, collections::HashMap, path::PathBuf, sync::Arc}; /// Consensus registry contract operations. /// Includes code duplicated from `zksync_node_consensus::registry::abi`. 
 use anyhow::Context as _;
-use common::{logger, wallets::Wallet};
+use common::{config::global_config, logger, wallets::Wallet};
 use config::EcosystemConfig;
 use conv::*;
 use ethers::{
     abi::Detokenize,
     contract::{FunctionCall, Multicall},
     middleware::{Middleware, NonceManagerMiddleware, SignerMiddleware},
-    providers::{Http, JsonRpcClient, PendingTransaction, Provider, RawCall as _},
+    providers::{Http, JsonRpcClient, PendingTransaction, Provider, ProviderError, RawCall as _},
     signers::{LocalWallet, Signer as _},
     types::{Address, BlockId, H256},
 };
+use tokio::time::MissedTickBehavior;
 use xshell::Shell;
 use zksync_consensus_crypto::ByteFmt;
 use zksync_consensus_roles::{attester, validator};
 
-use crate::{messages, utils::consensus::parse_attester_committee};
+use crate::{commands::args::WaitArgs, messages, utils::consensus::parse_attester_committee};
 
 mod conv;
 mod proto;
@@ -92,6 +93,8 @@ pub enum Command {
     SetAttesterCommittee(SetAttesterCommitteeCommand),
     /// Fetches the attester committee from the consensus registry contract.
     GetAttesterCommittee,
+    /// Wait until the consensus registry contract is deployed to L2.
+    WaitForRegistry(WaitArgs),
 }
 
 /// Collection of sent transactions.
@@ -210,15 +213,18 @@ impl Setup {
         })
     }
 
+    fn consensus_registry_addr(&self) -> anyhow::Result<Address> {
+        self.contracts
+            .l2
+            .consensus_registry
+            .context(messages::MSG_CONSENSUS_REGISTRY_ADDRESS_NOT_CONFIGURED)
+    }
+
     fn consensus_registry<M: Middleware>(
         &self,
         m: Arc<M>,
     ) -> anyhow::Result<abi::ConsensusRegistry<M>> {
-        let addr = self
-            .contracts
-            .l2
-            .consensus_registry
-            .context(messages::MSG_CONSENSUS_REGISTRY_ADDRESS_NOT_CONFIGURED)?;
+        let addr = self.consensus_registry_addr()?;
         Ok(abi::ConsensusRegistry::new(addr, m))
     }
 
@@ -276,6 +282,58 @@ impl Setup {
         parse_attester_committee(attesters).context("parse_attester_committee()")
     }
 
+    async fn wait_for_registry_contract_inner(
+        &self,
+        args: &WaitArgs,
+        verbose: bool,
+    ) -> anyhow::Result<()> {
+        let addr = self.consensus_registry_addr()?;
+        let provider = self.provider().context("provider()")?;
+        let mut interval = tokio::time::interval(args.poll_interval());
+        interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
+
+        if verbose {
+            logger::debug(messages::msg_wait_consensus_registry_started_polling(
+                addr,
+                provider.url(),
+            ));
+        }
+
+        loop {
+            interval.tick().await;
+
+            let code = match provider.get_code(addr, None).await {
+                Ok(code) => code,
+                Err(ProviderError::HTTPError(err)) if err.is_connect() || err.is_timeout() => {
+                    continue;
+                }
+                Err(err) => {
+                    return Err(anyhow::Error::new(err)
+                        .context(messages::MSG_CONSENSUS_REGISTRY_POLL_ERROR))
+                }
+            };
+            if !code.is_empty() {
+                logger::info(messages::msg_consensus_registry_wait_success(
+                    addr,
+                    code.len(),
+                ));
+                return Ok(());
+            }
+        }
+    }
+
+    async fn wait_for_registry_contract(
+        &self,
+        args: &WaitArgs,
+        verbose: bool,
+    ) -> anyhow::Result<()> {
+        args.poll_with_timeout(
+            messages::MSG_CONSENSUS_REGISTRY_WAIT_COMPONENT,
+            self.wait_for_registry_contract_inner(args, verbose),
+        )
+        .await
+    }
+
     async fn set_attester_committee(&self, want: &attester::Committee) -> anyhow::Result<()> {
         let provider = self.provider().context("provider()")?;
         let block_id = self.last_block(&provider).await.context("last_block()")?;
@@ -410,6 +468,10 @@ impl Command {
                 let got = setup.get_attester_committee().await?;
                 print_attesters(&got);
             }
+            Self::WaitForRegistry(args) => {
+                let verbose = global_config().verbose;
+                setup.wait_for_registry_contract(&args, verbose).await?;
+            }
         }
         Ok(())
     }
diff --git a/zkstack_cli/crates/zkstack/src/commands/containers.rs b/zkstack_cli/crates/zkstack/src/commands/containers.rs
index 9c11cc2e3efc..8367289bd67f 100644
--- a/zkstack_cli/crates/zkstack/src/commands/containers.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/containers.rs
@@ -36,10 +36,6 @@ pub fn run(shell: &Shell, args: ContainersArgs) -> anyhow::Result<()> {
 }
 
 pub fn initialize_docker(shell: &Shell, ecosystem: &EcosystemConfig) -> anyhow::Result<()> {
-    if !shell.path_exists("volumes") {
-        create_docker_folders(shell)?;
-    };
-
     if !shell.path_exists(DOCKER_COMPOSE_FILE) {
         copy_dockerfile(shell, ecosystem.link_to_code.clone())?;
     };
@@ -75,14 +71,6 @@ pub fn start_containers(shell: &Shell, observability: bool) -> anyhow::Result<()> {
     Ok(())
 }
 
-fn create_docker_folders(shell: &Shell) -> anyhow::Result<()> {
-    shell.create_dir("volumes")?;
-    shell.create_dir("volumes/postgres")?;
-    shell.create_dir("volumes/reth")?;
-    shell.create_dir("volumes/reth/data")?;
-    Ok(())
-}
-
 fn copy_dockerfile(shell: &Shell, link_to_code: PathBuf) -> anyhow::Result<()> {
     let docker_compose_file = link_to_code.join(DOCKER_COMPOSE_FILE);
diff --git a/zkstack_cli/crates/zkstack/src/commands/contract_verifier/build.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/build.rs
new file mode 100644
index 000000000000..0ba72f6b2257
--- 
/dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/build.rs @@ -0,0 +1,26 @@ +use anyhow::Context; +use common::{cmd::Cmd, logger}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use crate::messages::{ + MSG_BUILDING_CONTRACT_VERIFIER, MSG_CHAIN_NOT_FOUND_ERR, + MSG_FAILED_TO_BUILD_CONTRACT_VERIFIER_ERR, +}; + +pub(crate) async fn build(shell: &Shell) -> anyhow::Result<()> { + let ecosystem = EcosystemConfig::from_file(shell)?; + let chain = ecosystem + .load_current_chain() + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + let _dir_guard = shell.push_dir(&chain.link_to_code); + + logger::info(MSG_BUILDING_CONTRACT_VERIFIER); + + let mut cmd = Cmd::new(cmd!( + shell, + "cargo build --release --bin zksync_contract_verifier" + )); + cmd = cmd.with_force_run(); + cmd.run().context(MSG_FAILED_TO_BUILD_CONTRACT_VERIFIER_ERR) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/contract_verifier/mod.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/mod.rs index 78bdc5fae7ec..e36e6ba62e7b 100644 --- a/zkstack_cli/crates/zkstack/src/commands/contract_verifier/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/mod.rs @@ -1,22 +1,32 @@ -use args::init::InitContractVerifierArgs; use clap::Subcommand; use xshell::Shell; -pub mod args; -pub mod init; -pub mod run; +use self::args::init::InitContractVerifierArgs; +use crate::commands::args::WaitArgs; + +mod args; +mod build; +mod init; +mod run; +mod wait; #[derive(Subcommand, Debug)] pub enum ContractVerifierCommands { + /// Build contract verifier binary + Build, /// Run contract verifier Run, + /// Wait for contract verifier to start + Wait(WaitArgs), /// Download required binaries for contract verifier Init(InitContractVerifierArgs), } pub(crate) async fn run(shell: &Shell, args: ContractVerifierCommands) -> anyhow::Result<()> { match args { + ContractVerifierCommands::Build => build::build(shell).await, ContractVerifierCommands::Run => run::run(shell).await, + ContractVerifierCommands::Wait(args) => wait::wait(shell, args).await, ContractVerifierCommands::Init(args) => init::run(shell, args).await, } } diff --git a/zkstack_cli/crates/zkstack/src/commands/contract_verifier/run.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/run.rs index 9913ec817e90..ebc33840bdea 100644 --- a/zkstack_cli/crates/zkstack/src/commands/contract_verifier/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/run.rs @@ -22,7 +22,7 @@ pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { let mut cmd = Cmd::new(cmd!( shell, - "cargo run --bin zksync_contract_verifier -- --config-path={config_path} --secrets-path={secrets_path}" + "cargo run --release --bin zksync_contract_verifier -- --config-path={config_path} --secrets-path={secrets_path}" )); cmd = cmd.with_force_run(); cmd.run().context(MSG_FAILED_TO_RUN_CONTRACT_VERIFIER_ERR) diff --git a/zkstack_cli/crates/zkstack/src/commands/contract_verifier/wait.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/wait.rs new file mode 100644 index 000000000000..011c888d3041 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/wait.rs @@ -0,0 +1,27 @@ +use anyhow::Context as _; +use common::{config::global_config, logger}; +use config::EcosystemConfig; +use xshell::Shell; + +use crate::{commands::args::WaitArgs, messages::MSG_CHAIN_NOT_FOUND_ERR}; + +pub(crate) async fn wait(shell: &Shell, args: WaitArgs) -> anyhow::Result<()> { + let ecosystem = EcosystemConfig::from_file(shell)?; + let chain = 
ecosystem + .load_current_chain() + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + let verbose = global_config().verbose; + + let prometheus_port = chain + .get_general_config()? + .contract_verifier + .as_ref() + .context("contract verifier config not specified")? + .prometheus_port; + logger::info("Waiting for contract verifier to become alive"); + args.poll_prometheus(prometheus_port, verbose).await?; + logger::info(format!( + "Contract verifier is alive with Prometheus server bound to :{prometheus_port}" + )); + Ok(()) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs index 4cb419ce7a46..0929f5e4623f 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs @@ -5,8 +5,7 @@ use config::{EcosystemConfig, DOCKER_COMPOSE_FILE}; use xshell::Shell; use crate::commands::dev::messages::{ - MSG_CONTRACTS_CLEANING, MSG_CONTRACTS_CLEANING_FINISHED, MSG_DOCKER_COMPOSE_CLEANED, - MSG_DOCKER_COMPOSE_DOWN, MSG_DOCKER_COMPOSE_REMOVE_VOLUMES, + MSG_CONTRACTS_CLEANING, MSG_CONTRACTS_CLEANING_FINISHED, MSG_DOCKER_COMPOSE_DOWN, }; #[derive(Subcommand, Debug)] @@ -35,9 +34,6 @@ pub fn run(shell: &Shell, args: CleanCommands) -> anyhow::Result<()> { pub fn containers(shell: &Shell) -> anyhow::Result<()> { logger::info(MSG_DOCKER_COMPOSE_DOWN); docker::down(shell, DOCKER_COMPOSE_FILE)?; - logger::info(MSG_DOCKER_COMPOSE_REMOVE_VOLUMES); - shell.remove_path("volumes")?; - logger::info(MSG_DOCKER_COMPOSE_CLEANED); Ok(()) } diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs index fbafaec09e6e..8e0384cbca99 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs @@ -2,9 +2,7 @@ use std::path::PathBuf; use clap::Parser; use common::{ - contracts::{ - build_l1_contracts, build_l2_contracts, build_system_contracts, build_test_contracts, - }, + contracts::{build_l1_contracts, build_l2_contracts, build_system_contracts}, logger, spinner::Spinner, }; @@ -14,8 +12,8 @@ use xshell::Shell; use crate::commands::dev::messages::{ MSG_BUILDING_CONTRACTS, MSG_BUILDING_CONTRACTS_SUCCESS, MSG_BUILDING_L1_CONTRACTS_SPINNER, MSG_BUILDING_L2_CONTRACTS_SPINNER, MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER, - MSG_BUILDING_TEST_CONTRACTS_SPINNER, MSG_BUILD_L1_CONTRACTS_HELP, MSG_BUILD_L2_CONTRACTS_HELP, - MSG_BUILD_SYSTEM_CONTRACTS_HELP, MSG_BUILD_TEST_CONTRACTS_HELP, MSG_NOTHING_TO_BUILD_MSG, + MSG_BUILD_L1_CONTRACTS_HELP, MSG_BUILD_L2_CONTRACTS_HELP, MSG_BUILD_SYSTEM_CONTRACTS_HELP, + MSG_NOTHING_TO_BUILD_MSG, }; #[derive(Debug, Parser)] @@ -26,8 +24,6 @@ pub struct ContractsArgs { pub l2_contracts: Option, #[clap(long, alias = "sc", help = MSG_BUILD_SYSTEM_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)] pub system_contracts: Option, - #[clap(long, alias = "test", help = MSG_BUILD_TEST_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)] - pub test_contracts: Option, } impl ContractsArgs { @@ -35,18 +31,15 @@ impl ContractsArgs { if self.l1_contracts.is_none() && self.l2_contracts.is_none() && self.system_contracts.is_none() - && self.test_contracts.is_none() { return vec![ ContractType::L1, ContractType::L2, ContractType::SystemContracts, - ContractType::TestContracts, ]; } let mut contracts = vec![]; - if 
self.l1_contracts.unwrap_or(false) { contracts.push(ContractType::L1); } @@ -56,10 +49,6 @@ impl ContractsArgs { if self.system_contracts.unwrap_or(false) { contracts.push(ContractType::SystemContracts); } - if self.test_contracts.unwrap_or(false) { - contracts.push(ContractType::TestContracts); - } - contracts } } @@ -69,7 +58,6 @@ pub enum ContractType { L1, L2, SystemContracts, - TestContracts, } struct ContractBuilder { @@ -96,11 +84,6 @@ impl ContractBuilder { msg: MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER.to_string(), link_to_code: ecosystem.link_to_code.clone(), }, - ContractType::TestContracts => Self { - cmd: Box::new(build_test_contracts), - msg: MSG_BUILDING_TEST_CONTRACTS_SPINNER.to_string(), - link_to_code: ecosystem.link_to_code.clone(), - }, } } diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs index 71f21a02e739..04955726706f 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs @@ -1,13 +1,23 @@ +use std::{ + fs::File, + io::{Read, Write}, + path::Path, +}; + +use anyhow::{bail, Context}; use clap::Parser; use common::{cmd::Cmd, logger, spinner::Spinner}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::commands::dev::{ - commands::lint_utils::{get_unignored_files, Target}, - messages::{ - msg_running_linter_for_extension_spinner, msg_running_linters_for_files, - MSG_LINT_CONFIG_PATH_ERR, MSG_RUNNING_CONTRACTS_LINTER_SPINNER, +use crate::commands::{ + autocomplete::{autocomplete_file_name, generate_completions}, + dev::{ + commands::lint_utils::{get_unignored_files, Target}, + messages::{ + msg_running_linter_for_extension_spinner, msg_running_linters_for_files, + MSG_LINT_CONFIG_PATH_ERR, MSG_RUNNING_CONTRACTS_LINTER_SPINNER, + }, }, }; @@ -30,6 +40,8 @@ pub fn run(shell: &Shell, args: LintArgs) -> anyhow::Result<()> { Target::Js, Target::Ts, Target::Contracts, + Target::Autocompletion, + Target::RustToolchain, ] } else { args.targets.clone() @@ -43,10 +55,14 @@ pub fn run(shell: &Shell, args: LintArgs) -> anyhow::Result<()> { match target { Target::Rs => lint_rs(shell, &ecosystem, args.check)?, Target::Contracts => lint_contracts(shell, &ecosystem, args.check)?, + Target::Autocompletion => lint_autocompletion_files(shell, args.check)?, + Target::RustToolchain => check_rust_toolchain(shell)?, ext => lint(shell, &ecosystem, &ext, args.check)?, } } + logger::outro("Linting complete."); + Ok(()) } @@ -56,13 +72,18 @@ fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::R let link_to_code = &ecosystem.link_to_code; let lint_to_prover = &ecosystem.link_to_code.join("prover"); let link_to_zkstack = &ecosystem.link_to_code.join("zkstack_cli"); - let paths = vec![link_to_code, lint_to_prover, link_to_zkstack]; spinner.freeze(); - for path in paths { + for path in [link_to_code, lint_to_prover, link_to_zkstack] { let _dir_guard = shell.push_dir(path); let mut cmd = cmd!(shell, "cargo clippy"); - let common_args = &["--locked", "--", "-D", "warnings"]; + let mut common_args = vec!["--locked", "--", "-D", "warnings"]; + + if !path.ends_with("prover") { + common_args.push("-D"); + common_args.push("unstable-features"); + } + if !check { cmd = cmd.args(&["--fix", "--allow-dirty"]); } @@ -73,6 +94,34 @@ fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::R Ok(()) } +fn check_rust_toolchain(shell: &Shell) -> anyhow::Result<()> { + // deserialize 
/zkstack_cli/rust-toolchain as TOML + let path = Path::new("zkstack_cli/rust-toolchain"); + if !path.exists() { + logger::info("WARNING: Please run this command from the project's root folder"); + return Ok(()); + } + let contents = shell.read_file(path)?; + let zkstack_cli_toolchain: toml::Value = toml::from_str(&contents)?; + + // deserialize /rust-toolchain as TOML + let path = Path::new("rust-toolchain"); + let contents = shell.read_file(path)?; + let zksync_era_toolchain: toml::Value = toml::from_str(&contents)?; + + // check if the toolchains are the same + if zksync_era_toolchain["toolchain"]["channel"] != zkstack_cli_toolchain["toolchain"]["channel"] + { + bail!( + "The Rust toolchains are not the same: ZKsync Era: {} - ZK Stack CLI: {}", + zksync_era_toolchain["toolchain"]["channel"], + zkstack_cli_toolchain["toolchain"]["channel"] + ); + } + + Ok(()) +} + fn get_linter(target: &Target) -> Vec<String> { match target { Target::Rs => vec!["cargo".to_string(), "clippy".to_string()], @@ -81,6 +130,8 @@ fn get_linter(target: &Target) -> Vec<String> { Target::Js => vec!["eslint".to_string()], Target::Ts => vec!["eslint".to_string(), "--ext".to_string(), "ts".to_string()], Target::Contracts => vec![], + Target::Autocompletion => vec![], + Target::RustToolchain => vec![], } } @@ -133,3 +184,45 @@ fn lint_contracts(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> an Ok(()) } + +fn lint_autocompletion_files(_shell: &Shell, check: bool) -> anyhow::Result<()> { + let completion_folder = Path::new("./zkstack_cli/crates/zkstack/completion/"); + if !completion_folder.exists() { + logger::info("WARNING: Please run this command from the project's root folder"); + return Ok(()); + } + + // Array of supported shells + let shells = [ + clap_complete::Shell::Bash, + clap_complete::Shell::Fish, + clap_complete::Shell::Zsh, + ]; + + for shell in shells { + let mut writer = Vec::new(); + + generate_completions(shell, &mut writer) + .context("Failed to generate autocompletion file")?; + + let new = String::from_utf8(writer)?; + + let path = completion_folder.join(autocomplete_file_name(&shell)); + let mut autocomplete_file = File::open(path.clone()) + .context(format!("failed to open {}", autocomplete_file_name(&shell)))?; + + let mut old = String::new(); + autocomplete_file.read_to_string(&mut old)?; + + if new != old { + if !check { + let mut autocomplete_file = File::create(path).context("Failed to create file")?; + autocomplete_file.write_all(new.as_bytes())?; + } else { + bail!("Autocompletion files need to be regenerated. 
To fix this issue, follow these steps: 1) Build an updated ZK Stack CLI using `zkstackup --local`, 2) Run `zkstack dev lint -t autocompletion` to generate the updated files, and 3) Commit the newly generated files.") + } + } + } + + Ok(()) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs index 9095e445384b..93184de76561 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs @@ -14,6 +14,8 @@ pub enum Target { Ts, Rs, Contracts, + Autocompletion, + RustToolchain, } #[derive(Deserialize, Serialize, Debug)] diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs index ab98e44533fb..a292168dc6e0 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs @@ -10,4 +10,5 @@ pub mod prover; pub mod send_transactions; pub mod snapshot; pub(crate) mod sql_fmt; +pub mod status; pub mod test; diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/args.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/args.rs new file mode 100644 index 000000000000..5ac52bf854a6 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/args.rs @@ -0,0 +1,45 @@ +use anyhow::Context; +use clap::Parser; +use config::EcosystemConfig; +use xshell::Shell; + +use crate::{ + commands::dev::messages::{ + MSG_API_CONFIG_NOT_FOUND_ERR, MSG_STATUS_PORTS_HELP, MSG_STATUS_URL_HELP, + }, + messages::MSG_CHAIN_NOT_FOUND_ERR, +}; + +#[derive(Debug, Parser)] +pub enum StatusSubcommands { + #[clap(about = MSG_STATUS_PORTS_HELP)] + Ports, +} + +#[derive(Debug, Parser)] +pub struct StatusArgs { + #[clap(long, short = 'u', help = MSG_STATUS_URL_HELP)] + pub url: Option<String>, + #[clap(subcommand)] + pub subcommand: Option<StatusSubcommands>, +} + +impl StatusArgs { + pub fn get_url(&self, shell: &Shell) -> anyhow::Result<String> { + if let Some(url) = &self.url { + Ok(url.clone()) + } else { + let ecosystem = EcosystemConfig::from_file(shell)?; + let chain = ecosystem + .load_current_chain() + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + let general_config = chain.get_general_config()?; + let health_check_port = general_config + .api_config + .context(MSG_API_CONFIG_NOT_FOUND_ERR)? 
+ .healthcheck + .port; + Ok(format!("http://localhost:{}/health", health_check_port)) + } + } +} diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/draw.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/draw.rs new file mode 100644 index 000000000000..d38d5b6d29f9 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/draw.rs @@ -0,0 +1,88 @@ +use crate::{commands::dev::commands::status::utils::is_port_open, utils::ports::PortInfo}; + +const DEFAULT_LINE_WIDTH: usize = 32; + +pub struct BoxProperties { + longest_line: usize, + border: String, + boxed_msg: Vec<String>, +} + +impl BoxProperties { + fn new(msg: &str) -> Self { + let longest_line = msg + .lines() + .map(|line| line.len()) + .max() + .unwrap_or(0) + .max(DEFAULT_LINE_WIDTH); + let width = longest_line + 2; + let border = "─".repeat(width); + let boxed_msg = msg + .lines() + .map(|line| format!("│ {:longest_line$} │", line)) + .collect(); + Self { + longest_line, + border, + boxed_msg, + } + } +} + +fn single_bordered_box(msg: &str) -> String { + let properties = BoxProperties::new(msg); + format!( + "┌{}┐\n{}\n└{}┘\n", + properties.border, + properties.boxed_msg.join("\n"), + properties.border + ) +} + +pub fn bordered_boxes(msg1: &str, msg2: Option<&String>) -> String { + if msg2.is_none() { + return single_bordered_box(msg1); + } + + let properties1 = BoxProperties::new(msg1); + let properties2 = BoxProperties::new(msg2.unwrap()); + + let max_lines = properties1.boxed_msg.len().max(properties2.boxed_msg.len()); + let header = format!("┌{}┐ ┌{}┐\n", properties1.border, properties2.border); + let footer = format!("└{}┘ └{}┘\n", properties1.border, properties2.border); + + let empty_line1 = format!( + "│ {:longest_line$} │", + "", + longest_line = properties1.longest_line + ); + let empty_line2 = format!( + "│ {:longest_line$} │", + "", + longest_line = properties2.longest_line + ); + + let boxed_info: Vec<String> = (0..max_lines) + .map(|i| { + let line1 = properties1.boxed_msg.get(i).unwrap_or(&empty_line1); + let line2 = properties2.boxed_msg.get(i).unwrap_or(&empty_line2); + format!("{} {}", line1, line2) + }) + .collect(); + + format!("{}{}\n{}", header, boxed_info.join("\n"), footer) +} + +pub fn format_port_info(port_info: &PortInfo) -> String { + let in_use_tag = if is_port_open(port_info.port) { + " [OPEN]" + } else { + "" + }; + + format!( + " - {}{} > {}\n", + port_info.port, in_use_tag, port_info.description + ) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/mod.rs new file mode 100644 index 000000000000..8687fcb04763 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/mod.rs @@ -0,0 +1,135 @@ +use std::collections::HashMap; + +use anyhow::Context; +use args::{StatusArgs, StatusSubcommands}; +use common::logger; +use draw::{bordered_boxes, format_port_info}; +use serde::Deserialize; +use serde_json::Value; +use utils::deslugify; +use xshell::Shell; + +use crate::{ + commands::dev::messages::{ + msg_failed_parse_response, msg_not_ready_components, msg_system_status, + MSG_ALL_COMPONENTS_READY, MSG_COMPONENTS, MSG_SOME_COMPONENTS_NOT_READY, + }, + utils::ports::EcosystemPortsScanner, +}; + +pub mod args; +mod draw; +mod utils; + +const STATUS_READY: &str = "ready"; + +#[derive(Deserialize, Debug)] +struct StatusResponse { + status: String, + components: HashMap<String, Component>, +} + +#[derive(Deserialize, Debug)] +struct Component { + status: String, + details: 
Option<Value>, +} + +fn print_status(health_check_url: String) -> anyhow::Result<()> { + let client = reqwest::blocking::Client::new(); + let response = client.get(&health_check_url).send()?.text()?; + + let status_response: StatusResponse = + serde_json::from_str(&response).context(msg_failed_parse_response(&response))?; + + if status_response.status.to_lowercase() == STATUS_READY { + logger::success(msg_system_status(&status_response.status)); + } else { + logger::warn(msg_system_status(&status_response.status)); + } + + let mut components_info = String::from(MSG_COMPONENTS); + let mut components = Vec::new(); + let mut not_ready_components = Vec::new(); + + for (component_name, component) in status_response.components { + let readable_name = deslugify(&component_name); + let mut component_info = format!("{}:\n - Status: {}", readable_name, component.status); + + if let Some(details) = &component.details { + for (key, value) in details.as_object().unwrap() { + component_info.push_str(&format!("\n - {}: {}", deslugify(key), value)); + } + } + + if component.status.to_lowercase() != STATUS_READY { + not_ready_components.push(readable_name); + } + + components.push(component_info); + } + + components.sort_by(|a, b| { + a.lines() + .count() + .cmp(&b.lines().count()) + .then_with(|| a.cmp(b)) + }); + + for chunk in components.chunks(2) { + components_info.push_str(&bordered_boxes(&chunk[0], chunk.get(1))); + } + + logger::info(components_info); + + if not_ready_components.is_empty() { + logger::outro(MSG_ALL_COMPONENTS_READY); + } else { + logger::warn(MSG_SOME_COMPONENTS_NOT_READY); + logger::outro(msg_not_ready_components(&not_ready_components.join(", "))); + } + + Ok(()) +} + +fn print_ports(shell: &Shell) -> anyhow::Result<()> { + let ports = EcosystemPortsScanner::scan(shell)?; + let grouped_ports = ports.group_by_file_path(); + + let mut all_port_lines: Vec<String> = Vec::new(); + + for (file_path, port_infos) in grouped_ports { + let mut port_info_lines = String::new(); + + for port_info in port_infos { + port_info_lines.push_str(&format_port_info(&port_info)); + } + + all_port_lines.push(format!("{}:\n{}", file_path, port_info_lines)); + } + + all_port_lines.sort_by(|a, b| { + b.lines() + .count() + .cmp(&a.lines().count()) + .then_with(|| a.cmp(b)) + }); + + let mut components_info = String::from("Ports:\n"); + for chunk in all_port_lines.chunks(2) { + components_info.push_str(&bordered_boxes(&chunk[0], chunk.get(1))); + } + + logger::info(components_info); + Ok(()) +} + +pub async fn run(shell: &Shell, args: StatusArgs) -> anyhow::Result<()> { + if let Some(StatusSubcommands::Ports) = args.subcommand { + return print_ports(shell); + } + + let health_check_url = args.get_url(shell)?; + + print_status(health_check_url) +}
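For orientation, a minimal sketch of the health check payload that print_status consumes. It deserializes with the StatusResponse/Component structs introduced above; the component name and details here are hypothetical, since the actual set depends on the node's configuration:

// Illustrative only: an assumed /health payload. Parses via the
// StatusResponse/Component structs defined in status/mod.rs above.
fn parse_sample_status() -> anyhow::Result<()> {
    let payload = r#"{
        "status": "ready",
        "components": {
            "http_api": { "status": "ready", "details": { "port": 3071 } }
        }
    }"#;
    let parsed: StatusResponse = serde_json::from_str(payload)?;
    assert_eq!(parsed.status, "ready");
    Ok(())
}

diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/utils.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/utils.rs new file mode 100644 index 000000000000..399a0fb0fec9 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/utils.rs @@ -0,0 +1,26 @@ +use std::net::TcpListener; + +pub fn is_port_open(port: u16) -> bool { + TcpListener::bind(("0.0.0.0", port)).is_err() || TcpListener::bind(("127.0.0.1", port)).is_err() +} + +pub fn deslugify(name: &str) -> String { + name.split('_') + .map(|word| { + let mut chars = word.chars(); + match chars.next() { + Some(first) => { + let capitalized = first.to_uppercase().collect::<String>() + chars.as_str(); + match capitalized.as_str() { + "Http" => "HTTP".to_string(), + "Api" => 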
"API".to_string(), + "Ws" => "WS".to_string(), + _ => capitalized, + } + } + None => String::new(), + } + }) + .collect::>() + .join(" ") +} diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs index 83d505aa5753..9e76850ff2e2 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs @@ -7,6 +7,6 @@ use crate::commands::dev::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP}; pub struct FeesArgs { #[clap(short, long, help = MSG_NO_DEPS_HELP)] pub no_deps: bool, - #[clap(short, long, help = MSG_NO_KILL_HELP)] + #[clap(long, help = MSG_NO_KILL_HELP)] pub no_kill: bool, } diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs index cf4734fd82e7..b6ce278a1ca7 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs @@ -11,6 +11,6 @@ pub struct RecoveryArgs { pub snapshot: bool, #[clap(short, long, help = MSG_NO_DEPS_HELP)] pub no_deps: bool, - #[clap(short, long, help = MSG_NO_KILL_HELP)] + #[clap(long, help = MSG_NO_KILL_HELP)] pub no_kill: bool, } diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs index e4fb7fba2a97..9f86eec7f3df 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs @@ -13,6 +13,6 @@ pub struct RevertArgs { pub external_node: bool, #[clap(short, long, help = MSG_NO_DEPS_HELP)] pub no_deps: bool, - #[clap(short, long, help = MSG_NO_KILL_HELP)] + #[clap(long, help = MSG_NO_KILL_HELP)] pub no_kill: bool, } diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs index bcd524bd2cb0..8435b437169d 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs @@ -16,9 +16,7 @@ use crate::commands::dev::messages::{ pub const TEST_WALLETS_PATH: &str = "etc/test_config/constant/eth.json"; const AMOUNT_FOR_DISTRIBUTION_TO_WALLETS: u128 = 1000000000000000000000; - pub const TS_INTEGRATION_PATH: &str = "core/tests/ts-integration"; -const CONTRACTS_TEST_DATA_PATH: &str = "etc/contracts-test-data"; #[derive(Deserialize)] pub struct TestWallets { @@ -90,9 +88,6 @@ pub fn build_contracts(shell: &Shell, ecosystem_config: &EcosystemConfig) -> any Cmd::new(cmd!(shell, "yarn build")).run()?; Cmd::new(cmd!(shell, "yarn build-yul")).run()?; - let _dir_guard = shell.push_dir(ecosystem_config.link_to_code.join(CONTRACTS_TEST_DATA_PATH)); - Cmd::new(cmd!(shell, "yarn build")).run()?; - spinner.finish(); Ok(()) } diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs index c7e639f8e87c..235aa95ee492 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs @@ -111,12 +111,10 @@ pub(super) const MSG_BUILDING_CONTRACTS: &str = "Building contracts"; pub(super) const MSG_BUILDING_L2_CONTRACTS_SPINNER: &str = "Building L2 contracts.."; pub(super) 
const MSG_BUILDING_L1_CONTRACTS_SPINNER: &str = "Building L1 contracts.."; pub(super) const MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER: &str = "Building system contracts.."; -pub(super) const MSG_BUILDING_TEST_CONTRACTS_SPINNER: &str = "Building test contracts.."; pub(super) const MSG_BUILDING_CONTRACTS_SUCCESS: &str = "Contracts built successfully"; pub(super) const MSG_BUILD_L1_CONTRACTS_HELP: &str = "Build L1 contracts"; pub(super) const MSG_BUILD_L2_CONTRACTS_HELP: &str = "Build L2 contracts"; pub(super) const MSG_BUILD_SYSTEM_CONTRACTS_HELP: &str = "Build system contracts"; -pub(super) const MSG_BUILD_TEST_CONTRACTS_HELP: &str = "Build test contracts"; // Integration tests related messages pub(super) fn msg_integration_tests_run(external_node: bool) -> String { @@ -157,9 +155,7 @@ pub(super) const MSG_UPGRADE_TEST_RUN_INFO: &str = "Running upgrade test"; pub(super) const MSG_UPGRADE_TEST_RUN_SUCCESS: &str = "Upgrade test ran successfully"; // Cleaning related messages -pub(super) const MSG_DOCKER_COMPOSE_DOWN: &str = "docker compose down"; -pub(super) const MSG_DOCKER_COMPOSE_REMOVE_VOLUMES: &str = "docker compose remove volumes"; -pub(super) const MSG_DOCKER_COMPOSE_CLEANED: &str = "docker compose network cleaned"; +pub(super) const MSG_DOCKER_COMPOSE_DOWN: &str = "docker compose down -v"; pub(super) const MSG_CONTRACTS_CLEANING: &str = "Removing contracts building and deployment artifacts"; pub(super) const MSG_CONTRACTS_CLEANING_FINISHED: &str = @@ -232,5 +228,28 @@ pub(super) const MSG_UNABLE_TO_READ_PARSE_JSON_ERR: &str = "Unable to parse JSON pub(super) const MSG_FAILED_TO_SEND_TXN_ERR: &str = "Failed to send transaction"; pub(super) const MSG_INVALID_L1_RPC_URL_ERR: &str = "Invalid L1 RPC URL"; +// Status related messages +pub(super) const MSG_STATUS_ABOUT: &str = "Get status of the server"; +pub(super) const MSG_API_CONFIG_NOT_FOUND_ERR: &str = "API config not found"; +pub(super) const MSG_STATUS_URL_HELP: &str = "URL of the health check endpoint"; +pub(super) const MSG_STATUS_PORTS_HELP: &str = "Show used ports"; +pub(super) const MSG_COMPONENTS: &str = "Components:\n"; +pub(super) const MSG_ALL_COMPONENTS_READY: &str = + "Overall System Status: All components operational and ready."; +pub(super) const MSG_SOME_COMPONENTS_NOT_READY: &str = + "Overall System Status: Some components are not ready."; + +pub(super) fn msg_system_status(status: &str) -> String { + format!("System Status: {}\n", status) +} + +pub(super) fn msg_failed_parse_response(response: &str) -> String { + format!("Failed to parse response: {}", response) +} + +pub(super) fn msg_not_ready_components(components: &str) -> String { + format!("Not Ready Components: {}", components) +} + // Genesis pub(super) const MSG_GENESIS_FILE_GENERATION_STARTED: &str = "Regenerate genesis file"; diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs index 9272436a9b9d..409c3a764eb1 100644 --- a/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs @@ -1,4 +1,6 @@ use clap::Subcommand; +use commands::status::args::StatusArgs; +use messages::MSG_STATUS_ABOUT; use xshell::Shell; use self::commands::{ @@ -41,6 +43,8 @@ pub enum DevCommands { ConfigWriter(ConfigWriterArgs), #[command(about = MSG_SEND_TXNS_ABOUT)] SendTransactions(SendTransactionsArgs), + #[command(about = MSG_STATUS_ABOUT)] + Status(StatusArgs), #[command(about = MSG_GENERATE_GENESIS_ABOUT, alias = "genesis")] GenerateGenesis, } @@ -59,6 +63,7 @@ pub 
async fn run(shell: &Shell, args: DevCommands) -> anyhow::Result<()> { DevCommands::SendTransactions(args) => { commands::send_transactions::run(shell, args).await? } + DevCommands::Status(args) => commands::status::run(shell, args).await?, DevCommands::GenerateGenesis => commands::genesis::run(shell).await?, } Ok(()) diff --git a/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs index 2e5c50f4538f..53d9c27be60b 100644 --- a/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs @@ -1,23 +1,20 @@ -use std::path::{Path, PathBuf}; +use std::path::PathBuf; -use anyhow::bail; -use clap::Parser; -use common::{cmd::Cmd, logger, Prompt, PromptConfirm, PromptSelect}; +use clap::{Parser, ValueHint}; +use common::{Prompt, PromptConfirm, PromptSelect}; use serde::{Deserialize, Serialize}; use slugify_rs::slugify; -use strum::{EnumIter, IntoEnumIterator}; +use strum::IntoEnumIterator; use types::{L1Network, WalletCreation}; -use xshell::{cmd, Shell}; +use xshell::Shell; use crate::{ commands::chain::{args::create::ChainCreateArgs, ChainCreateArgsFinal}, messages::{ - msg_path_to_zksync_does_not_exist_err, MSG_CONFIRM_STILL_USE_FOLDER, MSG_ECOSYSTEM_NAME_PROMPT, MSG_L1_NETWORK_HELP, MSG_L1_NETWORK_PROMPT, - MSG_LINK_TO_CODE_HELP, MSG_LINK_TO_CODE_PROMPT, MSG_LINK_TO_CODE_SELECTION_CLONE, - MSG_LINK_TO_CODE_SELECTION_PATH, MSG_NOT_MAIN_REPO_OR_FORK_ERR, - MSG_REPOSITORY_ORIGIN_PROMPT, MSG_START_CONTAINERS_HELP, MSG_START_CONTAINERS_PROMPT, + MSG_LINK_TO_CODE_HELP, MSG_START_CONTAINERS_HELP, MSG_START_CONTAINERS_PROMPT, }, + utils::link_to_code::get_link_to_code, }; #[derive(Debug, Serialize, Deserialize, Parser)] @@ -26,7 +23,7 @@ pub struct EcosystemCreateArgs { pub ecosystem_name: Option<String>, #[clap(long, help = MSG_L1_NETWORK_HELP, value_enum)] pub l1_network: Option<L1Network>, - #[clap(long, help = MSG_LINK_TO_CODE_HELP)] + #[clap(long, help = MSG_LINK_TO_CODE_HELP, value_hint = ValueHint::DirPath)] pub link_to_code: Option<String>, #[clap(flatten)] #[serde(flatten)] @@ -47,23 +44,7 @@ impl EcosystemCreateArgs { .unwrap_or_else(|| Prompt::new(MSG_ECOSYSTEM_NAME_PROMPT).ask()); ecosystem_name = slugify!(&ecosystem_name, separator = "_"); - let link_to_code = self.link_to_code.unwrap_or_else(|| { - let link_to_code_selection = - PromptSelect::new(MSG_REPOSITORY_ORIGIN_PROMPT, LinkToCodeSelection::iter()).ask(); - match link_to_code_selection { - LinkToCodeSelection::Clone => "".to_string(), - LinkToCodeSelection::Path => { - let mut path: String = Prompt::new(MSG_LINK_TO_CODE_PROMPT).ask(); - if let Err(err) = check_link_to_code(shell, &path) { - logger::warn(err); - if !PromptConfirm::new(MSG_CONFIRM_STILL_USE_FOLDER).ask() { - path = pick_new_link_to_code(shell); - } - } - path - } - } - }); + let link_to_code = self.link_to_code.unwrap_or_else(|| get_link_to_code(shell)); let l1_network = self .l1_network @@ -71,7 +52,9 @@ impl EcosystemCreateArgs { // Make the only chain as a default one self.chain.set_as_default = Some(true); - let chain = self.chain.fill_values_with_prompt(0, &l1_network, vec![])?; + let chain = + self.chain + .fill_values_with_prompt(0, &l1_network, vec![], link_to_code.clone())?; let start_containers = self.start_containers.unwrap_or_else(|| { PromptConfirm::new(MSG_START_CONTAINERS_PROMPT) @@ -107,55 +90,3 @@ impl EcosystemCreateArgsFinal { self.chain_args.clone() } } - -#[derive(Debug, Clone, EnumIter, PartialEq, Eq)] -enum 
LinkToCodeSelection { - Clone, - Path, -} - -impl std::fmt::Display for LinkToCodeSelection { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - LinkToCodeSelection::Clone => write!(f, "{MSG_LINK_TO_CODE_SELECTION_CLONE}"), - LinkToCodeSelection::Path => write!(f, "{MSG_LINK_TO_CODE_SELECTION_PATH}"), - } - } -} - -fn check_link_to_code(shell: &Shell, path: &str) -> anyhow::Result<()> { - let path = Path::new(path); - if !shell.path_exists(path) { - bail!(msg_path_to_zksync_does_not_exist_err( - path.to_str().unwrap() - )); - } - - let _guard = shell.push_dir(path); - let out = String::from_utf8( - Cmd::new(cmd!(shell, "git remote -v")) - .run_with_output()? - .stdout, - )?; - - if !out.contains("matter-labs/zksync-era") { - bail!(MSG_NOT_MAIN_REPO_OR_FORK_ERR); - } - - Ok(()) -} - -fn pick_new_link_to_code(shell: &Shell) -> String { - let link_to_code: String = Prompt::new(MSG_LINK_TO_CODE_PROMPT).ask(); - match check_link_to_code(shell, &link_to_code) { - Ok(_) => link_to_code, - Err(err) => { - logger::warn(err); - if !PromptConfirm::new(MSG_CONFIRM_STILL_USE_FOLDER).ask() { - pick_new_link_to_code(shell) - } else { - link_to_code - } - } - } -} diff --git a/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs index 830b7b25e470..09115fd49ba7 100644 --- a/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs @@ -11,9 +11,9 @@ use crate::{ defaults::LOCAL_RPC_URL, messages::{ MSG_DEPLOY_ECOSYSTEM_PROMPT, MSG_DEPLOY_ERC20_PROMPT, MSG_DEV_ARG_HELP, - MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, - MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP, MSG_OBSERVABILITY_HELP, - MSG_OBSERVABILITY_PROMPT, + MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, + MSG_NO_PORT_REALLOCATION_HELP, MSG_OBSERVABILITY_HELP, MSG_OBSERVABILITY_PROMPT, + MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, }, }; @@ -86,9 +86,12 @@ pub struct EcosystemInitArgs { /// Deploy Paymaster contract #[clap(long, default_missing_value = "true", num_args = 0..=1)] pub deploy_paymaster: Option<bool>, - #[clap(flatten, next_help_heading = MSG_GENESIS_ARGS_HELP)] - #[serde(flatten)] - pub genesis_args: GenesisArgs, + #[clap(long, help = MSG_SERVER_DB_URL_HELP)] + pub server_db_url: Option<String>, + #[clap(long, help = MSG_SERVER_DB_NAME_HELP)] + pub server_db_name: Option<String>, + #[clap(long, short, action)] + pub dont_drop: bool, /// Initialize ecosystem only and skip chain initialization (chain can be initialized later with `chain init` subcommand) #[clap(long, default_value_t = false)] pub ecosystem_only: bool, @@ -96,11 +99,20 @@ pub struct EcosystemInitArgs { pub dev: bool, #[clap(long, short = 'o', help = MSG_OBSERVABILITY_HELP, default_missing_value = "true", num_args = 0..=1)] pub observability: Option<bool>, - #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP, default_value = "false", default_missing_value = "true", num_args = 0..=1)] + #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP)] pub no_port_reallocation: bool, } impl EcosystemInitArgs { + pub fn get_genesis_args(&self) -> GenesisArgs { + GenesisArgs { + server_db_url: self.server_db_url.clone(), + server_db_name: self.server_db_name.clone(), + dev: self.dev, + dont_drop: self.dont_drop, + } + } + pub fn fill_values_with_prompt(self, l1_network: L1Network) -> EcosystemInitArgsFinal { let deploy_erc20 = if self.dev { true
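Several arguments above (deploy_paymaster, observability, and the contract-building flags earlier) share the same clap pattern. A minimal self-contained sketch of how it behaves; the Demo struct here is hypothetical:

use clap::Parser;

#[derive(Debug, Parser)]
struct Demo {
    // Bare `--deploy-paymaster` parses as Some(true); `--deploy-paymaster false`
    // parses as Some(false); omitting the flag leaves None so the CLI can prompt.
    #[clap(long, default_missing_value = "true", num_args = 0..=1)]
    deploy_paymaster: Option<bool>,
}

fn main() {
    let args = Demo::parse_from(["demo", "--deploy-paymaster"]);
    assert_eq!(args.deploy_paymaster, Some(true));
    assert_eq!(Demo::parse_from(["demo"]).deploy_paymaster, None);
}

diff --git 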
a/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs index 42b8f79b97eb..00d937bba294 100644 --- a/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs @@ -28,7 +28,7 @@ pub async fn deploy_l1( let deploy_config_path = DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.input(&config.link_to_code); let default_genesis_config = GenesisConfig::read_with_base_path(shell, config.get_default_configs_path()) - .context("Context")?; + .context("failed reading genesis config")?; let wallets_config = config.get_wallets()?; // For deploying ecosystem we only need genesis batch params diff --git a/zkstack_cli/crates/zkstack/src/commands/ecosystem/create.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/create.rs index 356b5322980f..203c667ade65 100644 --- a/zkstack_cli/crates/zkstack/src/commands/ecosystem/create.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/create.rs @@ -1,11 +1,8 @@ -use std::{path::PathBuf, str::FromStr}; - use anyhow::{bail, Context}; -use common::{git, logger, spinner::Spinner}; +use common::{logger, spinner::Spinner}; use config::{ create_local_configs_dir, create_wallets, get_default_era_chain_id, traits::SaveConfigWithBasePath, EcosystemConfig, EcosystemConfigFromFileError, - ZKSYNC_ERA_GIT_REPO, }; use xshell::Shell; @@ -22,11 +19,12 @@ use crate::{ }, }, messages::{ - msg_created_ecosystem, MSG_ARGS_VALIDATOR_ERR, MSG_CLONING_ERA_REPO_SPINNER, - MSG_CREATING_DEFAULT_CHAIN_SPINNER, MSG_CREATING_ECOSYSTEM, - MSG_CREATING_INITIAL_CONFIGURATIONS_SPINNER, MSG_ECOSYSTEM_ALREADY_EXISTS_ERR, - MSG_ECOSYSTEM_CONFIG_INVALID_ERR, MSG_SELECTED_CONFIG, MSG_STARTING_CONTAINERS_SPINNER, + msg_created_ecosystem, MSG_ARGS_VALIDATOR_ERR, MSG_CREATING_DEFAULT_CHAIN_SPINNER, + MSG_CREATING_ECOSYSTEM, MSG_CREATING_INITIAL_CONFIGURATIONS_SPINNER, + MSG_ECOSYSTEM_ALREADY_EXISTS_ERR, MSG_ECOSYSTEM_CONFIG_INVALID_ERR, MSG_SELECTED_CONFIG, + MSG_STARTING_CONTAINERS_SPINNER, }, + utils::link_to_code::resolve_link_to_code, }; pub fn run(args: EcosystemCreateArgs, shell: &Shell) -> anyhow::Result<()> { @@ -55,21 +53,7 @@ fn create(args: EcosystemCreateArgs, shell: &Shell) -> anyhow::Result<()> { let configs_path = create_local_configs_dir(shell, ".")?; - let link_to_code = if args.link_to_code.is_empty() { - let spinner = Spinner::new(MSG_CLONING_ERA_REPO_SPINNER); - let link_to_code = git::clone( - shell, - shell.current_dir(), - ZKSYNC_ERA_GIT_REPO, - "zksync-era", - )?; - spinner.finish(); - link_to_code - } else { - let path = PathBuf::from_str(&args.link_to_code)?; - git::submodule_update(shell, path.clone())?; - path - }; + let link_to_code = resolve_link_to_code(shell, shell.current_dir(), args.link_to_code.clone())?; let spinner = Spinner::new(MSG_CREATING_INITIAL_CONFIGURATIONS_SPINNER); let chain_config = args.chain_config(); diff --git a/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs index 6e006f8d65df..06b9b9161112 100644 --- a/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs @@ -341,10 +341,10 @@ async fn init_chains( }; // Set default values for dev mode let mut deploy_paymaster = init_args.deploy_paymaster; - let mut genesis_args = init_args.genesis_args.clone(); + let mut genesis_args = init_args.get_genesis_args().clone(); if final_init_args.dev { deploy_paymaster = Some(true); - genesis_args.use_default = 
true; + genesis_args.dev = true; } // Can't initialize multiple chains with the same DB if list_of_chains.len() > 1 { @@ -359,10 +359,13 @@ async fn init_chains( let chain_init_args = chain::args::init::InitArgs { forge_args: final_init_args.forge_args.clone(), - genesis_args: genesis_args.clone(), + server_db_url: genesis_args.server_db_url.clone(), + server_db_name: genesis_args.server_db_name.clone(), + dont_drop: genesis_args.dont_drop, deploy_paymaster, l1_rpc_url: Some(final_init_args.ecosystem.l1_rpc_url.clone()), no_port_reallocation: final_init_args.no_port_reallocation, + dev: final_init_args.dev, }; let final_chain_init_args = chain_init_args.fill_values_with_prompt(&chain_config); diff --git a/zkstack_cli/crates/zkstack/src/commands/explorer/init.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/init.rs index 5c8e10ba2d81..096c45da5d8f 100644 --- a/zkstack_cli/crates/zkstack/src/commands/explorer/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/explorer/init.rs @@ -108,14 +108,15 @@ fn build_explorer_chain_config( // Get L2 RPC URL from general config let l2_rpc_url = general_config.get_l2_rpc_url()?; // Get Verification API URL from general config - let verification_api_url = general_config + let verification_api_port = general_config .contract_verifier .as_ref() - .map(|verifier| &verifier.url) - .context("verification_url")?; + .map(|verifier| verifier.port) + .context("verifier.port")?; + let verification_api_url = format!("http://127.0.0.1:{verification_api_port}"); // Build API URL let api_port = backend_config.ports.api_http_port; - let api_url = format!("http://127.0.0.1:{}", api_port); + let api_url = format!("http://127.0.0.1:{api_port}"); // Build explorer chain config Ok(ExplorerChainConfig { diff --git a/zkstack_cli/crates/zkstack/src/commands/external_node/build.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/build.rs new file mode 100644 index 000000000000..ff15c0c77f30 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/external_node/build.rs @@ -0,0 +1,23 @@ +use anyhow::Context; +use common::{cmd::Cmd, logger}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use crate::messages::{MSG_BUILDING_EN, MSG_CHAIN_NOT_FOUND_ERR, MSG_FAILED_TO_BUILD_EN_ERR}; + +pub(crate) async fn build(shell: &Shell) -> anyhow::Result<()> { + let ecosystem = EcosystemConfig::from_file(shell)?; + let chain = ecosystem + .load_current_chain() + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + let _dir_guard = shell.push_dir(&chain.link_to_code); + + logger::info(MSG_BUILDING_EN); + + let mut cmd = Cmd::new(cmd!( + shell, + "cargo build --release --bin zksync_external_node" + )); + cmd = cmd.with_force_run(); + cmd.run().context(MSG_FAILED_TO_BUILD_EN_ERR) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/external_node/mod.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/mod.rs index 095566d24e87..7bd366d5871c 100644 --- a/zkstack_cli/crates/zkstack/src/commands/external_node/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/external_node/mod.rs @@ -1,12 +1,16 @@ -use args::{prepare_configs::PrepareConfigArgs, run::RunExternalNodeArgs}; use clap::Parser; use serde::{Deserialize, Serialize}; use xshell::Shell; +use self::args::{prepare_configs::PrepareConfigArgs, run::RunExternalNodeArgs}; +use crate::commands::args::WaitArgs; + mod args; +mod build; mod init; mod prepare_configs; mod run; +mod wait; #[derive(Debug, Serialize, Deserialize, Parser)] pub enum ExternalNodeCommands { @@ -14,14 +18,20 @@ pub enum ExternalNodeCommands { 
Configs(PrepareConfigArgs), /// Init databases Init, + /// Build external node + Build, /// Run external node Run(RunExternalNodeArgs), + /// Wait for external node to start + Wait(WaitArgs), } pub async fn run(shell: &Shell, commands: ExternalNodeCommands) -> anyhow::Result<()> { match commands { ExternalNodeCommands::Configs(args) => prepare_configs::run(shell, args), ExternalNodeCommands::Init => init::run(shell).await, + ExternalNodeCommands::Build => build::build(shell).await, ExternalNodeCommands::Run(args) => run::run(shell, args).await, + ExternalNodeCommands::Wait(args) => wait::wait(shell, args).await, } }
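A rough sketch of what the new Wait subcommand amounts to: polling the node's health check endpoint until it responds. This is not the actual WaitArgs::poll_health_check implementation; the URL shape, interval, and use of reqwest/tokio here are assumptions:

async fn wait_until_healthy(port: u16) -> anyhow::Result<()> {
    let url = format!("http://127.0.0.1:{port}/health");
    loop {
        // In this sketch, any successful HTTP response counts as "alive".
        if let Ok(resp) = reqwest::get(&url).await {
            if resp.status().is_success() {
                return Ok(());
            }
        }
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
    }
}

diff --git a/zkstack_cli/crates/zkstack/src/commands/external_node/wait.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/wait.rs new file mode 100644 index 000000000000..72568c36f363 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/external_node/wait.rs @@ -0,0 +1,35 @@ +use anyhow::Context as _; +use common::{config::global_config, logger}; +use config::{traits::ReadConfigWithBasePath, EcosystemConfig}; +use xshell::Shell; +use zksync_config::configs::GeneralConfig; + +use crate::{ + commands::args::WaitArgs, + messages::{msg_waiting_for_en_success, MSG_CHAIN_NOT_INITIALIZED, MSG_WAITING_FOR_EN}, +}; + +pub async fn wait(shell: &Shell, args: WaitArgs) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + let verbose = global_config().verbose; + + let en_path = chain_config + .external_node_config_path + .clone() + .context("External node is not initialized")?; + let general_config = GeneralConfig::read_with_base_path(shell, &en_path)?; + let health_check_port = general_config + .api_config + .as_ref() + .context("no API config")? 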
+ .healthcheck + .port; + + logger::info(MSG_WAITING_FOR_EN); + args.poll_health_check(health_check_port, verbose).await?; + logger::info(msg_waiting_for_en_success(health_check_port)); + Ok(()) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/mod.rs b/zkstack_cli/crates/zkstack/src/commands/mod.rs index c46400cc8657..b5319cbc6bfa 100644 --- a/zkstack_cli/crates/zkstack/src/commands/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/mod.rs @@ -1,4 +1,5 @@ pub mod args; +pub mod autocomplete; pub mod chain; pub mod consensus; pub mod containers; diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs index 280b5b2e91d8..fab798993025 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs @@ -61,7 +61,7 @@ pub struct ProverInitArgs { pub bellman_cuda: Option<bool>, #[clap(long, default_missing_value = "true", num_args = 0..=1)] - pub setup_compressor_keys: Option<bool>, + pub setup_compressor_key: Option<bool>, #[clap(flatten)] pub compressor_keys_args: CompressorKeysArgs, @@ -363,7 +363,7 @@ impl ProverInitArgs { }); } - let download_key = self.clone().setup_compressor_keys.unwrap_or_else(|| { + let download_key = self.clone().setup_compressor_key.unwrap_or_else(|| { PromptConfirm::new(MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT) .default(false) .ask() diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs index d7600ba2d31f..4b3a16a38fca 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs @@ -35,6 +35,8 @@ pub struct ProverRunArgs { pub circuit_prover_args: CircuitProverArgs, #[clap(long)] pub docker: Option<bool>, + #[clap(long)] + pub tag: Option<String>, } #[derive( @@ -174,16 +176,16 @@ impl ProverComponent { args.fri_prover_args.max_allocation.unwrap() )); }; - if args - .circuit_prover_args - .witness_vector_generator_count - .is_some() - { + if args.circuit_prover_args.light_wvg_count.is_some() { + additional_args.push(format!( + "--light-wvg-count={}", + args.circuit_prover_args.light_wvg_count.unwrap() + )); + }; + if args.circuit_prover_args.heavy_wvg_count.is_some() { additional_args.push(format!( - "--witness-vector-generator-count={}", - args.circuit_prover_args - .witness_vector_generator_count - .unwrap() + "--heavy-wvg-count={}", + args.circuit_prover_args.heavy_wvg_count.unwrap() )); }; } @@ -240,9 +242,11 @@ impl WitnessVectorGeneratorArgs { #[derive(Debug, Clone, Parser, Default)] pub struct CircuitProverArgs { - #[clap(long)] - pub witness_vector_generator_count: Option<usize>, - #[clap(long)] + #[clap(short = 'l', long)] + pub light_wvg_count: Option<usize>, + #[clap(short = 'h', long)] + pub heavy_wvg_count: Option<usize>, + #[clap(short = 'm', long)] + pub max_allocation: Option<usize>, } @@ -255,15 +259,21 @@ impl CircuitProverArgs { return Ok(Self::default()); } - let witness_vector_generator_count = - self.witness_vector_generator_count.unwrap_or_else(|| { - Prompt::new("Number of WVG jobs to run in parallel") - .default("1") - .ask() - }); + let light_wvg_count = self.light_wvg_count.unwrap_or_else(|| { + Prompt::new("Number of light WVG jobs to run in parallel") + .default("8") + .ask() + }); + + let heavy_wvg_count = self.heavy_wvg_count.unwrap_or_else(|| { + Prompt::new("Number of heavy WVG jobs to run in parallel") + .default("2") + .ask() + }); Ok(CircuitProverArgs { - 
witness_vector_generator_count: Some(witness_vector_generator_count), + light_wvg_count: Some(light_wvg_count), + heavy_wvg_count: Some(heavy_wvg_count), max_allocation: self.max_allocation, }) } @@ -300,6 +310,8 @@ impl ProverRunArgs { .ask() }); + let tag = self.tag.unwrap_or("latest2.0".to_string()); + Ok(ProverRunArgs { component: Some(component), witness_generator_args, @@ -307,6 +319,7 @@ impl ProverRunArgs { fri_prover_args: self.fri_prover_args, circuit_prover_args, docker: Some(docker), + tag: Some(tag), }) } } diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs index 863816b9ae69..85495d124041 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs @@ -33,7 +33,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() let application_args = component.get_application_args(in_docker)?; let additional_args = - component.get_additional_args(in_docker, args, &chain, &path_to_ecosystem)?; + component.get_additional_args(in_docker, args.clone(), &chain, &path_to_ecosystem)?; let (message, error) = match component { ProverComponent::WitnessGenerator => ( @@ -83,6 +83,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() run_dockerized_component( shell, component.image_name(), + &args.tag.unwrap(), &application_args, &additional_args, message, @@ -110,6 +111,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() fn run_dockerized_component( shell: &Shell, image_name: &str, + tag: &str, application_args: &[String], args: &[String], message: &'static str, @@ -124,7 +126,7 @@ fn run_dockerized_component( let mut cmd = Cmd::new(cmd!( shell, - "docker run --net=host -v {path_to_prover}/data/keys:/prover/data/keys -v {path_to_prover}/artifacts:/artifacts -v {path_to_configs}:/configs {application_args...} {image_name} {args...}" + "docker run --net=host -v {path_to_prover}/data/keys:/prover/data/keys -v {path_to_prover}/artifacts:/artifacts -v {path_to_configs}:/configs {application_args...} {image_name}:{tag} {args...}" )); cmd = cmd.with_force_run(); diff --git a/zkstack_cli/crates/zkstack/src/commands/server.rs b/zkstack_cli/crates/zkstack/src/commands/server.rs index be7a676a8252..10f267fb8526 100644 --- a/zkstack_cli/crates/zkstack/src/commands/server.rs +++ b/zkstack_cli/crates/zkstack/src/commands/server.rs @@ -1,5 +1,7 @@ use anyhow::Context; use common::{ + cmd::Cmd, + config::global_config, logger, server::{Server, ServerMode}, }; @@ -7,25 +9,38 @@ use config::{ traits::FileConfigWithDefaultName, ChainConfig, ContractsConfig, EcosystemConfig, GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig, }; -use xshell::Shell; +use xshell::{cmd, Shell}; use crate::{ - commands::args::RunServerArgs, - messages::{MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_RUN_SERVER_ERR, MSG_STARTING_SERVER}, + commands::args::{RunServerArgs, ServerArgs, ServerCommand, WaitArgs}, + messages::{ + msg_waiting_for_server_success, MSG_BUILDING_SERVER, MSG_CHAIN_NOT_INITIALIZED, + MSG_FAILED_TO_BUILD_SERVER_ERR, MSG_FAILED_TO_RUN_SERVER_ERR, MSG_STARTING_SERVER, + MSG_WAITING_FOR_SERVER, + }, }; -pub fn run(shell: &Shell, args: RunServerArgs) -> anyhow::Result<()> { +pub async fn run(shell: &Shell, args: ServerArgs) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; - let chain_config = ecosystem_config .load_current_chain() 
.context(MSG_CHAIN_NOT_INITIALIZED)?; - logger::info(MSG_STARTING_SERVER); + match ServerCommand::from(args) { + ServerCommand::Run(args) => run_server(args, &chain_config, shell), + ServerCommand::Build => build_server(&chain_config, shell), + ServerCommand::Wait(args) => wait_for_server(args, &chain_config).await, + } +} - run_server(args, &chain_config, shell)?; +fn build_server(chain_config: &ChainConfig, shell: &Shell) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(&chain_config.link_to_code); - Ok(()) + logger::info(MSG_BUILDING_SERVER); + + let mut cmd = Cmd::new(cmd!(shell, "cargo build --release --bin zksync_server")); + cmd = cmd.with_force_run(); + cmd.run().context(MSG_FAILED_TO_BUILD_SERVER_ERR) } fn run_server( @@ -33,17 +48,13 @@ fn run_server( chain_config: &ChainConfig, shell: &Shell, ) -> anyhow::Result<()> { + logger::info(MSG_STARTING_SERVER); let server = Server::new( args.components.clone(), chain_config.link_to_code.clone(), args.uring, ); - if args.build { - server.build(shell)?; - return Ok(()); - } - let mode = if args.genesis { ServerMode::Genesis } else { @@ -62,3 +73,20 @@ ) .context(MSG_FAILED_TO_RUN_SERVER_ERR) } + +async fn wait_for_server(args: WaitArgs, chain_config: &ChainConfig) -> anyhow::Result<()> { + let verbose = global_config().verbose; + + let health_check_port = chain_config + .get_general_config()? + .api_config + .as_ref() + .context("no API config")? + .healthcheck + .port; + + logger::info(MSG_WAITING_FOR_SERVER); + args.poll_health_check(health_check_port, verbose).await?; + logger::info(msg_waiting_for_server_success(health_check_port)); + Ok(()) +} diff --git a/zkstack_cli/crates/zkstack/src/consts.rs b/zkstack_cli/crates/zkstack/src/consts.rs index ba00af77b5a6..b7c4d2a20709 100644 --- a/zkstack_cli/crates/zkstack/src/consts.rs +++ b/zkstack_cli/crates/zkstack/src/consts.rs @@ -17,14 +17,13 @@ pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; pub const PORTAL_DOCKER_CONFIG_PATH: &str = "/usr/src/app/dist/config.js"; pub const PORTAL_DOCKER_IMAGE: &str = "matterlabs/dapp-portal"; -pub const PROVER_GATEWAY_DOCKER_IMAGE: &str = "matterlabs/prover-fri-gateway:latest2.0"; -pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator:latest2.0"; -pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = - "matterlabs/witness-vector-generator:latest2.0"; -pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri:latest2.0"; -pub const CIRCUIT_PROVER_DOCKER_IMAGE: &str = "matterlabs/circuit-prover-gpu:latest2.0"; -pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor:latest2.0"; -pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor:latest2.0"; +pub const PROVER_GATEWAY_DOCKER_IMAGE: &str = "matterlabs/prover-fri-gateway"; +pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator"; +pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-vector-generator"; +pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri"; +pub const CIRCUIT_PROVER_DOCKER_IMAGE: &str = "matterlabs/circuit-prover-gpu"; +pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor"; +pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor"; pub const PROVER_GATEWAY_BINARY_NAME: &str = "zksync_prover_fri_gateway"; pub const WITNESS_GENERATOR_BINARY_NAME: &str = "zksync_witness_generator";
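With the :latest2.0 suffixes dropped from these constants, the tag now travels separately and the image reference is assembled at run time, as in run_dockerized_component above. A tiny illustration:

// The default mirrors `self.tag.unwrap_or("latest2.0".to_string())` above.
fn main() {
    let image_name = "matterlabs/circuit-prover-gpu";
    let tag = "latest2.0";
    let image_ref = format!("{image_name}:{tag}");
    assert_eq!(image_ref, "matterlabs/circuit-prover-gpu:latest2.0");
}

diff --git a/zkstack_cli/crates/zkstack/src/main.rs 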
b/zkstack_cli/crates/zkstack/src/main.rs index 987de555ecf6..8a115201fc81 100644 --- a/zkstack_cli/crates/zkstack/src/main.rs +++ b/zkstack_cli/crates/zkstack/src/main.rs @@ -1,6 +1,6 @@ use clap::{command, Parser, Subcommand}; use commands::{ - args::{ContainersArgs, UpdateArgs}, + args::{AutocompleteArgs, ContainersArgs, UpdateArgs}, contract_verifier::ContractVerifierCommands, dev::DevCommands, }; @@ -15,7 +15,7 @@ use config::EcosystemConfig; use xshell::Shell; use crate::commands::{ - args::RunServerArgs, chain::ChainCommands, consensus, ecosystem::EcosystemCommands, + args::ServerArgs, chain::ChainCommands, consensus, ecosystem::EcosystemCommands, explorer::ExplorerCommands, external_node::ExternalNodeCommands, prover::ProverCommands, }; @@ -29,33 +29,36 @@ mod utils; #[derive(Parser, Debug)] #[command( + name = "zkstack", version = version_message(env!("CARGO_PKG_VERSION")), about )] -struct Inception { +struct ZkStack { #[command(subcommand)] - command: InceptionSubcommands, + command: ZkStackSubcommands, #[clap(flatten)] - global: InceptionGlobalArgs, + global: ZkStackGlobalArgs, } #[derive(Subcommand, Debug)] -pub enum InceptionSubcommands { +pub enum ZkStackSubcommands { + /// Create shell autocompletion files + Autocomplete(AutocompleteArgs), /// Ecosystem related commands #[command(subcommand, alias = "e")] Ecosystem(Box<EcosystemCommands>), /// Chain related commands #[command(subcommand, alias = "c")] Chain(Box<ChainCommands>), - /// Chain related commands + /// Supervisor related commands #[command(subcommand)] Dev(DevCommands), /// Prover related commands #[command(subcommand, alias = "p")] Prover(ProverCommands), /// Run server - Server(RunServerArgs), - /// External Node related commands + Server(ServerArgs), + /// External Node related commands #[command(subcommand, alias = "en")] ExternalNode(ExternalNodeCommands), /// Run containers for local development @@ -69,18 +72,20 @@ pub enum ZkStackSubcommands { /// Run block-explorer #[command(subcommand)] Explorer(ExplorerCommands), - /// Update ZKsync + /// Consensus utilities #[command(subcommand)] Consensus(consensus::Command), + /// Update ZKsync #[command(alias = "u")] Update(UpdateArgs), + /// Print markdown help #[command(hide = true)] Markdown, } #[derive(Parser, Debug)] #[clap(next_help_heading = "Global options")] -struct InceptionGlobalArgs { +struct ZkStackGlobalArgs { /// Verbose mode #[clap(short, long, global = true)] verbose: bool, @@ -98,8 +103,20 @@ async fn main() -> anyhow::Result<()> { // We must parse arguments before printing the intro, because some autogenerated // Clap commands (like `--version` would look odd otherwise). 
- let inception_args = Inception::parse(); + let zkstack_args = ZkStack::parse(); + + match run_subcommand(zkstack_args).await { + Ok(_) => {} + Err(error) => { + log_error(error); + std::process::exit(1); + } + } + + Ok(()) +} +async fn run_subcommand(zkstack_args: ZkStack) -> anyhow::Result<()> { init_prompt_theme(); logger::new_empty_line(); @@ -107,52 +124,39 @@ async fn main() -> anyhow::Result<()> { let shell = Shell::new().unwrap(); - init_global_config_inner(&shell, &inception_args.global)?; + init_global_config_inner(&shell, &zkstack_args.global)?; if !global_config().ignore_prerequisites { check_general_prerequisites(&shell); } - match run_subcommand(inception_args, &shell).await { - Ok(_) => {} - Err(error) => { - log_error(error); - std::process::exit(1); - } - } - Ok(()) -} - -async fn run_subcommand(inception_args: Inception, shell: &Shell) -> anyhow::Result<()> { - match inception_args.command { - InceptionSubcommands::Ecosystem(args) => commands::ecosystem::run(shell, *args).await?, - InceptionSubcommands::Chain(args) => commands::chain::run(shell, *args).await?, - InceptionSubcommands::Dev(args) => commands::dev::run(shell, args).await?, - InceptionSubcommands::Prover(args) => commands::prover::run(shell, args).await?, - InceptionSubcommands::Server(args) => commands::server::run(shell, args)?, - InceptionSubcommands::Containers(args) => commands::containers::run(shell, args)?, - InceptionSubcommands::ExternalNode(args) => { - commands::external_node::run(shell, args).await? + match zkstack_args.command { + ZkStackSubcommands::Autocomplete(args) => commands::autocomplete::run(args)?, + ZkStackSubcommands::Ecosystem(args) => commands::ecosystem::run(&shell, *args).await?, + ZkStackSubcommands::Chain(args) => commands::chain::run(&shell, *args).await?, + ZkStackSubcommands::Dev(args) => commands::dev::run(&shell, args).await?, + ZkStackSubcommands::Prover(args) => commands::prover::run(&shell, args).await?, + ZkStackSubcommands::Server(args) => commands::server::run(&shell, args).await?, + ZkStackSubcommands::Containers(args) => commands::containers::run(&shell, args)?, + ZkStackSubcommands::ExternalNode(args) => { + commands::external_node::run(&shell, args).await? } - InceptionSubcommands::ContractVerifier(args) => { - commands::contract_verifier::run(shell, args).await? + ZkStackSubcommands::ContractVerifier(args) => { + commands::contract_verifier::run(&shell, args).await? 
} - InceptionSubcommands::Explorer(args) => commands::explorer::run(shell, args).await?, - InceptionSubcommands::Consensus(cmd) => cmd.run(shell).await?, - InceptionSubcommands::Portal => commands::portal::run(shell).await?, - InceptionSubcommands::Update(args) => commands::update::run(shell, args).await?, - InceptionSubcommands::Markdown => { - clap_markdown::print_help_markdown::<Inception>(); + ZkStackSubcommands::Explorer(args) => commands::explorer::run(&shell, args).await?, + ZkStackSubcommands::Consensus(cmd) => cmd.run(&shell).await?, + ZkStackSubcommands::Portal => commands::portal::run(&shell).await?, + ZkStackSubcommands::Update(args) => commands::update::run(&shell, args).await?, + ZkStackSubcommands::Markdown => { + clap_markdown::print_help_markdown::<ZkStack>(); } } Ok(()) } -fn init_global_config_inner( - shell: &Shell, - inception_args: &InceptionGlobalArgs, -) -> anyhow::Result<()> { - if let Some(name) = &inception_args.chain { +fn init_global_config_inner(shell: &Shell, zkstack_args: &ZkStackGlobalArgs) -> anyhow::Result<()> { + if let Some(name) = &zkstack_args.chain { if let Ok(config) = EcosystemConfig::from_file(shell) { let chains = config.list_of_chains(); if !chains.contains(name) { @@ -165,9 +169,9 @@ fn init_global_config_inner( } } init_global_config(GlobalConfig { - verbose: inception_args.verbose, - chain_name: inception_args.chain.clone(), - ignore_prerequisites: inception_args.ignore_prerequisites, + verbose: zkstack_args.verbose, + chain_name: zkstack_args.chain.clone(), + ignore_prerequisites: zkstack_args.ignore_prerequisites, }); Ok(()) }
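The Autocomplete subcommand wired in above relies on generate_completions, which is not shown in this diff. A plausible sketch using clap_complete; the helper name and exact plumbing are assumptions:

use clap::CommandFactory;
use clap_complete::{generate, Shell};

// Writes completions for the `zkstack` binary into `buf` for the given shell.
fn write_completions(shell: Shell, buf: &mut dyn std::io::Write) {
    let mut cmd = ZkStack::command(); // `ZkStack` derives `Parser` above
    generate(shell, &mut cmd, "zkstack", buf);
}

diff --git a/zkstack_cli/crates/zkstack/src/messages.rs b/zkstack_cli/crates/zkstack/src/messages.rs index 6d6a1ceb566f..bedcb233b19f 100644 --- a/zkstack_cli/crates/zkstack/src/messages.rs +++ b/zkstack_cli/crates/zkstack/src/messages.rs @@ -1,9 +1,10 @@ -use std::path::Path; +use std::{fmt, path::Path, time::Duration}; use ethers::{ - types::{H160, U256}, + types::{Address, H160, U256}, utils::format_ether, }; +use url::Url; use zksync_consensus_roles::attester; pub(super) const MSG_SETUP_KEYS_DOWNLOAD_SELECTION_PROMPT: &str = @@ -15,6 +16,15 @@ pub(super) const MSG_SELECTED_CONFIG: &str = "Selected config"; pub(super) const MSG_CHAIN_NOT_INITIALIZED: &str = "Chain not initialized. Please create a chain first"; pub(super) const MSG_ARGS_VALIDATOR_ERR: &str = "Invalid arguments"; +pub(super) const MSG_DEV_ARG_HELP: &str = + "Use defaults for all options and flags. Suitable for local development"; + +/// Autocomplete message +pub(super) fn msg_generate_autocomplete_file(filename: &str) -> String { + format!("Generating completion file: {filename}") +} +pub(super) const MSG_OUTRO_AUTOCOMPLETE_GENERATION: &str = + "Autocompletion file correctly generated"; /// Ecosystem create related messages pub(super) const MSG_L1_NETWORK_HELP: &str = "L1 Network"; @@ -54,8 +64,6 @@ pub(super) fn msg_path_to_zksync_does_not_exist_err(path: &str) -> String { pub(super) const MSG_L1_RPC_URL_HELP: &str = "L1 RPC URL"; pub(super) const MSG_NO_PORT_REALLOCATION_HELP: &str = "Do not reallocate ports"; pub(super) const MSG_GENESIS_ARGS_HELP: &str = "Genesis options"; -pub(super) const MSG_DEV_ARG_HELP: &str = - "Deploy ecosystem using all defaults. Suitable for local development"; pub(super) const MSG_OBSERVABILITY_HELP: &str = "Enable Grafana"; pub(super) const MSG_OBSERVABILITY_PROMPT: &str = "Do you want to setup observability? 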
(Grafana)"; pub(super) const MSG_DEPLOY_ECOSYSTEM_PROMPT: &str = @@ -149,6 +157,7 @@ pub(super) const MSG_BASE_TOKEN_ADDRESS_HELP: &str = "Base token address"; pub(super) const MSG_BASE_TOKEN_PRICE_NOMINATOR_HELP: &str = "Base token nominator"; pub(super) const MSG_BASE_TOKEN_PRICE_DENOMINATOR_HELP: &str = "Base token denominator"; pub(super) const MSG_SET_AS_DEFAULT_HELP: &str = "Set as default chain"; +pub(super) const MSG_EVM_EMULATOR_HELP: &str = "Enable EVM emulator"; pub(super) const MSG_CHAIN_NAME_PROMPT: &str = "What do you want to name the chain?"; pub(super) const MSG_CHAIN_ID_PROMPT: &str = "What's the chain id?"; pub(super) const MSG_WALLET_CREATION_PROMPT: &str = "Select how do you want to create the wallet"; @@ -163,6 +172,7 @@ pub(super) const MSG_BASE_TOKEN_PRICE_NOMINATOR_PROMPT: &str = pub(super) const MSG_BASE_TOKEN_PRICE_DENOMINATOR_PROMPT: &str = "What is the base token price denominator?"; pub(super) const MSG_SET_AS_DEFAULT_PROMPT: &str = "Set this chain as default?"; +pub(super) const MSG_EVM_EMULATOR_PROMPT: &str = "Enable EVM emulator?"; pub(super) const MSG_WALLET_PATH_INVALID_ERR: &str = "Invalid path"; pub(super) const MSG_NUMBER_VALIDATOR_NOT_ZERO_ERR: &str = "Number is not zero"; pub(super) const MSG_NUMBER_VALIDATOR_GREATHER_THAN_ZERO_ERR: &str = @@ -177,6 +187,9 @@ pub(super) const MSG_WALLET_CREATION_VALIDATOR_ERR: &str = "Localhost wallet is not supported for external networks"; pub(super) const MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND: &str = "Token Multiplier Setter not found. Specify it in a wallet config"; +pub(super) const MSG_EVM_EMULATOR_HASH_MISSING_ERR: &str = + "Impossible to initialize a chain with EVM emulator: the template genesis config \ + does not contain EVM emulator hash"; /// Chain genesis related messages pub(super) const MSG_L1_SECRETS_MUST_BE_PRESENTED: &str = "L1 secret must be presented"; @@ -252,7 +265,6 @@ pub(super) const MSG_ENABLE_CONSENSUS_HELP: &str = "Enable consensus"; pub(super) const MSG_SERVER_GENESIS_HELP: &str = "Run server in genesis mode"; pub(super) const MSG_SERVER_ADDITIONAL_ARGS_HELP: &str = "Additional arguments that can be passed through the CLI"; -pub(super) const MSG_SERVER_BUILD_HELP: &str = "Build server but don't run it"; pub(super) const MSG_SERVER_URING_HELP: &str = "Enables uring support for RocksDB"; /// Accept ownership related messages @@ -272,6 +284,13 @@ pub(super) const MSG_OBSERVABILITY_RUN_PROMPT: &str = "Do you want to run observ pub(super) const MSG_STARTING_SERVER: &str = "Starting server"; pub(super) const MSG_FAILED_TO_RUN_SERVER_ERR: &str = "Failed to start server"; pub(super) const MSG_PREPARING_EN_CONFIGS: &str = "Preparing External Node config"; +pub(super) const MSG_BUILDING_SERVER: &str = "Building server"; +pub(super) const MSG_FAILED_TO_BUILD_SERVER_ERR: &str = "Failed to build server"; +pub(super) const MSG_WAITING_FOR_SERVER: &str = "Waiting for server to start"; + +pub(super) fn msg_waiting_for_server_success(health_check_port: u16) -> String { + format!("Server is alive with health check server on :{health_check_port}") +} /// Portal related messages pub(super) const MSG_PORTAL_FAILED_TO_FIND_ANY_CHAIN_ERR: &str = @@ -339,7 +358,14 @@ pub(super) const MSG_CONSENSUS_CONFIG_MISSING_ERR: &str = "Consensus config is m pub(super) const MSG_CONSENSUS_SECRETS_MISSING_ERR: &str = "Consensus secrets config is missing"; pub(super) const MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR: &str = "Consensus node key is missing"; +pub(super) const MSG_BUILDING_EN: &str = "Building external 
node"; +pub(super) const MSG_FAILED_TO_BUILD_EN_ERR: &str = "Failed to build external node"; pub(super) const MSG_STARTING_EN: &str = "Starting external node"; +pub(super) const MSG_WAITING_FOR_EN: &str = "Waiting for external node to start"; + +pub(super) fn msg_waiting_for_en_success(health_check_port: u16) -> String { + format!("External node is alive with health check server on :{health_check_port}") +} /// Prover related messages pub(super) const MSG_GENERATING_SK_SPINNER: &str = "Generating setup keys..."; @@ -417,7 +443,10 @@ pub(super) fn msg_bucket_created(bucket_name: &str) -> String { } /// Contract verifier related messages +pub(super) const MSG_BUILDING_CONTRACT_VERIFIER: &str = "Building contract verifier"; pub(super) const MSG_RUNNING_CONTRACT_VERIFIER: &str = "Running contract verifier"; +pub(super) const MSG_FAILED_TO_BUILD_CONTRACT_VERIFIER_ERR: &str = + "Failed to build contract verifier"; pub(super) const MSG_FAILED_TO_RUN_CONTRACT_VERIFIER_ERR: &str = "Failed to run contract verifier"; pub(super) const MSG_INVALID_ARCH_ERR: &str = "Invalid arch"; pub(super) const MSG_GET_ZKSOLC_RELEASES_ERR: &str = "Failed to get zksolc releases"; @@ -466,6 +495,34 @@ pub(super) const MSG_DIFF_EN_GENERAL_CONFIG: &str = "Added the following fields to the external node generalconfig:"; pub(super) const MSG_UPDATING_ERA_OBSERVABILITY_SPINNER: &str = "Updating era observability..."; +/// Wait-related messages +pub(super) const MSG_WAIT_TIMEOUT_HELP: &str = "Wait timeout in seconds"; +pub(super) const MSG_WAIT_POLL_INTERVAL_HELP: &str = "Poll interval in milliseconds"; + +pub(super) fn msg_wait_starting_polling( + component: &impl fmt::Display, + url: &str, + poll_interval: Duration, +) -> String { + format!("Starting polling {component} at `{url}` each {poll_interval:?}") +} + +pub(super) fn msg_wait_timeout(component: &impl fmt::Display) -> String { + format!("timed out polling {component}") +} + +pub(super) fn msg_wait_connect_err(component: &impl fmt::Display, url: &str) -> String { + format!("failed to connect to {component} at `{url}`") +} + +pub(super) fn msg_wait_non_successful_response(component: &impl fmt::Display) -> String { + format!("non-successful {component} response") +} + +pub(super) fn msg_wait_not_healthy(url: &str) -> String { + format!("Node at `{url}` is not healthy") +} + pub(super) fn msg_diff_genesis_config(chain: &str) -> String { format!( "Found differences between chain {chain} and era genesis configs. Consider updating the chain {chain} genesis config and re-running genesis. 
Diff:" @@ -504,9 +561,20 @@ pub(super) const MSG_CONSENSUS_REGISTRY_ADDRESS_NOT_CONFIGURED: &str = "consensus registry address not configured"; pub(super) const MSG_CONSENSUS_GENESIS_SPEC_ATTESTERS_MISSING_IN_GENERAL_YAML: &str = "consensus.genesis_spec.attesters missing in general.yaml"; +pub(super) const MSG_CONSENSUS_REGISTRY_POLL_ERROR: &str = "failed querying L2 node"; +pub(super) const MSG_CONSENSUS_REGISTRY_WAIT_COMPONENT: &str = "main node HTTP RPC"; + pub(super) fn msg_setting_attester_committee_failed( got: &attester::Committee, want: &attester::Committee, ) -> String { format!("setting attester committee failed: got {got:?}, want {want:?}") } + +pub(super) fn msg_wait_consensus_registry_started_polling(addr: Address, url: &Url) -> String { + format!("Starting polling L2 HTTP RPC at {url} for code at {addr:?}") +} + +pub(super) fn msg_consensus_registry_wait_success(addr: Address, code_len: usize) -> String { + format!("Consensus registry is deployed at {addr:?}: {code_len} bytes") +} diff --git a/zkstack_cli/crates/zkstack/src/utils/link_to_code.rs b/zkstack_cli/crates/zkstack/src/utils/link_to_code.rs new file mode 100644 index 000000000000..1f2eb487849d --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/utils/link_to_code.rs @@ -0,0 +1,109 @@ +use std::{ + path::{Path, PathBuf}, + str::FromStr, +}; + +use anyhow::bail; +use common::{cmd::Cmd, git, logger, spinner::Spinner, Prompt, PromptConfirm, PromptSelect}; +use config::ZKSYNC_ERA_GIT_REPO; +use strum::{EnumIter, IntoEnumIterator}; +use xshell::{cmd, Shell}; + +use crate::messages::{ + msg_path_to_zksync_does_not_exist_err, MSG_CLONING_ERA_REPO_SPINNER, + MSG_CONFIRM_STILL_USE_FOLDER, MSG_LINK_TO_CODE_PROMPT, MSG_LINK_TO_CODE_SELECTION_CLONE, + MSG_LINK_TO_CODE_SELECTION_PATH, MSG_NOT_MAIN_REPO_OR_FORK_ERR, MSG_REPOSITORY_ORIGIN_PROMPT, +}; + +#[derive(Debug, Clone, EnumIter, PartialEq, Eq)] +enum LinkToCodeSelection { + Clone, + Path, +} + +impl std::fmt::Display for LinkToCodeSelection { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + LinkToCodeSelection::Clone => write!(f, "{MSG_LINK_TO_CODE_SELECTION_CLONE}"), + LinkToCodeSelection::Path => write!(f, "{MSG_LINK_TO_CODE_SELECTION_PATH}"), + } + } +} + +fn check_link_to_code(shell: &Shell, path: &str) -> anyhow::Result<()> { + let path = Path::new(path); + if !shell.path_exists(path) { + bail!(msg_path_to_zksync_does_not_exist_err( + path.to_str().unwrap() + )); + } + + let _guard = shell.push_dir(path); + let out = String::from_utf8( + Cmd::new(cmd!(shell, "git remote -v")) + .run_with_output()? 
+            .stdout,
+    )?;
+
+    if !out.contains("matter-labs/zksync-era") {
+        bail!(MSG_NOT_MAIN_REPO_OR_FORK_ERR);
+    }
+
+    Ok(())
+}
+
+fn pick_new_link_to_code(shell: &Shell) -> String {
+    let link_to_code: String = Prompt::new(MSG_LINK_TO_CODE_PROMPT).ask();
+    match check_link_to_code(shell, &link_to_code) {
+        Ok(_) => link_to_code,
+        Err(err) => {
+            logger::warn(err);
+            if !PromptConfirm::new(MSG_CONFIRM_STILL_USE_FOLDER).ask() {
+                pick_new_link_to_code(shell)
+            } else {
+                link_to_code
+            }
+        }
+    }
+}
+
+pub(crate) fn get_link_to_code(shell: &Shell) -> String {
+    let link_to_code_selection =
+        PromptSelect::new(MSG_REPOSITORY_ORIGIN_PROMPT, LinkToCodeSelection::iter()).ask();
+    match link_to_code_selection {
+        LinkToCodeSelection::Clone => "".to_string(),
+        LinkToCodeSelection::Path => {
+            let mut path: String = Prompt::new(MSG_LINK_TO_CODE_PROMPT).ask();
+            if let Err(err) = check_link_to_code(shell, &path) {
+                logger::warn(err);
+                if !PromptConfirm::new(MSG_CONFIRM_STILL_USE_FOLDER).ask() {
+                    path = pick_new_link_to_code(shell);
+                }
+            }
+            path
+        }
+    }
+}
+
+pub(crate) fn resolve_link_to_code(
+    shell: &Shell,
+    base_path: PathBuf,
+    link_to_code: String,
+) -> anyhow::Result<PathBuf> {
+    if link_to_code.is_empty() {
+        if base_path.join("zksync-era").exists() {
+            return Ok(base_path.join("zksync-era"));
+        }
+        let spinner = Spinner::new(MSG_CLONING_ERA_REPO_SPINNER);
+        if !base_path.exists() {
+            shell.create_dir(&base_path)?;
+        }
+        let link_to_code = git::clone(shell, base_path, ZKSYNC_ERA_GIT_REPO, "zksync-era")?;
+        spinner.finish();
+        Ok(link_to_code)
+    } else {
+        let path = PathBuf::from_str(&link_to_code)?;
+        git::submodule_update(shell, path.clone())?;
+        Ok(path)
+    }
+}
diff --git a/zkstack_cli/crates/zkstack/src/utils/mod.rs b/zkstack_cli/crates/zkstack/src/utils/mod.rs
index cf7a7ef48182..a8bdc00d73fc 100644
--- a/zkstack_cli/crates/zkstack/src/utils/mod.rs
+++ b/zkstack_cli/crates/zkstack/src/utils/mod.rs
@@ -1,4 +1,5 @@
 pub mod consensus;
 pub mod forge;
+pub mod link_to_code;
 pub mod ports;
 pub mod rocks_db;
diff --git a/zkstack_cli/crates/zkstack/src/utils/ports.rs b/zkstack_cli/crates/zkstack/src/utils/ports.rs
index 04c8cef5ff59..6c299b999136 100644
--- a/zkstack_cli/crates/zkstack/src/utils/ports.rs
+++ b/zkstack_cli/crates/zkstack/src/utils/ports.rs
@@ -12,7 +12,24 @@ use xshell::Shell;
 use crate::defaults::{DEFAULT_OBSERVABILITY_PORT, PORT_RANGE_END, PORT_RANGE_START};
 
 pub struct EcosystemPorts {
-    pub ports: HashMap<u16, Vec<String>>,
+    pub ports: HashMap<u16, Vec<PortInfo>>,
+}
+
+#[derive(Clone, Debug, Default, PartialEq, Eq)]
+pub struct PortInfo {
+    pub port: u16,
+    pub file_path: String,
+    pub description: String,
+}
+
+impl fmt::Display for PortInfo {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "[{}] {} >{}",
+            self.file_path, self.description, self.port
+        )
+    }
 }
 
 impl EcosystemPorts {
@@ -20,14 +37,19 @@
         self.ports.contains_key(&port)
     }
 
-    pub fn add_port_info(&mut self, port: u16, info: String) {
+    pub fn add_port_info(&mut self, port: u16, info: PortInfo) {
+        let info = PortInfo {
+            port,
+            file_path: info.file_path,
+            description: info.description,
+        };
         self.ports.entry(port).or_default().push(info);
     }
 
-    pub fn allocate_port(&mut self, range: Range<u16>, info: String) -> anyhow::Result<u16> {
+    pub fn allocate_port(&mut self, range: Range<u16>, info: PortInfo) -> anyhow::Result<u16> {
         for port in range {
             if !self.is_port_assigned(port) {
-                self.add_port_info(port, info.to_string());
+                self.add_port_info(port, info);
                 return Ok(port);
             }
         }
@@ -48,10 +70,15 @@
         let mut new_ports = HashMap::new();
         for (desc, port) in config.get_default_ports()? {
             let mut new_port = port + offset;
+            let port_info = PortInfo {
+                port: new_port,
+                description: desc.clone(),
+                ..Default::default()
+            };
             if self.is_port_assigned(new_port) {
-                new_port = self.allocate_port(port_range.clone(), desc.clone())?;
+                new_port = self.allocate_port(port_range.clone(), port_info)?;
             } else {
-                self.add_port_info(new_port, desc.to_string());
+                self.add_port_info(new_port, port_info);
             }
             new_ports.insert(desc, new_port);
         }
@@ -89,7 +116,7 @@
                     if let Some(port) = val.as_u64().and_then(|p| u16::try_from(p).ok()) {
                         let new_port = self.allocate_port(
                             (port + offset as u16)..PORT_RANGE_END,
-                            "".to_string(),
+                            PortInfo::default(),
                         )?;
                         *val = Value::Number(serde_yaml::Number::from(new_port));
                         updated_ports.insert(port, new_port);
@@ -132,6 +159,19 @@
         Ok(())
     }
+
+    pub fn group_by_file_path(&self) -> HashMap<String, Vec<PortInfo>> {
+        let mut grouped_ports: HashMap<String, Vec<PortInfo>> = HashMap::new();
+        for port_infos in self.ports.values() {
+            for port_info in port_infos {
+                grouped_ports
+                    .entry(port_info.file_path.clone())
+                    .or_default()
+                    .push(port_info.clone());
+            }
+        }
+        grouped_ports
+    }
 }
 
 impl fmt::Display for EcosystemPorts {
@@ -278,8 +318,12 @@
         ecosystem_ports: &mut EcosystemPorts,
     ) {
         if let Some(port) = value.as_u64().and_then(|p| u16::try_from(p).ok()) {
-            let description = format!("[{}] {}", file_path.display(), path);
-            ecosystem_ports.add_port_info(port, description);
+            let info = PortInfo {
+                port,
+                file_path: file_path.display().to_string(),
+                description: path.to_string(),
+            };
+            ecosystem_ports.add_port_info(port, info);
         }
     }
 
@@ -318,8 +362,12 @@
         file_path: &Path,
         ecosystem_ports: &mut EcosystemPorts,
     ) {
-        let description = format!("[{}] {}", file_path.display(), path);
-        ecosystem_ports.add_port_info(port, description);
+        let info = PortInfo {
+            port,
+            file_path: file_path.display().to_string(),
+            description: path.to_string(),
+        };
+        ecosystem_ports.add_port_info(port, info);
     }
 }
 
@@ -360,7 +408,7 @@ impl ConfigWithChainPorts for ExplorerBackendPorts {
 mod tests {
     use std::path::PathBuf;
 
-    use crate::utils::ports::{EcosystemPorts, EcosystemPortsScanner};
+    use crate::utils::ports::{EcosystemPorts, EcosystemPortsScanner, PortInfo};
 
     #[test]
     fn test_traverse_yaml() {
@@ -414,21 +462,28 @@
         // Check description:
         let port_3050_info = ecosystem_ports.ports.get(&3050).unwrap();
         assert_eq!(port_3050_info.len(), 1);
-        assert_eq!(
-            port_3050_info[0],
-            "[test_config.yaml] api:web3_json_rpc:http_port"
-        );
+        let expected_port_3050_info = PortInfo {
+            port: 3050,
+            file_path: "test_config.yaml".to_string(),
+            description: "api:web3_json_rpc:http_port".to_string(),
+        };
+        assert_eq!(port_3050_info[0], expected_port_3050_info);
 
         let port_3412_info = ecosystem_ports.ports.get(&3412).unwrap();
         assert_eq!(port_3412_info.len(), 2);
-        assert_eq!(
-            port_3412_info[0],
-            "[test_config.yaml] api:prometheus:listener_port"
-        );
-        assert_eq!(
-            port_3412_info[1],
-            "[test_config.yaml] prometheus:listener_port"
-        );
+        let expected_port_3412_info_0 = PortInfo {
+            port: 3412,
+            file_path: "test_config.yaml".to_string(),
+            description: "api:prometheus:listener_port".to_string(),
+        };
+        let expected_port_3412_info_1 = PortInfo {
+            port: 3412,
+            file_path: "test_config.yaml".to_string(),
+            description: "prometheus:listener_port".to_string(),
+        };
+
+        assert_eq!(port_3412_info[0], expected_port_3412_info_0);
+        assert_eq!(port_3412_info[1], expected_port_3412_info_1);
     }
 
     #[test]
@@ -451,7 +506,12 @@
         assert!(ecosystem_ports.is_port_assigned(3050));
         let port_info = ecosystem_ports.ports.get(&3050).unwrap();
-        assert_eq!(port_info[0], "[test_config.yaml] web3_json_rpc:http_port");
+        let expected_port_info = PortInfo {
+            port: 3050,
+            file_path: "test_config.yaml".to_string(),
+            description: "web3_json_rpc:http_port".to_string(),
+        };
+        assert_eq!(port_info[0], expected_port_info);
     }
 
     #[test]
@@ -482,7 +542,12 @@
         assert!(ecosystem_ports.is_port_assigned(8546));
         let port_info = ecosystem_ports.ports.get(&8546).unwrap();
-        assert_eq!(port_info[0], "[test_config.yaml] reth:ports");
+        let expected_port_info = PortInfo {
+            port: 8546,
+            file_path: "test_config.yaml".to_string(),
+            description: "reth:ports".to_string(),
+        };
+        assert_eq!(port_info[0], expected_port_info);
     }
 
     #[test]
diff --git a/zkstack_cli/rust-toolchain b/zkstack_cli/rust-toolchain
index dbd41264aa9f..bc5d1d6bbd8e 100644
--- a/zkstack_cli/rust-toolchain
+++ b/zkstack_cli/rust-toolchain
@@ -1 +1,2 @@
-1.81.0
+[toolchain]
+channel = "nightly-2024-08-01"