diff --git a/.githooks/pre-push b/.githooks/pre-push index 73168e08ec42..ef5e77cbc796 100755 --- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -6,14 +6,29 @@ RED='\033[0;31m' NC='\033[0m' # No Color +# Common prompts +INSTALL_PROMPT="Please install ZK Stack CLI using zkstackup from https://github.com/matter-labs/zksync-era/tree/main/zkstack_cli/zkstackup" +FORMAT_PROMPT="Please format the code via 'zkstack dev fmt'; cannot push unformatted code" + # Check that prettier formatting rules are not violated. -if which zk_supervisor >/dev/null; then - if ! zk_supervisor fmt --check; then +if which zkstack >/dev/null; then + if ! zkstack dev fmt --check; then echo -e "${RED}Push error!${NC}" - echo "Please format the code via 'zks fmt', cannot push unformatted code" + echo -e "${FORMAT_PROMPT}" exit 1 fi else - echo "Please install zk_toolbox using zkup from https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox/zkup, and then run ./bin/zkt from the zksync-era repository." - exit 1 + if which zk_supervisor >/dev/null; then + echo -e "${RED}WARNING: zkup, zk_inception/zki, and zk_supervisor/zks are DEPRECATED.${NC}" + echo -e "${RED}${INSTALL_PROMPT}${NC}" + + if ! zk_supervisor fmt --check; then + echo -e "${RED}Push error!${NC}" + echo -e "${FORMAT_PROMPT}" + exit 1 + fi + else + echo -e "${INSTALL_PROMPT}" + exit 1 + fi fi diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index a712db9f75b4..d68b45e9d435 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -17,4 +17,4 @@ - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. -- [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. +- [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
diff --git a/.github/release-please/config.json b/.github/release-please/config.json index 86839e804ca4..358e249a18bd 100644 --- a/.github/release-please/config.json +++ b/.github/release-please/config.json @@ -20,9 +20,9 @@ "release-type": "simple", "component": "prover" }, - "zk_toolbox": { + "zkstack_cli": { "release-type": "simple", - "component": "zk_toolbox", + "component": "zkstack_cli", "plugins": [ "cargo-workspace" ] diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index e0e8fbeecf74..ca19e91219d9 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "core": "24.28.0", "prover": "16.5.0", - "zk_toolbox": "0.1.2" + "zkstack_cli": "0.1.2" } diff --git a/.github/workflows/build-contract-verifier-template.yml b/.github/workflows/build-contract-verifier-template.yml index bb385b2797b2..e4d04b90410e 100644 --- a/.github/workflows/build-contract-verifier-template.yml +++ b/.github/workflows/build-contract-verifier-template.yml @@ -113,15 +113,19 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run ./bin/zkt || true ci_run ./bin/zk || true ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + - name: install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + - name: build contracts if: env.BUILD_CONTRACTS == 'true' run: | ci_run cp etc/tokens/{test,localhost}.json - ci_run zk_supervisor contracts + ci_run zkstack dev contracts - name: Login to Docker registries if: ${{ inputs.action == 'push' }} diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index deaf087cd3eb..33053b6a4000 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -127,14 +127,18 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts ci_run ./bin/zk || true - ci_run ./bin/zkt || true ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local - name: build contracts if: env.BUILD_CONTRACTS == 'true' run: | ci_run cp etc/tokens/{test,localhost}.json - ci_run zk_supervisor contracts + ci_run zkstack dev contracts - name: Login to Docker registries if: ${{ inputs.action == 'push' }} diff --git a/.github/workflows/build-local-node-docker.yml b/.github/workflows/build-local-node-docker.yml index f664bfaaa00a..80142cb6005c 100644 --- a/.github/workflows/build-local-node-docker.yml +++ b/.github/workflows/build-local-node-docker.yml @@ -53,6 +53,11 @@ jobs: mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g - name: init run: | @@ -61,9 +66,11 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts ci_run zk - ci_run zkt ci_run cp 
etc/tokens/{test,localhost}.json - ci_run zk_supervisor contracts + + - name: build contracts + run: | + ci_run zkstack dev contracts - name: update-image run: | diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml index 2f51229aeaf9..7d75fb224d6e 100644 --- a/.github/workflows/ci-common-reusable.yml +++ b/.github/workflows/ci-common-reusable.yml @@ -29,13 +29,14 @@ jobs: run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - - - name: Init + + - name: Install zkstack run: | - ci_run zkt + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local # This does both linting and "building". We're using `zkstack dev lint` as it's common practice within our repo # `zkstack dev lint -t rs` = cargo clippy, which does cargo check behind the scenes, which is a lightweight version of cargo build - name: Lints - run: ci_run zk_supervisor lint -t rs --check + run: ci_run zkstack dev lint -t rs --check diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml index 6d0785fe46f1..53b25835ff57 100644 --- a/.github/workflows/ci-core-lint-reusable.yml +++ b/.github/workflows/ci-core-lint-reusable.yml @@ -26,24 +26,30 @@ jobs: - name: Start services run: | ci_localnet_up + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local - name: Build run: | - ci_run ./bin/zkt ci_run yarn install ci_run git config --global --add safe.directory /usr/src/zksync - ci_run zk_supervisor db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} + - name: Database setup + run: | + ci_run zkstack dev db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} - name: Lints run: | - ci_run zk_supervisor fmt --check - ci_run zk_supervisor lint -t md --check - ci_run zk_supervisor lint -t sol --check - ci_run zk_supervisor lint -t js --check - ci_run zk_supervisor lint -t ts --check - ci_run zk_supervisor lint -t rs --check + ci_run zkstack dev fmt --check + ci_run zkstack dev lint -t md --check + ci_run zkstack dev lint -t sol --check + ci_run zkstack dev lint -t js --check + ci_run zkstack dev lint -t ts --check + ci_run zkstack dev lint -t rs --check - name: Check Database run: | - ci_run zk_supervisor database check-sqlx-data --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} + ci_run zkstack dev database check-sqlx-data --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 58edc228b24d..2ab167f0ba0c 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -56,15 +56,22 @@ jobs: - name: Init run: | ci_run run_retried rustup show - ci_run ./bin/zkt - ci_run zk_supervisor contracts + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local + + - name: Build contracts + run: | + ci_run zkstack dev contracts - name: Contracts unit tests run: ci_run yarn l1-contracts test - name: Rust unit tests run: | - ci_run zk_supervisor test rust + ci_run zkstack dev test rust # Benchmarks are not tested by `cargo nextest` unless specified explicitly, and even then `criterion` harness is incompatible # with how `cargo nextest` runs 
tests. Thus, we run criterion-based benchmark tests manually. ci_run cargo test --release -p vm-benchmark --bench oneshot --bench batch @@ -113,8 +120,15 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run ./bin/zkt - ci_run zk_inception chain create \ + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + + + - name: Create and initialize legacy chain + run: | + ci_run zkstack chain create \ --chain-name legacy \ --chain-id sequential \ --prover-mode no-proofs \ @@ -127,18 +141,18 @@ jobs: --ignore-prerequisites \ --legacy-bridge - ci_run zk_inception ecosystem init --dev --verbose - ci_run zk_supervisor contracts --test-contracts + ci_run zkstack ecosystem init --dev --verbose + ci_run zkstack dev contracts --test-contracts # `sleep 60` because we need to wait until server added all the tokens - name: Run server run: | - ci_run zk_supervisor config-writer --path ${{ matrix.vm_mode == 'NEW' && 'etc/env/file_based/overrides/tests/loadtest-new.yaml' || 'etc/env/file_based/overrides/tests/loadtest-old.yaml' }} --chain legacy - ci_run zk_inception server --uring --chain=legacy --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & + ci_run zkstack dev config-writer --path ${{ matrix.vm_mode == 'NEW' && 'etc/env/file_based/overrides/tests/loadtest-new.yaml' || 'etc/env/file_based/overrides/tests/loadtest-old.yaml' }} --chain legacy + ci_run zkstack server --uring --chain=legacy --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & ci_run sleep 60 - name: Perform loadtest - run: ci_run zk_supervisor t loadtest -v --chain=legacy + run: ci_run zkstack dev t loadtest -v --chain=legacy - name: Show server.log logs if: always() @@ -175,9 +189,11 @@ jobs: run: | ci_localnet_up - - name: Build zk_toolbox - run: ci_run bash -c "./bin/zkt" - + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + - name: Create log directories run: | SERVER_LOGS_DIR=logs/server @@ -213,7 +229,7 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run zk_inception ecosystem init --deploy-paymaster --deploy-erc20 \ + ci_run zkstack ecosystem init --deploy-paymaster --deploy-erc20 \ --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_era \ @@ -228,7 +244,7 @@ jobs: - name: Create and initialize Validium chain run: | - ci_run zk_inception chain create \ + ci_run zkstack chain create \ --chain-name validium \ --chain-id sequential \ --prover-mode no-proofs \ @@ -240,7 +256,7 @@ jobs: --set-as-default false \ --ignore-prerequisites - ci_run zk_inception chain init \ + ci_run zkstack chain init \ --deploy-paymaster \ --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ @@ -249,7 +265,7 @@ jobs: - name: Create and initialize chain with Custom Token run: | - ci_run zk_inception chain create \ + ci_run zkstack chain create \ --chain-name custom_token \ --chain-id sequential \ 
--prover-mode no-proofs \ @@ -261,7 +277,7 @@ jobs: --set-as-default false \ --ignore-prerequisites - ci_run zk_inception chain init \ + ci_run zkstack chain init \ --deploy-paymaster \ --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ @@ -270,7 +286,7 @@ jobs: - name: Create and register chain with transactions signed "offline" run: | - ci_run zk_inception chain create \ + ci_run zkstack chain create \ --chain-name offline_chain \ --chain-id sequential \ --prover-mode no-proofs \ @@ -282,11 +298,11 @@ jobs: --set-as-default false \ --ignore-prerequisites - ci_run zk_inception chain build-transactions --chain offline_chain --l1-rpc-url http://127.0.0.1:8545 + ci_run zkstack chain build-transactions --chain offline_chain --l1-rpc-url http://127.0.0.1:8545 governor_pk=$(awk '/governor:/ {flag=1} flag && /private_key:/ {print $2; exit}' ./configs/wallets.yaml) - ci_run zk_supervisor send-transactions \ + ci_run zkstack dev send-transactions \ --file ./transactions/chain/offline_chain/register-hyperchain-txns.json \ --l1-rpc-url http://127.0.0.1:8545 \ --private-key $governor_pk @@ -305,7 +321,7 @@ jobs: - name: Create and initialize Consensus chain run: | - ci_run zk_inception chain create \ + ci_run zkstack chain create \ --chain-name consensus \ --chain-id sequential \ --prover-mode no-proofs \ @@ -317,28 +333,33 @@ jobs: --set-as-default false \ --ignore-prerequisites - ci_run zk_inception chain init \ + ci_run zkstack chain init \ --deploy-paymaster \ --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_consensus \ --chain consensus + - name: Export chain list to environment variable + run: | + CHAINS="era,validium,custom_token,consensus" + echo "CHAINS=$CHAINS" >> $GITHUB_ENV + - name: Build test dependencies run: | - ci_run zk_supervisor test build + ci_run zkstack dev test build - name: Initialize Contract verifier run: | - ci_run zk_inception contract-verifier init --zksolc-version=v1.5.3 --zkvyper-version=v1.5.4 --solc-version=0.8.26 --vyper-version=v0.3.10 --era-vm-solc-version=0.8.26-1.0.1 --only --chain era - ci_run zk_inception contract-verifier run --chain era &> ${{ env.SERVER_LOGS_DIR }}/contract-verifier-rollup.log & + ci_run zkstack contract-verifier init --zksolc-version=v1.5.3 --zkvyper-version=v1.5.4 --solc-version=0.8.26 --vyper-version=v0.3.10 --era-vm-solc-version=0.8.26-1.0.1 --only --chain era + ci_run zkstack contract-verifier run --chain era &> ${{ env.SERVER_LOGS_DIR }}/contract-verifier-rollup.log & - name: Run servers run: | - ci_run zk_inception server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & - ci_run zk_inception server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & - ci_run zk_inception server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & - ci_run zk_inception server --ignore-prerequisites --chain consensus \ + ci_run zkstack server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & + ci_run zkstack server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & + ci_run zkstack server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & + ci_run zkstack server --ignore-prerequisites --chain consensus \ 
--components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ &> ${{ env.SERVER_LOGS_DIR }}/consensus.log & @@ -346,161 +367,67 @@ jobs: - name: Setup attester committee for the consensus chain run: | - ci_run zk_inception consensus set-attester-committee --chain consensus &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log + ci_run zkstack consensus set-attester-committee --chain consensus --from-genesis &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log - name: Run integration tests run: | - PASSED_ENV_VARS="RUN_CONTRACT_VERIFICATION_TEST" \ - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain era &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain validium &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain custom_token &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain consensus &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 + ci_run ./bin/run_on_all_chains.sh "zkstack dev test integration --no-deps --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - name: Init external nodes run: | - ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_rollup --l1-rpc-url=http://localhost:8545 --chain era - ci_run zk_inception external-node init --ignore-prerequisites --chain era + ci_run zkstack external-node init --ignore-prerequisites --chain era - ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_validium1 --l1-rpc-url=http://localhost:8545 --chain validium - ci_run zk_inception external-node init --ignore-prerequisites --chain validium + ci_run zkstack external-node init --ignore-prerequisites --chain validium - ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_custom_token --l1-rpc-url=http://localhost:8545 --chain custom_token - ci_run zk_inception external-node init --ignore-prerequisites --chain custom_token + ci_run zkstack external-node init --ignore-prerequisites --chain custom_token - ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_consensus --l1-rpc-url=http://localhost:8545 --chain consensus - ci_run zk_inception external-node init --ignore-prerequisites --chain consensus + ci_run zkstack external-node init --ignore-prerequisites --chain consensus - name: Run recovery tests (from snapshot) run: | - - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain era &> ${{ 
env.SNAPSHOT_RECOVERY_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain validium &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain custom_token &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain consensus &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 + ci_run ./bin/run_on_all_chains.sh "zkstack dev test recovery --snapshot --no-deps --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }} - name: Run recovery tests (from genesis) run: | - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain era &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain validium &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain custom_token &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain consensus &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 + ci_run ./bin/run_on_all_chains.sh "zkstack dev test recovery --no-deps --no-kill --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.GENESIS_RECOVERY_LOGS_DIR }} - name: Run external node server run: | - ci_run zk_inception external-node run --ignore-prerequisites --chain era &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/rollup.log & - ci_run zk_inception external-node run --ignore-prerequisites --chain validium &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/validium.log & - ci_run zk_inception external-node run --ignore-prerequisites --chain custom_token &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/custom_token.log & - ci_run zk_inception external-node run --ignore-prerequisites --chain consensus --enable-consensus &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/consensus.log & + ci_run zkstack external-node run --ignore-prerequisites --chain era &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/rollup.log & + ci_run zkstack external-node run --ignore-prerequisites --chain validium &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/validium.log & + ci_run zkstack external-node run --ignore-prerequisites --chain custom_token &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/custom_token.log & + ci_run zkstack external-node run --ignore-prerequisites --chain consensus --enable-consensus &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/consensus.log & - name: Run integration tests en run: | - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain era &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain validium &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain custom_token &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/custom_token.log & - PID3=$!
- - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain consensus &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 + ci_run ./bin/run_on_all_chains.sh "zkstack dev test integration --no-deps --ignore-prerequisites --external-node" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }} - name: Fee projection tests run: | ci_run killall -INT zksync_server || true - - ci_run zk_supervisor test fees --no-deps --no-kill --chain era &> ${{ env.FEES_LOGS_DIR }}/era.log & - PID1=$! - - ci_run zk_supervisor test fees --no-deps --no-kill --chain validium &> ${{ env.FEES_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test fees --no-deps --no-kill --chain custom_token &> ${{ env.FEES_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test fees --no-deps --no-kill --chain consensus &> ${{ env.FEES_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 + ci_run ./bin/run_on_all_chains.sh "zkstack dev test fees --no-deps --no-kill" ${{ env.CHAINS }} ${{ env.FEES_LOGS_DIR }} - name: Run revert tests run: | ci_run killall -INT zksync_server || true ci_run killall -INT zksync_external_node || true - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain era &> ${{ env.REVERT_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain validium &> ${{ env.REVERT_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain custom_token &> ${{ env.REVERT_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain consensus &> ${{ env.REVERT_LOGS_DIR }}/consensus.log & - PID4=$! 
- - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 - + ci_run ./bin/run_on_all_chains.sh "zkstack dev test revert --no-deps --external-node --no-kill --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.REVERT_LOGS_DIR }} # Upgrade tests should run last, because as soon as they # finish the bootloader will be different # TODO make upgrade tests safe to run multiple times - name: Run upgrade test run: | - ci_run zk_supervisor test upgrade --no-deps --chain era + ci_run zkstack dev test upgrade --no-deps --chain era - name: Upload logs diff --git a/.github/workflows/ci-docs-reusable.yml b/.github/workflows/ci-docs-reusable.yml index 5b1d5a9bcdfa..e1a9cf78df7d 100644 --- a/.github/workflows/ci-docs-reusable.yml +++ b/.github/workflows/ci-docs-reusable.yml @@ -27,12 +27,17 @@ jobs: run_retried docker compose pull zk docker compose up -d zk + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local + - name: Build run: | - ci_run ./bin/zkt ci_run yarn install ci_run git config --global --add safe.directory /usr/src/zksync - name: Lints run: | - ci_run zk_supervisor lint -t md --check + ci_run zkstack dev fmt --check + ci_run zkstack dev lint -t md --check diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index 3f842b23488e..6cb9c26d21e7 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -30,10 +30,14 @@ jobs: mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - - name: Init + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local + + - name: Database setup run: | - ci_run zkt - ci_run zk_supervisor db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} + ci_run zkstack dev db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} - name: Formatting run: ci_run bash -c "cd prover && cargo fmt --check" @@ -65,12 +69,16 @@ jobs: mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + - name: Init run: | - ci_run zkt ci_run run_retried rustup show - name: Prover unit tests run: | # Not all tests are enabled, since prover and setup_key_generator_and_server requires bellman-cuda to be present - ci_run zk_supervisor test prover + ci_run zkstack dev test prover diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0a27a719aeb6..fd9dedf8af4e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,7 +20,7 @@ jobs: outputs: core: ${{ steps.changed-files.outputs.core_any_changed }} prover: ${{ steps.changed-files.outputs.prover_any_changed }} - zk_toolbox: ${{ steps.changed-files.outputs.zk_toolbox_any_changed }} + zkstack_cli: ${{ steps.changed-files.outputs.zkstack_cli_any_changed }} docs: ${{ steps.changed-files.outputs.docs_any_changed }} all: ${{ steps.changed-files.outputs.all_any_changed }} steps: @@ -58,7 +58,7 @@ jobs: - '.github/workflows/ci-core-lint-reusable.yml' - 'Cargo.toml' - 'Cargo.lock' - - 'zk_toolbox/**' + - 'zkstack_cli/**' - '!**/*.md' - '!**/*.MD' - 'docker-compose.yml' diff --git a/.github/workflows/new-build-contract-verifier-template.yml 
b/.github/workflows/new-build-contract-verifier-template.yml index 42791eab6669..3fc83cc62eb7 100644 --- a/.github/workflows/new-build-contract-verifier-template.yml +++ b/.github/workflows/new-build-contract-verifier-template.yml @@ -38,6 +38,7 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo CI=1 >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH + echo $HOME/.local/bin >> $GITHUB_PATH echo CI=1 >> .env echo IN_DOCKER=1 >> .env @@ -128,13 +129,17 @@ jobs: run: | mkdir -p ./volumes/postgres docker compose up -d postgres - zkt || true + + - name: Install zkstack + run: | + ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup + zkstackup --local || true - name: build contracts shell: bash run: | cp etc/tokens/{test,localhost}.json - zk_supervisor contracts + zkstack dev contracts - name: Upload contracts uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 diff --git a/.github/workflows/new-build-core-template.yml b/.github/workflows/new-build-core-template.yml index fba6a68b8eec..392acbc9f8f1 100644 --- a/.github/workflows/new-build-core-template.yml +++ b/.github/workflows/new-build-core-template.yml @@ -43,6 +43,7 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo CI=1 >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH + echo $HOME/.local/bin >> $GITHUB_PATH echo CI=1 >> .env echo IN_DOCKER=1 >> .env @@ -133,13 +134,17 @@ jobs: run: | mkdir -p ./volumes/postgres docker compose up -d postgres - zkt || true + - name: Install zkstack + run: | + ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup + zkstackup --local || true + - name: build contracts shell: bash run: | cp etc/tokens/{test,localhost}.json - zk_supervisor contracts + zkstack dev contracts - name: Upload contracts uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 diff --git a/.github/workflows/new-build-prover-template.yml b/.github/workflows/new-build-prover-template.yml index 60c152213e60..5d42696c0b2a 100644 --- a/.github/workflows/new-build-prover-template.yml +++ b/.github/workflows/new-build-prover-template.yml @@ -40,7 +40,7 @@ on: jobs: get-protocol-version: name: Get protocol version - runs-on: [ matterlabs-ci-runner-high-performance ] + runs-on: [matterlabs-ci-runner-high-performance] outputs: protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} steps: @@ -86,7 +86,7 @@ jobs: needs: get-protocol-version env: PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} - runs-on: [ matterlabs-ci-runner-high-performance ] + runs-on: [matterlabs-ci-runner-high-performance] strategy: matrix: components: @@ -96,6 +96,7 @@ jobs: - prover-fri-gateway - prover-job-monitor - proof-fri-gpu-compressor + - prover-autoscaler steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: @@ -166,7 +167,7 @@ jobs: copy-images: name: Copy images between docker registries - needs: [ build-images, get-protocol-version ] + needs: [build-images, get-protocol-version] env: PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} runs-on: matterlabs-ci-runner @@ -187,12 +188,12 @@ jobs: run: | gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev docker buildx imagetools create \ - --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ 
needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} + --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} - name: Login and push to Europe GAR run: | gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev docker buildx imagetools create \ - --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} + --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 11a844fdd2ba..18708420dab0 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -39,7 +39,7 @@ jobs: - '!prover/**' setup: name: Setup - runs-on: [ matterlabs-deployer-stage ] + runs-on: [matterlabs-deployer-stage] outputs: image_tag_suffix: ${{ steps.generate-tag-suffix.outputs.image_tag_suffix }} prover_fri_gpu_key_id: ${{ steps.extract-prover-fri-setup-key-ids.outputs.gpu_short_commit_sha }} @@ -58,10 +58,9 @@ jobs: run: | ./prover/extract-setup-data-keys.sh >> $GITHUB_OUTPUT - build-push-core-images: name: Build and push images - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-core-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -72,7 +71,7 @@ jobs: build-push-tee-prover-images: name: Build and push images - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-tee-prover-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -84,7 +83,7 @@ jobs: build-push-contract-verifier: name: Build and push images - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-contract-verifier-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -95,7 +94,7 @@ jobs: build-push-prover-images: name: Build and push images - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-prover-template.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -108,7 +107,7 @@ jobs: build-push-witness-generator-image-avx512: name: Build and push prover images with avx512 instructions - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-witness-generator-template.yml if: 
needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -122,7 +121,7 @@ jobs: build-gar-prover-fri-gpu-and-circuit-prover-gpu-gar: name: Build GAR prover FRI GPU - needs: [ setup, build-push-prover-images ] + needs: [setup, build-push-prover-images] uses: ./.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index 6e044287ad3d..49830a30cc1e 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -35,6 +35,7 @@ jobs: touch .env echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH + echo $(pwd)/zkstack_cli/zkstackup >> $GITHUB_PATH echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env @@ -44,12 +45,12 @@ jobs: run: | run_retried docker compose pull zk docker compose up -d zk - + - name: run benchmarks on base branch shell: bash run: | - ci_run zkt - ci_run zk_supervisor contracts --system-contracts + ci_run zkstackup -g --local + ci_run zkstack dev contracts --system-contracts ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes @@ -60,8 +61,8 @@ jobs: - name: run benchmarks on PR shell: bash run: | - ci_run zkt - ci_run zk_supervisor contracts --system-contracts + ci_run zkstackup -g --local + ci_run zkstack dev contracts --system-contracts ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee pr-opcodes || touch pr-opcodes diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index 4c8c90a0d8f2..3a2008e1f8e2 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -28,13 +28,20 @@ jobs: echo "RUSTC_WRAPPER=sccache" >> .env echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local - name: init run: | run_retried docker compose pull zk docker compose up -d zk - ci_run zkt - ci_run zk_supervisor contracts + + - name: build contracts + run: | + ci_run zkstack dev contracts - name: run benchmarks run: | diff --git a/.gitignore b/.gitignore index bbd13e2319af..86ed40c70417 100644 --- a/.gitignore +++ b/.gitignore @@ -30,7 +30,7 @@ Cargo.lock !/Cargo.lock !/infrastructure/zksync-crypto/Cargo.lock !/prover/Cargo.lock -!/zk_toolbox/Cargo.lock +!/zkstack_cli/Cargo.lock /etc/env/target/* /etc/env/.current @@ -112,7 +112,7 @@ hyperchain-*.yml prover/crates/bin/vk_setup_data_generator_server_fri/data/setup_* prover/data/keys/setup_* -# Zk Toolbox +# ZK Stack CLI chains/era/configs/* chains/gateway/* configs/* diff --git a/Cargo.lock b/Cargo.lock index 5073188d6321..3913b27438eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1539,6 +1539,15 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "const-decoder" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b381abde2cdc1bc3817e394b24e05667a2dc89f37570cbd34d9c397d99e56e3f" +dependencies = [ + "compile-fmt", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -3303,6 +3312,12 @@ dependencies = [ "url", ] +[[package]] +name = "human-repr" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f58b778a5761513caf593693f8951c97a5b610841e754788400f32102eefdff1" + [[package]] name = "hyper" version = "0.14.30" @@ -9640,9 +9655,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" +checksum = "035269d811b3770debca372141ab64cad067dce8e58cb39a48cb7617d30c626b" dependencies = [ "anyhow", "once_cell", @@ -9666,8 +9681,12 @@ dependencies = [ "secrecy", "serde", "serde_json", + "strum", + "strum_macros", + "time", "tracing", "url", + "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -9677,9 +9696,9 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e7199c07aa14d9c3319839b98ad0496aac6e72327e70ded77ddb66329766db" +checksum = "a8001633dee671134cf572175a6c4f817904ce5f8d92e9b51f49891c5184a831" dependencies = [ "anyhow", "async-trait", @@ -9699,9 +9718,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" +checksum = "49e38d1b5ed28c66e785caff53ea4863375555d818aafa03290397192dd3e665" dependencies = [ "anyhow", "blst", @@ -9720,9 +9739,9 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db07f7329b29737d8fd6860b350c809ae1b56ad53e26a7d0eddf3664ccb9dacb" +checksum = "061546668dd779ecb08302d2c84a6419e0093ad42aaa279bf20a8fa2ffda1be4" dependencies = [ "anyhow", "async-trait", @@ -9742,9 +9761,9 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a89a2d60db1ccd41438d29724a8d0d57fcf9506eb4443ea4b9205fd78c9c8e59" +checksum = "4e9789b5be26d20511bd7930bd9916d91122ff6cb09a28898563152a52f9f5eb" dependencies = [ "anyhow", "async-trait", @@ -9752,6 +9771,7 @@ dependencies = [ "build_html", "bytesize", "http-body-util", + "human-repr", "hyper 1.4.1", "hyper-util", "im", @@ -9778,9 +9798,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" +checksum = "e49fbd4e69b276058f3dfc06cf6ada0e8caa6ed826e81289e4d596da95a0f17a" dependencies = [ "anyhow", "bit-vec", @@ -9800,9 +9820,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff43cfd03ea205c763e74362dc6ec5a4d74b6b1baef0fb134dde92a8880397f7" +checksum = "b2b2aab4ed18b13cd584f4edcc2546c8da82f89ac62e525063e12935ff28c9be" dependencies = [ "anyhow", "async-trait", @@ -9820,9 +9840,9 @@ dependencies = [ [[package]] name = 
"zksync_consensus_utils" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" +checksum = "10bac8f471b182d4fa3d40cf158aac3624fe636a1ff0b4cf3fe26a0e20c68a42" dependencies = [ "anyhow", "rand 0.8.5", @@ -10050,10 +10070,13 @@ dependencies = [ "tracing", "vise", "zksync_concurrency", + "zksync_consensus_crypto", "zksync_consensus_roles", "zksync_consensus_storage", + "zksync_consensus_utils", "zksync_contracts", "zksync_db_connection", + "zksync_l1_contract_interface", "zksync_protobuf", "zksync_protobuf_build", "zksync_system_constants", @@ -10348,8 +10371,10 @@ dependencies = [ name = "zksync_l1_contract_interface" version = "0.1.0" dependencies = [ + "anyhow", "hex", "once_cell", + "rand 0.8.5", "serde", "serde_json", "serde_with", @@ -10472,6 +10497,7 @@ dependencies = [ "itertools 0.10.5", "once_cell", "pretty_assertions", + "test-casing", "thiserror", "tracing", "vise", @@ -10499,6 +10525,7 @@ dependencies = [ "async-trait", "axum", "chrono", + "const-decoder", "futures 0.3.30", "governor", "hex", @@ -10852,9 +10879,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" +checksum = "abd55c64f54cb10967a435422f66ff5880ae14a232b245517c7ce38da32e0cab" dependencies = [ "anyhow", "bit-vec", @@ -10873,9 +10900,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" +checksum = "4121952bcaf711005dd554612fc6e2de9b30cb58088508df87f1d38046ce8ac8" dependencies = [ "anyhow", "heck 0.5.0", @@ -10899,6 +10926,7 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", + "time", "tracing", "zksync_basic_types", "zksync_config", @@ -11313,6 +11341,7 @@ name = "zksync_vm_executor" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "async-trait", "once_cell", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 691341f71ba7..60b5628f4191 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,85 +1,85 @@ [workspace] members = [ - # Binaries - "core/bin/block_reverter", - "core/bin/contract-verifier", - "core/bin/external_node", - "core/bin/merkle_tree_consistency_checker", - "core/bin/snapshots_creator", - "core/bin/selector_generator", - "core/bin/system-constants-generator", - "core/bin/verified_sources_fetcher", - "core/bin/zksync_server", - "core/bin/genesis_generator", - "core/bin/zksync_tee_prover", - # Node services - "core/node/node_framework", - "core/node/proof_data_handler", - "core/node/block_reverter", - "core/node/commitment_generator", - "core/node/house_keeper", - "core/node/genesis", - "core/node/shared_metrics", - "core/node/db_pruner", - "core/node/fee_model", - "core/node/da_dispatcher", - "core/node/eth_sender", - "core/node/vm_runner", - "core/node/test_utils", - "core/node/state_keeper", - "core/node/reorg_detector", - "core/node/consistency_checker", - "core/node/metadata_calculator", - "core/node/node_sync", - "core/node/node_storage_init", - "core/node/consensus", - "core/node/contract_verification_server", - "core/node/api_server", - "core/node/tee_verifier_input_producer", - "core/node/base_token_adjuster", - "core/node/external_proof_integration_api", - 
"core/node/logs_bloom_backfill", - "core/node/da_clients", - # Libraries - "core/lib/db_connection", - "core/lib/zksync_core_leftovers", - "core/lib/basic_types", - "core/lib/config", - "core/lib/constants", - "core/lib/contract_verifier", - "core/lib/contracts", - "core/lib/circuit_breaker", - "core/lib/dal", - "core/lib/env_config", - "core/lib/da_client", - "core/lib/eth_client", - "core/lib/eth_signer", - "core/lib/l1_contract_interface", - "core/lib/mempool", - "core/lib/merkle_tree", - "core/lib/mini_merkle_tree", - "core/lib/node_framework_derive", - "core/lib/object_store", - "core/lib/prover_interface", - "core/lib/queued_job_processor", - "core/lib/state", - "core/lib/storage", - "core/lib/tee_verifier", - "core/lib/types", - "core/lib/protobuf_config", - "core/lib/utils", - "core/lib/vlog", - "core/lib/multivm", - "core/lib/vm_interface", - "core/lib/vm_executor", - "core/lib/web3_decl", - "core/lib/snapshots_applier", - "core/lib/crypto_primitives", - "core/lib/external_price_api", - # Test infrastructure - "core/tests/test_account", - "core/tests/loadnext", - "core/tests/vm-benchmark", + # Binaries + "core/bin/block_reverter", + "core/bin/contract-verifier", + "core/bin/external_node", + "core/bin/merkle_tree_consistency_checker", + "core/bin/snapshots_creator", + "core/bin/selector_generator", + "core/bin/system-constants-generator", + "core/bin/verified_sources_fetcher", + "core/bin/zksync_server", + "core/bin/genesis_generator", + "core/bin/zksync_tee_prover", + # Node services + "core/node/node_framework", + "core/node/proof_data_handler", + "core/node/block_reverter", + "core/node/commitment_generator", + "core/node/house_keeper", + "core/node/genesis", + "core/node/shared_metrics", + "core/node/db_pruner", + "core/node/fee_model", + "core/node/da_dispatcher", + "core/node/eth_sender", + "core/node/vm_runner", + "core/node/test_utils", + "core/node/state_keeper", + "core/node/reorg_detector", + "core/node/consistency_checker", + "core/node/metadata_calculator", + "core/node/node_sync", + "core/node/node_storage_init", + "core/node/consensus", + "core/node/contract_verification_server", + "core/node/api_server", + "core/node/tee_verifier_input_producer", + "core/node/base_token_adjuster", + "core/node/external_proof_integration_api", + "core/node/logs_bloom_backfill", + "core/node/da_clients", + # Libraries + "core/lib/db_connection", + "core/lib/zksync_core_leftovers", + "core/lib/basic_types", + "core/lib/config", + "core/lib/constants", + "core/lib/contract_verifier", + "core/lib/contracts", + "core/lib/circuit_breaker", + "core/lib/dal", + "core/lib/env_config", + "core/lib/da_client", + "core/lib/eth_client", + "core/lib/eth_signer", + "core/lib/l1_contract_interface", + "core/lib/mempool", + "core/lib/merkle_tree", + "core/lib/mini_merkle_tree", + "core/lib/node_framework_derive", + "core/lib/object_store", + "core/lib/prover_interface", + "core/lib/queued_job_processor", + "core/lib/state", + "core/lib/storage", + "core/lib/tee_verifier", + "core/lib/types", + "core/lib/protobuf_config", + "core/lib/utils", + "core/lib/vlog", + "core/lib/multivm", + "core/lib/vm_interface", + "core/lib/vm_executor", + "core/lib/web3_decl", + "core/lib/snapshots_applier", + "core/lib/crypto_primitives", + "core/lib/external_price_api", + # Test infrastructure + "core/tests/test_account", + "core/tests/loadnext", + "core/tests/vm-benchmark", ] resolver = "2" @@ -114,6 +114,7 @@ blake2 = "0.10" chrono = "0.4" clap = "4.2.2" codegen = "0.2.0" +const-decoder = "0.4.0" criterion = "0.4.0" 
ctrlc = "3.1" dashmap = "5.5.3" @@ -172,6 +173,7 @@ sqlx = "0.8.1" static_assertions = "1.1" structopt = "0.3.20" strum = "0.26" +strum_macros = "0.26.4" tempfile = "3.0.2" test-casing = "0.1.2" test-log = "0.2.15" @@ -185,7 +187,7 @@ tower-http = "0.5.2" tracing = "0.1" tracing-subscriber = "0.3" tracing-opentelemetry = "0.25.0" -time = "0.3.36" # Has to be same as used by `tracing-subscriber` +time = "0.3.36" # Has to be same as used by `tracing-subscriber` url = "2" web3 = "0.19.0" fraction = "0.15.3" @@ -231,16 +233,16 @@ zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.5" } zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "a233d44bbe61dc6a758a754c3b78fe4f83e56699" } # Consensus dependencies. -zksync_concurrency = "=0.3.0" -zksync_consensus_bft = "=0.3.0" -zksync_consensus_crypto = "=0.3.0" -zksync_consensus_executor = "=0.3.0" -zksync_consensus_network = "=0.3.0" -zksync_consensus_roles = "=0.3.0" -zksync_consensus_storage = "=0.3.0" -zksync_consensus_utils = "=0.3.0" -zksync_protobuf = "=0.3.0" -zksync_protobuf_build = "=0.3.0" +zksync_concurrency = "=0.5.0" +zksync_consensus_bft = "=0.5.0" +zksync_consensus_crypto = "=0.5.0" +zksync_consensus_executor = "=0.5.0" +zksync_consensus_network = "=0.5.0" +zksync_consensus_roles = "=0.5.0" +zksync_consensus_storage = "=0.5.0" +zksync_consensus_utils = "=0.5.0" +zksync_protobuf = "=0.5.0" +zksync_protobuf_build = "=0.5.0" # "Local" dependencies zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } diff --git a/bin/run_on_all_chains.sh b/bin/run_on_all_chains.sh new file mode 100755 index 000000000000..68b6e81662fd --- /dev/null +++ b/bin/run_on_all_chains.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +# Colors for the terminal output +RED='\033[0;31m' +GREEN='\033[0;32m' +NC='\033[0m' # No Color + + +command=$1 +chain_list=$2 +log_dir=$3 +IFS=',' read -r -a chains <<< "$chain_list" +pids=() +statuses=() + +# Start background processes +for i in "${!chains[@]}"; do + eval "$command --chain ${chains[$i]} &> ${log_dir}/${chains[$i]}.log" & + pids+=($!) +done + +# Wait for all processes to complete and capture their exit statuses +for i in "${!pids[@]}"; do + wait ${pids[$i]} + statuses[$i]=$? +done + +# Check exit statuses and set overall status +overall_status=0 + +for i in "${!statuses[@]}"; do + if [ ${statuses[$i]} -ne 0 ]; then + overall_status=1 + echo -e "${RED}✗ ERROR (exit code ${statuses[$i]}): ${chains[$i]}${NC}" + else + echo -e "${GREEN}✓ SUCCESS: ${chains[$i]}${NC}" + fi +done + +# Exit with overall status +exit $overall_status diff --git a/bin/zk b/bin/zk index 868c4e338cdf..f3b927de8f8e 100755 --- a/bin/zk +++ b/bin/zk @@ -39,6 +39,7 @@ check_yarn_version() { # and it will be hard for them to see what went wrong. check_subdirectory check_yarn_version + if [ -z "$1" ]; then cd $ZKSYNC_HOME run_retried yarn install --frozen-lockfile && yarn utils build && yarn zk build diff --git a/bin/zkt b/bin/zkt deleted file mode 100755 index f781ca67528b..000000000000 --- a/bin/zkt +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -cd $(dirname $0) - -if which zkup >/dev/null; then - cargo uninstall zk_inception - cargo uninstall zk_supervisor - git config --local core.hooksPath || - git config --local core.hooksPath ./.githooks - zkup -p .. 
--alias -else - echo zkup is not installed, please install it https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox/zkup - cd ../zk_toolbox - cargo install --path ./crates/zk_inception --force - cargo install --path ./crates/zk_supervisor --force -fi - diff --git a/contracts b/contracts index bce4b2d0f34b..aafee035db89 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit bce4b2d0f34bd87f1aaadd291772935afb1c3bd6 +Subproject commit aafee035db892689df3f7afe4b89fd6467a39313 diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index 8b6a7f949dd1..197bd8eb7aa2 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -13,6 +13,7 @@ use std::{ str::FromStr, }; +use anyhow::Context as _; pub use ethabi::{ self, ethereum_types::{ @@ -35,6 +36,21 @@ pub mod url; pub mod vm; pub mod web3; +/// Parses H256 from a slice of bytes. +pub fn parse_h256(bytes: &[u8]) -> anyhow::Result<H256> { + Ok(<[u8; 32]>::try_from(bytes).context("invalid size")?.into()) +} + +/// Parses H256 from an optional slice of bytes. +pub fn parse_h256_opt(bytes: Option<&[u8]>) -> anyhow::Result<H256> { + parse_h256(bytes.context("missing data")?) +} + +/// Parses H160 from a slice of bytes. +pub fn parse_h160(bytes: &[u8]) -> anyhow::Result<H160> { + Ok(<[u8; 20]>::try_from(bytes).context("invalid size")?.into()) +} + /// Account place in the global state tree is uniquely identified by its address. /// Binary this type is represented by 160 bit big-endian representation of account address. #[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash, Ord, PartialOrd)] diff --git a/core/lib/basic_types/src/web3/mod.rs b/core/lib/basic_types/src/web3/mod.rs index ecbe73f785b8..aa7c49670333 100644 --- a/core/lib/basic_types/src/web3/mod.rs +++ b/core/lib/basic_types/src/web3/mod.rs @@ -21,6 +21,35 @@ mod tests; pub type Index = U64; +/// Number that can be either hex-encoded or decimal. +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[serde(untagged)] +pub enum U64Number { + Hex(U64), + Number(u64), +} + +impl From<U64Number> for u64 { + fn from(value: U64Number) -> Self { + match value { + U64Number::Hex(number) => number.as_u64(), + U64Number::Number(number) => number, + } + } +} + +impl From<u64> for U64Number { + fn from(value: u64) -> Self { + Self::Number(value) + } +} + +impl From<U64> for U64Number { + fn from(value: U64) -> Self { + Self::Hex(value) + } +} + // `Signature`, `keccak256`: from `web3::signing` /// A struct that represents the components of a secp256k1 signature. 
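For reference, a minimal sketch of how the new `basic_types` helpers are meant to be used (not part of the diff; the sample values are illustrative, and it assumes the `zksync_basic_types`, `anyhow`, and `serde_json` crates already used in this PR):

```rust
use zksync_basic_types::{parse_h160, parse_h256, web3::U64Number, H160, H256};

fn main() -> anyhow::Result<()> {
    // `U64Number` is `#[serde(untagged)]`, so the same value deserializes
    // from either a hex-encoded string or a plain JSON number.
    let hex: U64Number = serde_json::from_value(serde_json::json!("0x2a"))?;
    let dec: U64Number = serde_json::from_value(serde_json::json!(42))?;
    assert_eq!(u64::from(hex), 42);
    assert_eq!(u64::from(dec), 42);

    // The `parse_h*` helpers turn raw byte slices (e.g. `BYTEA` columns read
    // from Postgres) into fixed-size types, returning a contextual error
    // instead of panicking on a length mismatch.
    let hash: H256 = parse_h256(&[0x11; 32])?;
    let address: H160 = parse_h160(&[0x22; 20])?;
    assert!(parse_h256(&[0x11; 31]).is_err()); // wrong length -> "invalid size"

    println!("{hash:?} {address:?}");
    Ok(())
}
```

Note that the untagged variant order matters: serde tries `Hex` first, so strings go through `U64` parsing (which, per the tests below, also accepts hex without the `0x` prefix), while JSON numbers fall through to `Number`.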
diff --git a/core/lib/basic_types/src/web3/tests.rs b/core/lib/basic_types/src/web3/tests.rs index 7f85bf12eb80..70805ab8b398 100644 --- a/core/lib/basic_types/src/web3/tests.rs +++ b/core/lib/basic_types/src/web3/tests.rs @@ -128,3 +128,13 @@ fn test_bytes_serde_json() { let decoded: Bytes = serde_json::from_str(&encoded).unwrap(); assert_eq!(original, decoded); } + +#[test] +fn deserializing_u64_number() { + let number: U64Number = serde_json::from_value(serde_json::json!(123)).unwrap(); + assert_eq!(u64::from(number), 123); + let number: U64Number = serde_json::from_value(serde_json::json!("0x123")).unwrap(); + assert_eq!(u64::from(number), 0x123); + let number: U64Number = serde_json::from_value(serde_json::json!("123")).unwrap(); + assert_eq!(u64::from(number), 0x123); +} diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index d1ab5ce8438f..af39e5159ba8 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -23,6 +23,10 @@ anyhow.workspace = true rand.workspace = true secrecy.workspace = true serde = { workspace = true, features = ["derive"] } +time = { workspace = true, features = ["serde-human-readable"] } +strum.workspace = true +strum_macros.workspace = true +vise.workspace = true [dev-dependencies] serde_json.workspace = true diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs index 918d8f4adab9..7f5a0f56aa17 100644 --- a/core/lib/config/src/configs/consensus.rs +++ b/core/lib/config/src/configs/consensus.rs @@ -115,6 +115,7 @@ impl RpcConfig { /// Config (shared between main node and external node). #[derive(Clone, Debug, PartialEq)] pub struct ConsensusConfig { + pub port: Option<u16>, /// Local socket address to listen for the incoming connections. pub server_addr: std::net::SocketAddr, /// Public address of this node (should forward to `server_addr`) diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 1ad503e0687f..a8d136d632ea 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -60,6 +60,7 @@ pub mod house_keeper; pub mod object_store; pub mod observability; pub mod proof_data_handler; +pub mod prover_autoscaler; pub mod prover_job_monitor; pub mod pruning; pub mod secrets; diff --git a/core/lib/config/src/configs/prover_autoscaler.rs b/core/lib/config/src/configs/prover_autoscaler.rs new file mode 100644 index 000000000000..41131fc1b8c7 --- /dev/null +++ b/core/lib/config/src/configs/prover_autoscaler.rs @@ -0,0 +1,117 @@ +use std::collections::HashMap; + +use serde::Deserialize; +use strum::Display; +use strum_macros::EnumString; +use time::Duration; +use vise::EncodeLabelValue; + +use crate::configs::ObservabilityConfig; + +/// Config used for running ProverAutoscaler (both Scaler and Agent). +#[derive(Debug, Clone, PartialEq)] +pub struct ProverAutoscalerConfig { + /// Amount of time ProverAutoscaler will wait for all its tasks to finish. + // TODO: find a way to use #[serde(with = "humantime_serde")] with time::Duration. + pub graceful_shutdown_timeout: Duration, + pub agent_config: Option<ProverAutoscalerAgentConfig>, + pub scaler_config: Option<ProverAutoscalerScalerConfig>, + pub observability: Option<ObservabilityConfig>, +} + +#[derive(Debug, Clone, PartialEq, Deserialize)] +pub struct ProverAutoscalerAgentConfig { + /// Port for prometheus metrics connection. + pub prometheus_port: u16, + /// HTTP port for global Scaler to connect to the Agent running in a cluster. + pub http_port: u16, + /// List of namespaces to watch. 
+ #[serde(default = "ProverAutoscalerAgentConfig::default_namespaces")] + pub namespaces: Vec<String>, + /// Watched cluster name. Can also be set via a flag. + pub cluster_name: Option<String>, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Default)] +pub struct ProverAutoscalerScalerConfig { + /// Port for prometheus metrics connection. + pub prometheus_port: u16, + /// The interval between runs for the global Scaler. + #[serde(default = "ProverAutoscalerScalerConfig::default_scaler_run_interval")] + pub scaler_run_interval: Duration, + /// URL to get queue reports from. + /// In production should be "http://prover-job-monitor.stage2.svc.cluster.local:3074/queue_report". + #[serde(default = "ProverAutoscalerScalerConfig::default_prover_job_monitor_url")] + pub prover_job_monitor_url: String, + /// List of ProverAutoscaler Agents to get cluster data from. + pub agents: Vec<String>, + /// Mapping of namespaces to protocol versions. + pub protocol_versions: HashMap<String, String>, + /// Default priorities: which cluster to prefer when there is no other information. + pub cluster_priorities: HashMap<String, u32>, + /// Prover speed per GPU. Used to calculate the desired number of provers for a given queue size. + pub prover_speed: HashMap<Gpu, u32>, + /// Duration after which a pending pod is considered long-pending. + #[serde(default = "ProverAutoscalerScalerConfig::default_long_pending_duration")] + pub long_pending_duration: Duration, +} + +#[derive( + Default, + Debug, + Display, + Hash, + PartialEq, + Eq, + Clone, + Copy, + Ord, + PartialOrd, + EnumString, + EncodeLabelValue, + Deserialize, +)] +pub enum Gpu { + #[default] + Unknown, + #[strum(ascii_case_insensitive)] + L4, + #[strum(ascii_case_insensitive)] + T4, + #[strum(ascii_case_insensitive)] + V100, + #[strum(ascii_case_insensitive)] + P100, + #[strum(ascii_case_insensitive)] + A100, +} + +impl ProverAutoscalerConfig { + /// Default graceful shutdown timeout -- 5 seconds + pub fn default_graceful_shutdown_timeout() -> Duration { + Duration::seconds(5) + } +} + +impl ProverAutoscalerAgentConfig { + pub fn default_namespaces() -> Vec<String> { + vec!["prover-blue".to_string(), "prover-red".to_string()] + } +} + +impl ProverAutoscalerScalerConfig { + /// Default scaler_run_interval -- 10s + pub fn default_scaler_run_interval() -> Duration { + Duration::seconds(10) + } + + /// Default prover_job_monitor_url -- cluster local URL + pub fn default_prover_job_monitor_url() -> String { + "http://localhost:3074/queue_report".to_string() + } + + /// Default long_pending_duration -- 10m + pub fn default_long_pending_duration() -> Duration { + Duration::minutes(10) + } +} diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index e1896230b617..cd4db494d974 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -802,6 +802,7 @@ impl Distribution<configs::consensus::ConsensusConfig> for EncodeDist { fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::consensus::ConsensusConfig { use configs::consensus::{ConsensusConfig, Host, NodePublicKey}; ConsensusConfig { + port: self.sample(rng), server_addr: self.sample(rng), public_addr: Host(self.sample(rng)), max_payload_size: self.sample(rng), diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index fb28693887a9..7e133f8dee31 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -259,19 +259,19 @@ impl SystemContractsRepo { "artifacts-zk/contracts-preprocessed/{0}{1}.sol/{1}.json", directory, name ))), - ContractLanguage::Yul => read_zbin_bytecode_from_path(self.root.join(format!(
"contracts-preprocessed/{0}artifacts/{1}.yul.zbin", - directory, name - ))), + ContractLanguage::Yul => { + let artifacts_path = self + .root + .join(format!("contracts-preprocessed/{}artifacts/", directory)); + read_yul_bytecode_by_path(artifacts_path, name) + } } } } pub fn read_bootloader_code(bootloader_type: &str) -> Vec { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/build/artifacts/{}.yul.zbin", - bootloader_type - )) + let artifacts_path = "contracts/system-contracts/bootloader/build/artifacts/"; + read_yul_bytecode(artifacts_path, bootloader_type) } fn read_proved_batch_bootloader_bytecode() -> Vec { @@ -288,10 +288,46 @@ pub fn read_zbin_bytecode(relative_zbin_path: impl AsRef) -> Vec { read_zbin_bytecode_from_path(bytecode_path) } +pub fn read_yul_bytecode(relative_artifacts_path: &str, name: &str) -> Vec { + let artifacts_path = Path::new(&home_path()).join(relative_artifacts_path); + read_yul_bytecode_by_path(artifacts_path, name) +} + +pub fn read_yul_bytecode_by_path(artifacts_path: PathBuf, name: &str) -> Vec { + let bytecode_path = artifacts_path.join(format!("{name}.yul/{name}.yul.zbin")); + + // Legacy versions of zksolc use the following path for output data if a yul file is being compiled: .yul.zbin + // New zksolc versions use .yul/.yul.zbin, for consistency with solidity files compilation. + // In addition, the output of the legacy zksolc in this case is a binary file, while in new versions it is hex encoded. + if fs::exists(&bytecode_path) + .unwrap_or_else(|err| panic!("Invalid path: {bytecode_path:?}, {err}")) + { + read_zbin_bytecode_from_hex_file(bytecode_path) + } else { + let bytecode_path_legacy = artifacts_path.join(format!("{name}.yul.zbin")); + + if fs::exists(&bytecode_path_legacy) + .unwrap_or_else(|err| panic!("Invalid path: {bytecode_path_legacy:?}, {err}")) + { + read_zbin_bytecode_from_path(bytecode_path_legacy) + } else { + panic!("Can't find bytecode for '{name}' yul contract at {artifacts_path:?}") + } + } +} + /// Reads zbin bytecode from a given path. fn read_zbin_bytecode_from_path(bytecode_path: PathBuf) -> Vec { fs::read(&bytecode_path) - .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {:?}: {}", bytecode_path, err)) + .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {bytecode_path:?}: {err}")) +} + +/// Reads zbin bytecode from a given path as utf8 text file. 
+fn read_zbin_bytecode_from_hex_file(bytecode_path: PathBuf) -> Vec<u8> { + let bytes = fs::read(&bytecode_path) + .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {bytecode_path:?}: {err}")); + + hex::decode(bytes).unwrap_or_else(|err| panic!("Invalid input file: {bytecode_path:?}, {err}")) } /// Hash of code and code which consists of 32 bytes words diff --git a/core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json b/core/lib/dal/.sqlx/query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json similarity index 87% rename from core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json rename to core/lib/dal/.sqlx/query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json index 8f41bf3b4916..84f677a36c86 100644 --- a/core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json +++ b/core/lib/dal/.sqlx/query-0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -171,8 +176,9 @@ false, true, true, - true + true, + false ] }, - "hash": "860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192" + "hash": "0784f2cc13f85763cc7da29902850fa76a03907957b7a0d87ea55a7873f3312e" } diff --git a/core/lib/dal/.sqlx/query-1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03.json b/core/lib/dal/.sqlx/query-16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47.json similarity index 53% rename from core/lib/dal/.sqlx/query-1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03.json rename to
core/lib/dal/.sqlx/query-16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47.json index 9cf4cc1e68e1..36879466039a 100644 --- a/core/lib/dal/.sqlx/query-1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03.json +++ b/core/lib/dal/.sqlx/query-16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version\n FROM\n l1_batches\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n protocol_version\n FROM\n l1_batches\n WHERE\n is_sealed\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ true ] }, - "hash": "1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03" + "hash": "16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47" } diff --git a/core/lib/dal/.sqlx/query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json b/core/lib/dal/.sqlx/query-1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec.json similarity index 73% rename from core/lib/dal/.sqlx/query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json rename to core/lib/dal/.sqlx/query-1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec.json index 853acb9f71a6..a101edbb9ea5 100644 --- a/core/lib/dal/.sqlx/query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json +++ b/core/lib/dal/.sqlx/query-1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n storage_refunds,\n pubdata_costs\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n storage_refunds,\n pubdata_costs\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ true ] }, - "hash": "cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a" + "hash": "1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec" } diff --git a/core/lib/dal/.sqlx/query-a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea.json b/core/lib/dal/.sqlx/query-1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c.json similarity index 71% rename from core/lib/dal/.sqlx/query-a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea.json rename to core/lib/dal/.sqlx/query-1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c.json index fc36e47b54c8..1078e0b57f61 100644 --- a/core/lib/dal/.sqlx/query-a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea.json +++ b/core/lib/dal/.sqlx/query-1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n initial_bootloader_heap_content\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n initial_bootloader_heap_content\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea" + "hash": "1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c" } diff --git a/core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json b/core/lib/dal/.sqlx/query-1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8.json similarity index 62% rename from core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json rename to 
core/lib/dal/.sqlx/query-1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8.json index 433564c6ae05..aa657582690e 100644 --- a/core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json +++ b/core/lib/dal/.sqlx/query-1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n (\n SELECT\n l1_batch_number\n FROM\n miniblocks\n WHERE\n number = $1\n ) AS \"block_batch?\",\n COALESCE(\n (\n SELECT\n MAX(number) + 1\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n ),\n 0\n ) AS \"pending_batch!\"\n ", + "query": "\n SELECT\n (\n SELECT\n l1_batch_number\n FROM\n miniblocks\n WHERE\n number = $1\n ) AS \"block_batch?\",\n COALESCE(\n (\n SELECT\n MAX(number) + 1\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n ),\n 0\n ) AS \"pending_batch!\"\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ null ] }, - "hash": "c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5" + "hash": "1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8" } diff --git a/core/lib/dal/.sqlx/query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json b/core/lib/dal/.sqlx/query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json similarity index 63% rename from core/lib/dal/.sqlx/query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json rename to core/lib/dal/.sqlx/query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json index 6f77a656072b..b8f8db874b63 100644 --- a/core/lib/dal/.sqlx/query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json +++ b/core/lib/dal/.sqlx/query-2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.evm_emulator_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.evm_emulator_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", "describe": { "columns": [ { @@ -97,5 +97,5 @@ false ] }, - 
"hash": "51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6" + "hash": "2049362aad5e32981e48e5c5ef7a00a91254ec6c8a68a359d22b02df5a40911f" } diff --git a/core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json b/core/lib/dal/.sqlx/query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json similarity index 81% rename from core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json rename to core/lib/dal/.sqlx/query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json index a3d356f4bea9..afac14e6d5cd 100644 --- a/core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json +++ b/core/lib/dal/.sqlx/query-2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -174,8 +179,9 @@ false, true, true, - true + true, + false ] }, - "hash": "16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870" + "hash": 
"2def67eb8372245ed59e76e07d615598f5d22a3aebd893afddded0e3c6b94a3b" } diff --git a/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json b/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json deleted file mode 100644 index 7245fa3059ed..000000000000 --- a/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n bytecode\n FROM\n (\n SELECT\n *\n FROM\n storage_logs\n WHERE\n storage_logs.hashed_key = $1\n AND storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n LIMIT\n 1\n ) t\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE\n value != $3\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "bytecode", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Int8", - "Bytea" - ] - }, - "nullable": [ - false - ] - }, - "hash": "369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980" -} diff --git a/core/lib/dal/.sqlx/query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json b/core/lib/dal/.sqlx/query-4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec.json similarity index 83% rename from core/lib/dal/.sqlx/query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json rename to core/lib/dal/.sqlx/query-4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec.json index 28fbea09998c..804318120fcc 100644 --- a/core/lib/dal/.sqlx/query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json +++ b/core/lib/dal/.sqlx/query-4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n system_logs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n system_logs,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -72,6 +72,11 @@ "ordinal": 13, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 14, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -93,8 +98,9 @@ true, true, false, - true + true, + false ] }, - "hash": "7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e" + "hash": "4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec" } diff --git a/core/lib/dal/.sqlx/query-55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966.json b/core/lib/dal/.sqlx/query-55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966.json new file mode 100644 index 000000000000..2cd528a9f537 --- /dev/null +++ b/core/lib/dal/.sqlx/query-55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n \n processed_batches AS (\n 
SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_bwip\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_ready_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966" +} diff --git a/core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json b/core/lib/dal/.sqlx/query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json similarity index 86% rename from core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json rename to core/lib/dal/.sqlx/query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json index 032cf987fc0b..4eae4f778cee 100644 --- a/core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json +++ b/core/lib/dal/.sqlx/query-5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -171,8 +176,9 @@ false, true, true, - true + true, + false ] }, - "hash": "da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7" + "hash": "5aa487a98dff53a5d32a5916a26cbf3ffb03b3791c0e9a9f39fb85cfffc65db2" } diff --git a/core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json b/core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json deleted file mode 
100644 index c2d9fe2e1ac5..000000000000 --- a/core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_bwip\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "last_ready_batch!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - true - ] - }, - "hash": "5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974" -} diff --git a/core/lib/dal/.sqlx/query-5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json b/core/lib/dal/.sqlx/query-5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json new file mode 100644 index 000000000000..c95a5bc6bd4a --- /dev/null +++ b/core/lib/dal/.sqlx/query-5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_protective_reads\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_ready_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a" +} diff --git a/core/lib/dal/.sqlx/query-2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d.json b/core/lib/dal/.sqlx/query-5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6.json similarity index 57% rename from core/lib/dal/.sqlx/query-2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d.json rename to core/lib/dal/.sqlx/query-5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6.json index 7c3a261d1f6e..959571601249 100644 --- a/core/lib/dal/.sqlx/query-2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d.json +++ b/core/lib/dal/.sqlx/query-5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n eth_prove_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", + "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n is_sealed\n AND eth_execute_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d" + "hash": "5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6" } diff --git a/core/lib/dal/.sqlx/query-746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a.json b/core/lib/dal/.sqlx/query-746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a.json new file mode 100644 index 
000000000000..306f193861f1 --- /dev/null +++ b/core/lib/dal/.sqlx/query-746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a.json @@ -0,0 +1,33 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE l1_batches\n SET\n l1_tx_count = $2,\n l2_tx_count = $3,\n l2_to_l1_messages = $4,\n bloom = $5,\n priority_ops_onchain_data = $6,\n predicted_commit_gas_cost = $7,\n predicted_prove_gas_cost = $8,\n predicted_execute_gas_cost = $9,\n initial_bootloader_heap_content = $10,\n used_contract_hashes = $11,\n bootloader_code_hash = $12,\n default_aa_code_hash = $13,\n evm_emulator_code_hash = $14,\n protocol_version = $15,\n system_logs = $16,\n storage_refunds = $17,\n pubdata_costs = $18,\n pubdata_input = $19,\n predicted_circuits_by_type = $20,\n updated_at = NOW(),\n is_sealed = TRUE\n WHERE\n number = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int4", + "Int4", + "ByteaArray", + "Bytea", + "ByteaArray", + "Int8", + "Int8", + "Int8", + "Jsonb", + "Jsonb", + "Bytea", + "Bytea", + "Bytea", + "Int4", + "ByteaArray", + "Int8Array", + "Int8Array", + "Bytea", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a" +} diff --git a/core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json b/core/lib/dal/.sqlx/query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json similarity index 86% rename from core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json rename to core/lib/dal/.sqlx/query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json index 9eb67bb8299a..dffd3ed8f9d2 100644 --- a/core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json +++ b/core/lib/dal/.sqlx/query-7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND 
commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -169,8 +174,9 @@ false, true, true, - true + true, + false ] }, - "hash": "9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24" + "hash": "7aebc0d8eb43bd835c4f175edc4c0371bdc118b25d64fcf526bd6575e4d675c8" } diff --git a/core/lib/dal/.sqlx/query-8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a.json b/core/lib/dal/.sqlx/query-8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a.json new file mode 100644 index 000000000000..df856b977026 --- /dev/null +++ b/core/lib/dal/.sqlx/query-8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a.json @@ -0,0 +1,56 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n timestamp,\n protocol_version,\n fee_address,\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n l1_batches\n WHERE\n NOT is_sealed\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 4, + "name": "l1_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 5, + "name": "l2_fair_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 6, + "name": "fair_pubdata_price", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + true, + false, + false, + false, + false + ] + }, + "hash": "8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a" +} diff --git a/core/lib/dal/.sqlx/query-0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41.json b/core/lib/dal/.sqlx/query-8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20.json similarity index 72% rename from core/lib/dal/.sqlx/query-0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41.json rename to core/lib/dal/.sqlx/query-8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20.json index 100761f54b41..ea2b51d69d1a 100644 --- a/core/lib/dal/.sqlx/query-0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41.json +++ b/core/lib/dal/.sqlx/query-8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n protocol_version\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ true ] }, - "hash": "0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41" + "hash": "8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20" } diff --git a/core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json b/core/lib/dal/.sqlx/query-8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2.json similarity index 73% rename from core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json rename to core/lib/dal/.sqlx/query-8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2.json index 9b989a9ba251..82af00b56061 100644 --- 
a/core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json +++ b/core/lib/dal/.sqlx/query-8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n MAX(number) AS \"number\"\n FROM\n l1_batches\n ", + "query": "\n SELECT\n MAX(number) AS \"number\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ null ] }, - "hash": "38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c" + "hash": "8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2" } diff --git a/core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json b/core/lib/dal/.sqlx/query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json similarity index 75% rename from core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json rename to core/lib/dal/.sqlx/query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json index 55d56cc4ab00..8c22b4f92c4e 100644 --- a/core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json +++ b/core/lib/dal/.sqlx/query-942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON 
data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -175,8 +180,9 @@ false, true, true, - true + true, + false ] }, - "hash": "9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361" + "hash": "942d6d948770c374ba4d3566c50e56e43137ac0cf45312d70dec0c407cadc1bf" } diff --git a/core/lib/dal/.sqlx/query-86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d.json b/core/lib/dal/.sqlx/query-9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77.json similarity index 69% rename from core/lib/dal/.sqlx/query-86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d.json rename to core/lib/dal/.sqlx/query-9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77.json index f97990794423..08e3b4b17a9e 100644 --- a/core/lib/dal/.sqlx/query-86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d.json +++ b/core/lib/dal/.sqlx/query-9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n MIN(number) AS \"min?\"\n FROM\n l1_batches\n WHERE\n protocol_version = $1\n ", + "query": "\n SELECT\n MIN(number) AS \"min?\"\n FROM\n l1_batches\n WHERE\n is_sealed\n AND protocol_version = $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ null ] }, - "hash": "86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d" + "hash": "9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77" } diff --git a/core/lib/dal/.sqlx/query-43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1.json b/core/lib/dal/.sqlx/query-a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026.json similarity index 73% rename from core/lib/dal/.sqlx/query-43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1.json rename to core/lib/dal/.sqlx/query-a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026.json index 56fcdb389430..9a1b043e5731 100644 --- a/core/lib/dal/.sqlx/query-43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1.json +++ b/core/lib/dal/.sqlx/query-a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n MIN(number) AS \"number\"\n FROM\n l1_batches\n ", + "query": "\n SELECT\n MIN(number) AS \"number\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ null ] }, - "hash": "43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1" + "hash": "a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026" } diff --git a/core/lib/dal/.sqlx/query-b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798.json b/core/lib/dal/.sqlx/query-b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798.json new file mode 100644 index 
000000000000..78b913fcc36a --- /dev/null +++ b/core/lib/dal/.sqlx/query-b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n l1_batches (\n number,\n timestamp,\n protocol_version,\n fee_address,\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n initial_bootloader_heap_content,\n used_contract_hashes,\n created_at,\n updated_at,\n is_sealed\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n 0,\n 0,\n ''::bytea,\n '{}'::bytea [],\n '{}'::jsonb,\n '{}'::jsonb,\n NOW(),\n NOW(),\n FALSE\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Int4", + "Bytea", + "Int8", + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798" +} diff --git a/core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json b/core/lib/dal/.sqlx/query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json similarity index 89% rename from core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json rename to core/lib/dal/.sqlx/query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json index 752e171f58cb..80a6946026b0 100644 --- a/core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json +++ b/core/lib/dal/.sqlx/query-b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -171,8 +176,9 @@ true, true, true, - true + true, + false ] }, - "hash": "4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98" + "hash": "b456147560b107640abdc10f7ac76b563ff2f0f3a818e8c8a02c2ef632d0b960" } diff --git 
a/core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json b/core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json deleted file mode 100644 index 120fac1021f5..000000000000 --- a/core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_protective_reads\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "last_ready_batch!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - true - ] - }, - "hash": "b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c" -} diff --git a/core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json b/core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json new file mode 100644 index 000000000000..20b791991650 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n bytecode_hash,\n bytecode\n FROM\n (\n SELECT\n value\n FROM\n storage_logs\n WHERE\n storage_logs.hashed_key = $1\n AND storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n LIMIT\n 1\n ) deploy_log\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE\n value != $3\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "bytecode_hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "bytecode", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Int8", + "Bytea" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967" +} diff --git a/core/lib/dal/.sqlx/query-31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8.json b/core/lib/dal/.sqlx/query-cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b.json similarity index 74% rename from core/lib/dal/.sqlx/query-31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8.json rename to core/lib/dal/.sqlx/query-cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b.json index c63ea98db44b..8f6d1cf7a5f6 100644 --- a/core/lib/dal/.sqlx/query-31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8.json +++ b/core/lib/dal/.sqlx/query-cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n l1_batches\n ", + "query": "\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ null ] }, - "hash": "31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8" + "hash": "cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b" } diff --git a/core/lib/dal/.sqlx/query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json 
b/core/lib/dal/.sqlx/query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json similarity index 58% rename from core/lib/dal/.sqlx/query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json rename to core/lib/dal/.sqlx/query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json index b2f195c4e5c1..ed3270de573e 100644 --- a/core/lib/dal/.sqlx/query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json +++ b/core/lib/dal/.sqlx/query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", "describe": { "columns": [ { @@ -132,5 +132,5 @@ false ] }, - "hash": "7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5" + "hash": 
"d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8" } diff --git a/core/lib/dal/.sqlx/query-d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99.json b/core/lib/dal/.sqlx/query-d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef.json similarity index 57% rename from core/lib/dal/.sqlx/query-d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99.json rename to core/lib/dal/.sqlx/query-d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef.json index 0370a63d65e3..15d6096420fb 100644 --- a/core/lib/dal/.sqlx/query-d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99.json +++ b/core/lib/dal/.sqlx/query-d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", + "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n is_sealed\n AND eth_commit_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99" + "hash": "d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef" } diff --git a/core/lib/dal/.sqlx/query-d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e.json b/core/lib/dal/.sqlx/query-d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e.json new file mode 100644 index 000000000000..0aac086f22a9 --- /dev/null +++ b/core/lib/dal/.sqlx/query-d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\"\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e" +} diff --git a/core/lib/dal/.sqlx/query-877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b.json b/core/lib/dal/.sqlx/query-d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c.json similarity index 57% rename from core/lib/dal/.sqlx/query-877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b.json rename to core/lib/dal/.sqlx/query-d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c.json index 3052b3a04d1a..baabbdb4f24a 100644 --- a/core/lib/dal/.sqlx/query-877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b.json +++ b/core/lib/dal/.sqlx/query-d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n eth_execute_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", + "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n is_sealed\n AND eth_prove_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b" + "hash": "d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c" } diff --git 
a/core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json b/core/lib/dal/.sqlx/query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json similarity index 88% rename from core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json rename to core/lib/dal/.sqlx/query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json index 6588ee2f11ef..e55d10d6f9a8 100644 --- a/core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json +++ b/core/lib/dal/.sqlx/query-e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -173,8 +178,9 @@ false, true, true, - true + true, + false ] }, - "hash": "b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731" + "hash": "e2d0bd978f76e0ce09b36b0e4b0a2baec4b2531ecaa8da234863e2eb810761c7" } diff --git a/core/lib/dal/.sqlx/query-e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005.json b/core/lib/dal/.sqlx/query-e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005.json deleted file mode 100644 index 2598be6267d1..000000000000 --- a/core/lib/dal/.sqlx/query-e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\"\n FROM\n miniblocks\n WHERE\n number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ 
- null - ] - }, - "hash": "e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005" -} diff --git a/core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json b/core/lib/dal/.sqlx/query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json similarity index 79% rename from core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json rename to core/lib/dal/.sqlx/query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json index c93e6aef3e7c..4f138822ad1b 100644 --- a/core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json +++ b/core/lib/dal/.sqlx/query-f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", "describe": { "columns": [ { @@ -137,6 +137,11 @@ "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -172,8 +177,9 @@ true, true, true, - true + true, + false ] }, - "hash": "05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32" + "hash": "f30748bef5f8d08b60739cdfd9508c8132d0958e4e25f4954e93d2095b4f11e8" } diff --git a/core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json b/core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json deleted file mode 100644 index 4fe32531a3f1..000000000000 --- 
a/core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_costs,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n $21,\n NOW(),\n NOW()\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Int4", - "Int8", - "ByteaArray", - "Bytea", - "ByteaArray", - "Int8", - "Int8", - "Int8", - "Jsonb", - "Jsonb", - "Bytea", - "Bytea", - "Bytea", - "Int4", - "ByteaArray", - "Int8Array", - "Int8Array", - "Bytea", - "Jsonb" - ] - }, - "nullable": [] - }, - "hash": "f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950" -} diff --git a/core/lib/dal/.sqlx/query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json b/core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json similarity index 58% rename from core/lib/dal/.sqlx/query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json rename to core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json index 61497cdb1694..c34d38ac2d03 100644 --- a/core/lib/dal/.sqlx/query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json +++ b/core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n certificate\n FROM\n miniblocks_consensus\n WHERE\n number >= $1\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n certificate\n FROM\n miniblocks_consensus\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -10,13 +10,11 @@ } ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] }, "nullable": [ false ] }, - "hash": "d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977" + "hash": "fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b" } diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index ccca49525e40..db03b8de9825 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -22,8 +22,11 @@ zksync_types.workspace = true zksync_concurrency.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_storage.workspace = true +zksync_consensus_crypto.workspace = true +zksync_consensus_utils.workspace = true zksync_protobuf.workspace = true zksync_db_connection.workspace = true +zksync_l1_contract_interface.workspace = true itertools.workspace = true thiserror.workspace = true diff --git a/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.down.sql b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.down.sql new file mode 100644 index 000000000000..3706fc6630bd --- /dev/null +++ b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.down.sql @@ -0,0 +1,5 @@ +ALTER TABLE l1_batches + RENAME COLUMN is_sealed TO is_finished; +ALTER table l1_batches + DROP COLUMN 
fair_pubdata_price, + DROP COLUMN fee_address; diff --git a/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.up.sql b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.up.sql new file mode 100644 index 000000000000..6b08546ea1e5 --- /dev/null +++ b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.up.sql @@ -0,0 +1,5 @@ +ALTER TABLE l1_batches + RENAME COLUMN is_finished TO is_sealed; +ALTER TABLE l1_batches + ADD COLUMN fair_pubdata_price bigint NOT NULL DEFAULT 0, + ADD COLUMN fee_address bytea NOT NULL DEFAULT '\x0000000000000000000000000000000000000000'::bytea; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 59cc557f36ec..bf1b48130c40 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -17,9 +17,10 @@ use zksync_types::{ aggregated_operations::AggregatedActionType, block::{ BlockGasCount, L1BatchHeader, L1BatchStatistics, L1BatchTreeData, L2BlockHeader, - StorageOracleInfo, + StorageOracleInfo, UnsealedL1BatchHeader, }, commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata}, + fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, writes::TreeWrite, Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, @@ -30,7 +31,9 @@ pub use crate::models::storage_block::{L1BatchMetadataError, L1BatchWithOptional use crate::{ models::{ parse_protocol_version, - storage_block::{StorageL1Batch, StorageL1BatchHeader, StorageL2BlockHeader}, + storage_block::{ + StorageL1Batch, StorageL1BatchHeader, StorageL2BlockHeader, UnsealedStorageL1Batch, + }, storage_event::StorageL2ToL1Log, storage_oracle_info::DbStorageOracleInfo, }, @@ -89,6 +92,8 @@ impl BlocksDal<'_, '_> { COUNT(*) AS "count!" FROM l1_batches + WHERE + is_sealed "# ) .instrument("is_genesis_needed") @@ -105,6 +110,8 @@ MAX(number) AS "number" FROM l1_batches + WHERE + is_sealed "# ) .instrument("get_sealed_l1_batch_number") @@ -140,6 +147,8 @@ MIN(number) AS "number" FROM l1_batches + WHERE + is_sealed "# ) .instrument("get_earliest_l1_batch_number") @@ -334,12 +343,14 @@ compressed_state_diffs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + fee_address FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -370,11 +381,13 @@ evm_emulator_code_hash, protocol_version, system_logs, - pubdata_input + pubdata_input, + fee_address FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -407,7 +420,8 @@ FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -438,7 +452,8 @@ FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -559,7 +574,109 @@ Ok(()) } + /// Inserts an unsealed L1 batch with some basic information (i.e. runtime-related data is either + /// null or set to the default value for the corresponding type).
pub async fn insert_l1_batch( + &mut self, + unsealed_batch_header: UnsealedL1BatchHeader, + ) -> DalResult<()> { + Self::insert_l1_batch_inner(unsealed_batch_header, self.storage).await + } + + async fn insert_l1_batch_inner( + unsealed_batch_header: UnsealedL1BatchHeader, + conn: &mut Connection<'_, Core>, + ) -> DalResult<()> { + sqlx::query!( + r#" + INSERT INTO + l1_batches ( + number, + timestamp, + protocol_version, + fee_address, + l1_gas_price, + l2_fair_gas_price, + fair_pubdata_price, + l1_tx_count, + l2_tx_count, + bloom, + priority_ops_onchain_data, + initial_bootloader_heap_content, + used_contract_hashes, + created_at, + updated_at, + is_sealed + ) + VALUES + ( + $1, + $2, + $3, + $4, + $5, + $6, + $7, + 0, + 0, + ''::bytea, + '{}'::bytea [], + '{}'::jsonb, + '{}'::jsonb, + NOW(), + NOW(), + FALSE + ) + "#, + i64::from(unsealed_batch_header.number.0), + unsealed_batch_header.timestamp as i64, + unsealed_batch_header.protocol_version.map(|v| v as i32), + unsealed_batch_header.fee_address.as_bytes(), + unsealed_batch_header.fee_input.l1_gas_price() as i64, + unsealed_batch_header.fee_input.fair_l2_gas_price() as i64, + unsealed_batch_header.fee_input.fair_pubdata_price() as i64, + ) + .instrument("insert_l1_batch") + .with_arg("number", &unsealed_batch_header.number) + .execute(conn) + .await?; + Ok(()) + } + + pub async fn ensure_unsealed_l1_batch_exists( + &mut self, + unsealed_batch: UnsealedL1BatchHeader, + ) -> anyhow::Result<()> { + let mut transaction = self.storage.start_transaction().await?; + let unsealed_batch_fetched = Self::get_unsealed_l1_batch_inner(&mut transaction).await?; + + match unsealed_batch_fetched { + None => { + tracing::info!( + "Unsealed batch #{} could not be found; inserting", + unsealed_batch.number + ); + Self::insert_l1_batch_inner(unsealed_batch, &mut transaction).await?; + } + Some(unsealed_batch_fetched) => { + if unsealed_batch_fetched.number != unsealed_batch.number { + anyhow::bail!( + "fetched unsealed L1 batch #{} does not match the expected L1 batch #{}", + unsealed_batch_fetched.number, + unsealed_batch.number + ) + } + } + } + + transaction.commit().await?; + Ok(()) + }
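Taken together, `insert_l1_batch` and `mark_l1_batch_as_sealed` (below) replace the old single-shot batch insert with an open/seal lifecycle. A minimal sketch of the intended call sequence, modeled on the `insert_mock_l1_batch` and test changes later in this diff; the `CoreDal` import path and the placeholder fee values are assumptions, not part of the diff:

use zksync_dal::{Connection, Core, CoreDal};
use zksync_types::{block::L1BatchHeader, fee_model::BatchFeeInput};

// Hypothetical caller: opens a batch as unsealed, then seals it once
// execution has produced the runtime data.
async fn open_then_seal(
    conn: &mut Connection<'_, Core>,
    header: &L1BatchHeader,
) -> anyhow::Result<()> {
    // Phase 1: persist the batch the moment it opens. Only identity and fee
    // parameters are known; the remaining columns keep their defaults and
    // `is_sealed` stays FALSE.
    let fee_input = BatchFeeInput::pubdata_independent(100, 100, 100);
    conn.blocks_dal()
        .insert_l1_batch(header.to_unsealed_header(fee_input))
        .await?;

    // ... batch execution fills `header` with runtime data here ...

    // Phase 2: a single UPDATE writes the runtime columns and flips
    // `is_sealed` to TRUE; it fails if the unsealed row has gone missing.
    conn.blocks_dal()
        .mark_l1_batch_as_sealed(header, &[], Default::default(), &[], &[], Default::default())
        .await?;
    Ok(())
}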
+ + /// Marks the provided L1 batch as sealed and populates it with all the runtime information. + /// + /// Errors if the batch does not exist. + pub async fn mark_l1_batch_as_sealed( &mut self, header: &L1BatchHeader, initial_bootloader_contents: &[(usize, U256)], @@ -567,9 +684,9 @@ impl BlocksDal<'_, '_> { storage_refunds: &[u32], pubdata_costs: &[i32], predicted_circuits_by_type: CircuitStatistic, // predicted number of circuits for each circuit type - ) -> DalResult<()> { + ) -> anyhow::Result<()> { let initial_bootloader_contents_len = initial_bootloader_contents.len(); - let instrumentation = Instrumented::new("insert_l1_batch") + let instrumentation = Instrumented::new("mark_l1_batch_as_sealed") .with_arg("number", &header.number) .with_arg( "initial_bootloader_contents.len", @@ -596,63 +713,35 @@ impl BlocksDal<'_, '_> { let query = sqlx::query!( r#" - INSERT INTO - l1_batches ( - number, - l1_tx_count, - l2_tx_count, - timestamp, - l2_to_l1_messages, - bloom, - priority_ops_onchain_data, - predicted_commit_gas_cost, - predicted_prove_gas_cost, - predicted_execute_gas_cost, - initial_bootloader_heap_content, - used_contract_hashes, - bootloader_code_hash, - default_aa_code_hash, - evm_emulator_code_hash, - protocol_version, - system_logs, - storage_refunds, - pubdata_costs, - pubdata_input, - predicted_circuits_by_type, - created_at, - updated_at - ) - VALUES - ( - $1, - $2, - $3, - $4, - $5, - $6, - $7, - $8, - $9, - $10, - $11, - $12, - $13, - $14, - $15, - $16, - $17, - $18, - $19, - $20, - $21, - NOW(), - NOW() - ) + UPDATE l1_batches + SET + l1_tx_count = $2, + l2_tx_count = $3, + l2_to_l1_messages = $4, + bloom = $5, + priority_ops_onchain_data = $6, + predicted_commit_gas_cost = $7, + predicted_prove_gas_cost = $8, + predicted_execute_gas_cost = $9, + initial_bootloader_heap_content = $10, + used_contract_hashes = $11, + bootloader_code_hash = $12, + default_aa_code_hash = $13, + evm_emulator_code_hash = $14, + protocol_version = $15, + system_logs = $16, + storage_refunds = $17, + pubdata_costs = $18, + pubdata_input = $19, + predicted_circuits_by_type = $20, + updated_at = NOW(), + is_sealed = TRUE + WHERE + number = $1 "#, i64::from(header.number.0), i32::from(header.l1_tx_count), i32::from(header.l2_tx_count), - header.timestamp as i64, &header.l2_to_l1_messages, header.bloom.as_bytes(), &priority_onchain_data, @@ -675,13 +764,47 @@ impl BlocksDal<'_, '_> { pubdata_input, serde_json::to_value(predicted_circuits_by_type).unwrap(), ); + let update_result = instrumentation.with(query).execute(self.storage).await?; - let mut transaction = self.storage.start_transaction().await?; - instrumentation - .with(query) - .execute(&mut transaction) - .await?; - transaction.commit().await + if update_result.rows_affected() == 0 { + anyhow::bail!( + "L1 batch sealing failed: batch #{} was not found", + header.number + ); + } + + Ok(()) + } + + pub async fn get_unsealed_l1_batch(&mut self) -> DalResult<Option<UnsealedL1BatchHeader>> { + Self::get_unsealed_l1_batch_inner(self.storage).await + } + + async fn get_unsealed_l1_batch_inner( + conn: &mut Connection<'_, Core>, + ) -> DalResult<Option<UnsealedL1BatchHeader>> { + let batch = sqlx::query_as!( + UnsealedStorageL1Batch, + r#" + SELECT + number, + timestamp, + protocol_version, + fee_address, + l1_gas_price, + l2_fair_gas_price, + fair_pubdata_price + FROM + l1_batches + WHERE + NOT is_sealed + "#, + ) + .instrument("get_unsealed_l1_batch") + .fetch_optional(conn) + .await?; + + Ok(batch.map(|b| b.into())) } pub async fn insert_l2_block(&mut self, l2_block_header: &L2BlockHeader) -> DalResult<()> { @@ -1065,7 +1188,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, -
pubdata_input + pubdata_input, + fee_address FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1252,7 +1376,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + fee_address FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1333,7 +1458,8 @@ impl BlocksDal<'_, '_> { protocol_version, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + fee_address FROM ( SELECT @@ -1407,7 +1533,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + fee_address FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1535,7 +1662,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + fee_address FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1600,7 +1728,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + fee_address FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -1679,7 +1808,8 @@ impl BlocksDal<'_, '_> { system_logs, events_queue_commitment, bootloader_initial_content_commitment, - pubdata_input + pubdata_input, + fee_address FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number @@ -2054,20 +2184,6 @@ impl BlocksDal<'_, '_> { Ok(Some((L2BlockNumber(min as u32), L2BlockNumber(max as u32)))) } - /// Returns `true` if there exists a non-sealed batch (i.e. there is one+ stored L2 block that isn't assigned - /// to any batch yet). - pub async fn pending_batch_exists(&mut self) -> DalResult { - let count = sqlx::query_scalar!( - "SELECT COUNT(miniblocks.number) FROM miniblocks WHERE l1_batch_number IS NULL" - ) - .instrument("pending_batch_exists") - .fetch_one(self.storage) - .await? 
- .unwrap_or(0); - - Ok(count != 0) - } - // methods used for measuring Eth tx stage transition latencies // and emitting metrics based on these measured data pub async fn oldest_uncommitted_batch_timestamp(&mut self) -> DalResult<Option<u64>> { @@ -2078,7 +2194,8 @@ FROM l1_batches WHERE - eth_commit_tx_id IS NULL + is_sealed + AND eth_commit_tx_id IS NULL AND number > 0 ORDER BY number @@ -2100,7 +2217,8 @@ FROM l1_batches WHERE - eth_prove_tx_id IS NULL + is_sealed + AND eth_prove_tx_id IS NULL AND number > 0 ORDER BY number @@ -2122,7 +2240,8 @@ FROM l1_batches WHERE - eth_execute_tx_id IS NULL + is_sealed + AND eth_execute_tx_id IS NULL AND number > 0 ORDER BY number @@ -2147,7 +2266,8 @@ FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(l1_batch_number.0) ) @@ -2217,7 +2337,8 @@ FROM l1_batches WHERE - protocol_version = $1 + is_sealed + AND protocol_version = $1 "#, protocol_version as i32 ) @@ -2523,8 +2644,12 @@ Ok(()) } - pub async fn insert_mock_l1_batch(&mut self, header: &L1BatchHeader) -> DalResult<()> { + pub async fn insert_mock_l1_batch(&mut self, header: &L1BatchHeader) -> anyhow::Result<()> { self.insert_l1_batch( + header.to_unsealed_header(BatchFeeInput::pubdata_independent(100, 100, 100)), + ) + .await?; + self.mark_l1_batch_as_sealed( header, &[], Default::default(), @@ -2835,7 +2960,13 @@ mod tests { execute: 10, }; conn.blocks_dal() - .insert_l1_batch(&header, &[], predicted_gas, &[], &[], Default::default()) + .insert_l1_batch( + header.to_unsealed_header(BatchFeeInput::pubdata_independent(100, 100, 100)), + ) + .await + .unwrap(); + conn.blocks_dal() + .mark_l1_batch_as_sealed(&header, &[], predicted_gas, &[], &[], Default::default()) .await .unwrap(); @@ -2843,7 +2974,13 @@ header.timestamp += 100; predicted_gas += predicted_gas; conn.blocks_dal() - .insert_l1_batch(&header, &[], predicted_gas, &[], &[], Default::default()) + .insert_l1_batch( + header.to_unsealed_header(BatchFeeInput::pubdata_independent(100, 100, 100)), + ) + .await + .unwrap(); + conn.blocks_dal() + .mark_l1_batch_as_sealed(&header, &[], predicted_gas, &[], &[], Default::default()) .await .unwrap(); diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index c1a1e6765b69..829e15b5710a 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -656,6 +656,8 @@ impl BlocksWeb3Dal<'_, '_> { (MAX(number) + 1) FROM l1_batches + WHERE + is_sealed ) ) AS "l1_batch_number!", miniblocks.timestamp,
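The recurring `is_sealed` guard added above protects every `MAX(number) + 1` derivation: the batch number of an L2 block that is not yet assigned to a batch is defined as one past the highest sealed batch, and once unsealed batches live in the same `l1_batches` table, the open batch row would otherwise shift the result by one. A hypothetical standalone query (not part of the diff) showing the same derivation:

use sqlx::PgConnection;

// Returns the batch number a pending L2 block resolves to, i.e. one past the
// highest sealed batch, or None on an empty table. Dropping the `is_sealed`
// filter would count the single unsealed row and overshoot by one.
async fn next_batch_number(conn: &mut PgConnection) -> sqlx::Result<Option<i64>> {
    sqlx::query_scalar::<_, Option<i64>>(
        "SELECT MAX(number) + 1 FROM l1_batches WHERE is_sealed",
    )
    .fetch_one(conn)
    .await
}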
diff --git a/core/lib/dal/src/consensus/conv.rs b/core/lib/dal/src/consensus/conv.rs new file mode 100644 index 000000000000..269c47fa2dd1 --- /dev/null +++ b/core/lib/dal/src/consensus/conv.rs @@ -0,0 +1,519 @@ +//! Protobuf conversion functions. +use anyhow::{anyhow, Context as _}; +use zksync_concurrency::net; +use zksync_consensus_roles::{attester, node}; +use zksync_protobuf::{read_required, required, ProtoFmt, ProtoRepr}; +use zksync_types::{ + abi, ethabi, + fee::Fee, + l1::{OpProcessingType, PriorityQueueType}, + l2::TransactionType, + parse_h160, parse_h256, + protocol_upgrade::ProtocolUpgradeTxCommonData, + transaction_request::PaymasterParams, + Execute, ExecuteTransactionCommon, InputData, L1BatchNumber, L1TxCommonData, L2TxCommonData, + Nonce, PriorityOpId, ProtocolVersionId, Transaction, H256, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +use super::*; + +impl ProtoFmt for BlockMetadata { + type Proto = proto::BlockMetadata; + fn read(r: &Self::Proto) -> anyhow::Result<Self> { + Ok(Self { + payload_hash: read_required(&r.payload_hash).context("payload_hash")?, + }) + } + fn build(&self) -> Self::Proto { + Self::Proto { + payload_hash: Some(self.payload_hash.build()), + } + } +} + +impl ProtoRepr for proto::NodeAddr { + type Type = (node::PublicKey, net::Host); + fn read(&self) -> anyhow::Result<Self::Type> { + Ok(( + read_required(&self.key).context("key")?, + net::Host(required(&self.addr).context("addr")?.clone()), + )) + } + fn build(this: &Self::Type) -> Self { + Self { + key: Some(this.0.build()), + addr: Some(this.1 .0.clone()), + } + } +} + +impl ProtoFmt for GlobalConfig { + type Proto = proto::GlobalConfig; + + fn read(r: &Self::Proto) -> anyhow::Result<Self> { + Ok(Self { + genesis: read_required(&r.genesis).context("genesis")?, + registry_address: r + .registry_address + .as_ref() + .map(|a| parse_h160(a)) + .transpose() + .context("registry_address")?, + seed_peers: r + .seed_peers + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::<Result<_, _>>() + .context("seed_peers")?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + genesis: Some(self.genesis.build()), + registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()), + seed_peers: self + .seed_peers + .iter() + .map(|(k, v)| ProtoRepr::build(&(k.clone(), v.clone()))) + .collect(), + } + } +} +impl ProtoFmt for AttestationStatus { + type Proto = proto::AttestationStatus; + + fn read(r: &Self::Proto) -> anyhow::Result<Self> { + Ok(Self { + genesis: read_required(&r.genesis).context("genesis")?, + next_batch_to_attest: attester::BatchNumber( + *required(&r.next_batch_to_attest).context("next_batch_to_attest")?, + ), + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + genesis: Some(self.genesis.build()), + next_batch_to_attest: Some(self.next_batch_to_attest.0), + } + } +}
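Each type above follows the same symmetric contract: `build` lowers to the generated proto struct and `read` validates and lifts back, so a wire roundtrip reduces to the generic `zksync_protobuf` helpers. A minimal sketch, mirroring the `Payload::encode`/`Payload::decode` pair that stays in `mod.rs`:

// Any `T: ProtoFmt` from this module roundtrips through its proto encoding.
fn proto_roundtrip(status: &AttestationStatus) -> anyhow::Result<AttestationStatus> {
    let bytes = zksync_protobuf::encode(status);
    zksync_protobuf::decode(&bytes)
}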
+ +impl ProtoFmt for Payload { + type Proto = proto::Payload; + + fn read(r: &Self::Proto) -> anyhow::Result<Self> { + let protocol_version = required(&r.protocol_version) + .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) + .context("protocol_version")?; + let mut transactions = vec![]; + + match protocol_version { + v if v >= ProtocolVersionId::Version25 => { + anyhow::ensure!( + r.transactions.is_empty(), + "transactions should be empty in protocol_version {v}" + ); + for (i, tx) in r.transactions_v25.iter().enumerate() { + transactions.push( + tx.read() + .with_context(|| format!("transactions_v25[{i}]"))?, + ); + } + } + v => { + anyhow::ensure!( + r.transactions_v25.is_empty(), + "transactions_v25 should be empty in protocol_version {v}" + ); + for (i, tx) in r.transactions.iter().enumerate() { + transactions.push(tx.read().with_context(|| format!("transactions[{i}]"))?) + } + } + } + + Ok(Self { + protocol_version, + hash: required(&r.hash) + .and_then(|h| parse_h256(h)) + .context("hash")?, + l1_batch_number: L1BatchNumber( + *required(&r.l1_batch_number).context("l1_batch_number")?, + ), + timestamp: *required(&r.timestamp).context("timestamp")?, + l1_gas_price: *required(&r.l1_gas_price).context("l1_gas_price")?, + l2_fair_gas_price: *required(&r.l2_fair_gas_price).context("l2_fair_gas_price")?, + fair_pubdata_price: r.fair_pubdata_price, + virtual_blocks: *required(&r.virtual_blocks).context("virtual_blocks")?, + operator_address: required(&r.operator_address) + .and_then(|a| parse_h160(a)) + .context("operator_address")?, + transactions, + last_in_batch: *required(&r.last_in_batch).context("last_in_batch")?, + }) + } + + fn build(&self) -> Self::Proto { + let mut x = Self::Proto { + protocol_version: Some((self.protocol_version as u16).into()), + hash: Some(self.hash.as_bytes().into()), + l1_batch_number: Some(self.l1_batch_number.0), + timestamp: Some(self.timestamp), + l1_gas_price: Some(self.l1_gas_price), + l2_fair_gas_price: Some(self.l2_fair_gas_price), + fair_pubdata_price: self.fair_pubdata_price, + virtual_blocks: Some(self.virtual_blocks), + operator_address: Some(self.operator_address.as_bytes().into()), + // Transactions are stored in execution order, therefore order is deterministic. + transactions: vec![], + transactions_v25: vec![], + last_in_batch: Some(self.last_in_batch), + }; + match self.protocol_version { + v if v >= ProtocolVersionId::Version25 => { + x.transactions_v25 = self.transactions.iter().map(ProtoRepr::build).collect(); + } + _ => { + x.transactions = self.transactions.iter().map(ProtoRepr::build).collect(); + } + } + x + } +} + +impl ProtoRepr for proto::TransactionV25 { + type Type = Transaction; + + fn read(&self) -> anyhow::Result<Self::Type> { + use proto::transaction_v25::T; + let tx = match required(&self.t)? { + T::L1(l1) => abi::Transaction::L1 { + tx: required(&l1.rlp) + .and_then(|x| { + let tokens = ethabi::decode(&[abi::L2CanonicalTransaction::schema()], x) + .context("ethabi::decode()")?; + // Unwrap is safe because `ethabi::decode` does the verification. + let tx = + abi::L2CanonicalTransaction::decode(tokens.into_iter().next().unwrap()) + .context("L2CanonicalTransaction::decode()")?; + Ok(tx) + }) + .context("rlp")? + .into(), + factory_deps: l1.factory_deps.clone(), + eth_block: 0, + }, + T::L2(l2) => abi::Transaction::L2(required(&l2.rlp).context("rlp")?.clone()), + }; + Transaction::from_abi(tx, true) + } + + fn build(tx: &Self::Type) -> Self { + let tx = abi::Transaction::try_from(tx.clone()).unwrap(); + use proto::transaction_v25::T; + Self { + t: Some(match tx { + abi::Transaction::L1 { + tx, factory_deps, .. + } => T::L1(proto::L1Transaction { + rlp: Some(ethabi::encode(&[tx.encode()])), + factory_deps, + }), + abi::Transaction::L2(tx) => T::L2(proto::L2Transaction { rlp: Some(tx) }), + }), + } + } +} + +impl ProtoRepr for proto::Transaction { + type Type = Transaction; + + fn read(&self) -> anyhow::Result<Self::Type> { + let common_data = required(&self.common_data).context("common_data")?; + let execute = required(&self.execute).context("execute")?; + Ok(Self::Type { + common_data: match common_data { + proto::transaction::CommonData::L1(common_data) => { + anyhow::ensure!( + *required(&common_data.deadline_block) + .context("common_data.deadline_block")? + == 0 + ); + anyhow::ensure!( + required(&common_data.eth_hash) + .and_then(|x| parse_h256(x)) + .context("common_data.eth_hash")?
+ == H256::default() + ); + ExecuteTransactionCommon::L1(L1TxCommonData { + sender: required(&common_data.sender_address) + .and_then(|x| parse_h160(x)) + .context("common_data.sender_address")?, + serial_id: required(&common_data.serial_id) + .map(|x| PriorityOpId(*x)) + .context("common_data.serial_id")?, + layer_2_tip_fee: required(&common_data.layer_2_tip_fee) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.layer_2_tip_fee")?, + full_fee: required(&common_data.full_fee) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.full_fee")?, + max_fee_per_gas: required(&common_data.max_fee_per_gas) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_fee_per_gas")?, + gas_limit: required(&common_data.gas_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_limit")?, + gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_per_pubdata_limit")?, + op_processing_type: required(&common_data.op_processing_type) + .and_then(|x| { + OpProcessingType::try_from(u8::try_from(*x)?) + .map_err(|_| anyhow!("u8::try_from")) + }) + .context("common_data.op_processing_type")?, + priority_queue_type: required(&common_data.priority_queue_type) + .and_then(|x| { + PriorityQueueType::try_from(u8::try_from(*x)?) + .map_err(|_| anyhow!("u8::try_from")) + }) + .context("common_data.priority_queue_type")?, + eth_block: *required(&common_data.eth_block) + .context("common_data.eth_block")?, + canonical_tx_hash: required(&common_data.canonical_tx_hash) + .and_then(|x| parse_h256(x)) + .context("common_data.canonical_tx_hash")?, + to_mint: required(&common_data.to_mint) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.to_mint")?, + refund_recipient: required(&common_data.refund_recipient_address) + .and_then(|x| parse_h160(x)) + .context("common_data.refund_recipient_address")?, + }) + } + proto::transaction::CommonData::L2(common_data) => { + ExecuteTransactionCommon::L2(L2TxCommonData { + nonce: required(&common_data.nonce) + .map(|x| Nonce(*x)) + .context("common_data.nonce")?, + fee: Fee { + gas_limit: required(&common_data.gas_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_limit")?, + max_fee_per_gas: required(&common_data.max_fee_per_gas) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_fee_per_gas")?, + max_priority_fee_per_gas: required( + &common_data.max_priority_fee_per_gas, + ) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_priority_fee_per_gas")?, + gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_per_pubdata_limit")?, + }, + initiator_address: required(&common_data.initiator_address) + .and_then(|x| parse_h160(x)) + .context("common_data.initiator_address")?, + signature: required(&common_data.signature) + .context("common_data.signature")? + .clone(), + transaction_type: required(&common_data.transaction_type) + .and_then(|x| Ok(TransactionType::try_from(*x)?)) + .context("common_data.transaction_type")?, + input: { + match &common_data.input { + None => None, + Some(input) => Some(InputData { + hash: required(&input.hash) + .and_then(|x| parse_h256(x)) + .context("common_data.input.hash")?, + data: required(&input.data) + .context("common_data.input.data")? 
+ .clone(), + }), + } + }, + paymaster_params: { + let params = required(&common_data.paymaster_params)?; + PaymasterParams { + paymaster: required(¶ms.paymaster_address) + .and_then(|x| parse_h160(x)) + .context("common_data.paymaster_params.paymaster_address")?, + paymaster_input: required(¶ms.paymaster_input) + .context("common_data.paymaster_params.paymaster_input")? + .clone(), + } + }, + }) + } + proto::transaction::CommonData::ProtocolUpgrade(common_data) => { + ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { + sender: required(&common_data.sender_address) + .and_then(|x| parse_h160(x)) + .context("common_data.sender_address")?, + upgrade_id: required(&common_data.upgrade_id) + .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) + .context("common_data.upgrade_id")?, + max_fee_per_gas: required(&common_data.max_fee_per_gas) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_fee_per_gas")?, + gas_limit: required(&common_data.gas_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_limit")?, + gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_per_pubdata_limit")?, + eth_block: *required(&common_data.eth_block) + .context("common_data.eth_block")?, + canonical_tx_hash: required(&common_data.canonical_tx_hash) + .and_then(|x| parse_h256(x)) + .context("common_data.canonical_tx_hash")?, + to_mint: required(&common_data.to_mint) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.to_mint")?, + refund_recipient: required(&common_data.refund_recipient_address) + .and_then(|x| parse_h160(x)) + .context("common_data.refund_recipient_address")?, + }) + } + }, + execute: Execute { + contract_address: execute + .contract_address + .as_ref() + .and_then(|x| parse_h160(x).ok()), + calldata: required(&execute.calldata).context("calldata")?.clone(), + value: required(&execute.value) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("execute.value")?, + factory_deps: execute.factory_deps.clone(), + }, + received_timestamp_ms: 0, // This timestamp is local to the node + raw_bytes: self.raw_bytes.as_ref().map(|x| x.clone().into()), + }) + } + + fn build(this: &Self::Type) -> Self { + let common_data = match &this.common_data { + ExecuteTransactionCommon::L1(data) => { + proto::transaction::CommonData::L1(proto::L1TxCommonData { + sender_address: Some(data.sender.as_bytes().into()), + serial_id: Some(data.serial_id.0), + deadline_block: Some(0), + layer_2_tip_fee: Some(u256_to_h256(data.layer_2_tip_fee).as_bytes().into()), + full_fee: Some(u256_to_h256(data.full_fee).as_bytes().into()), + max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), + gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()), + gas_per_pubdata_limit: Some( + u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), + ), + op_processing_type: Some(data.op_processing_type as u32), + priority_queue_type: Some(data.priority_queue_type as u32), + eth_hash: Some(H256::default().as_bytes().into()), + eth_block: Some(data.eth_block), + canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), + to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), + refund_recipient_address: Some(data.refund_recipient.as_bytes().into()), + }) + } + ExecuteTransactionCommon::L2(data) => { + proto::transaction::CommonData::L2(proto::L2TxCommonData { + nonce: 
Some(data.nonce.0), + gas_limit: Some(u256_to_h256(data.fee.gas_limit).as_bytes().into()), + max_fee_per_gas: Some(u256_to_h256(data.fee.max_fee_per_gas).as_bytes().into()), + max_priority_fee_per_gas: Some( + u256_to_h256(data.fee.max_priority_fee_per_gas) + .as_bytes() + .into(), + ), + gas_per_pubdata_limit: Some( + u256_to_h256(data.fee.gas_per_pubdata_limit) + .as_bytes() + .into(), + ), + initiator_address: Some(data.initiator_address.as_bytes().into()), + signature: Some(data.signature.clone()), + transaction_type: Some(data.transaction_type as u32), + input: data.input.as_ref().map(|input_data| proto::InputData { + data: Some(input_data.data.clone()), + hash: Some(input_data.hash.as_bytes().into()), + }), + paymaster_params: Some(proto::PaymasterParams { + paymaster_input: Some(data.paymaster_params.paymaster_input.clone()), + paymaster_address: Some(data.paymaster_params.paymaster.as_bytes().into()), + }), + }) + } + ExecuteTransactionCommon::ProtocolUpgrade(data) => { + proto::transaction::CommonData::ProtocolUpgrade( + proto::ProtocolUpgradeTxCommonData { + sender_address: Some(data.sender.as_bytes().into()), + upgrade_id: Some(data.upgrade_id as u32), + max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), + gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()), + gas_per_pubdata_limit: Some( + u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), + ), + eth_hash: Some(H256::default().as_bytes().into()), + eth_block: Some(data.eth_block), + canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), + to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), + refund_recipient_address: Some(data.refund_recipient.as_bytes().into()), + }, + ) + } + }; + let execute = proto::Execute { + contract_address: this.execute.contract_address.map(|x| x.as_bytes().into()), + calldata: Some(this.execute.calldata.clone()), + value: Some(u256_to_h256(this.execute.value).as_bytes().into()), + factory_deps: this.execute.factory_deps.clone(), + }; + Self { + common_data: Some(common_data), + execute: Some(execute), + raw_bytes: this.raw_bytes.as_ref().map(|inner| inner.0.clone()), + } + } +} + +impl ProtoRepr for proto::AttesterCommittee { + type Type = attester::Committee; + + fn read(&self) -> anyhow::Result<Self::Type> { + let members: Vec<_> = self + .members + .iter() + .enumerate() + .map(|(i, m)| attester::WeightedAttester::read(m).context(i)) + .collect::<Result<_, _>>() + .context("members")?; + Self::Type::new(members) + } + + fn build(this: &Self::Type) -> Self { + Self { + members: this.iter().map(|x| x.build()).collect(), + } + } +} diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index 876dfe14beda..8e88265730e9 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -1,29 +1,20 @@ -pub mod proto; +use std::collections::BTreeMap; + +use zksync_concurrency::net; +use zksync_consensus_roles::{attester, node, validator}; +use zksync_types::{ethabi, Address, L1BatchNumber, ProtocolVersionId, Transaction, H256}; + +mod conv; +pub mod proto; #[cfg(test)] mod testonly; #[cfg(test)] mod tests; -use std::collections::BTreeMap; - -use anyhow::{anyhow, Context as _}; -use zksync_concurrency::net; -use zksync_consensus_roles::{attester, node, validator}; -use zksync_protobuf::{read_required, required, ProtoFmt, ProtoRepr}; -use zksync_types::{ - abi, ethabi, - fee::Fee, - l1::{OpProcessingType, PriorityQueueType}, - l2::TransactionType, - protocol_upgrade::ProtocolUpgradeTxCommonData, -
transaction_request::PaymasterParams, - Address, Execute, ExecuteTransactionCommon, InputData, L1BatchNumber, L1TxCommonData, - L2TxCommonData, Nonce, PriorityOpId, ProtocolVersionId, Transaction, H256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::models::{parse_h160, parse_h256}; +#[derive(Debug, PartialEq, Clone)] +pub struct BlockMetadata { + pub payload_hash: validator::PayloadHash, +} /// Global config of the consensus. #[derive(Debug, PartialEq, Clone)] @@ -33,57 +24,6 @@ pub struct GlobalConfig { pub seed_peers: BTreeMap, } -impl ProtoRepr for proto::NodeAddr { - type Type = (node::PublicKey, net::Host); - fn read(&self) -> anyhow::Result { - Ok(( - read_required(&self.key).context("key")?, - net::Host(required(&self.addr).context("addr")?.clone()), - )) - } - fn build(this: &Self::Type) -> Self { - Self { - key: Some(this.0.build()), - addr: Some(this.1 .0.clone()), - } - } -} - -impl ProtoFmt for GlobalConfig { - type Proto = proto::GlobalConfig; - - fn read(r: &Self::Proto) -> anyhow::Result { - Ok(Self { - genesis: read_required(&r.genesis).context("genesis")?, - registry_address: r - .registry_address - .as_ref() - .map(|a| parse_h160(a)) - .transpose() - .context("registry_address")?, - seed_peers: r - .seed_peers - .iter() - .enumerate() - .map(|(i, e)| e.read().context(i)) - .collect::>() - .context("seed_peers")?, - }) - } - - fn build(&self) -> Self::Proto { - Self::Proto { - genesis: Some(self.genesis.build()), - registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()), - seed_peers: self - .seed_peers - .iter() - .map(|(k, v)| ProtoRepr::build(&(k.clone(), v.clone()))) - .collect(), - } - } -} - /// Global attestation status served by /// `attestationStatus` RPC. #[derive(Debug, PartialEq, Clone)] @@ -92,26 +32,6 @@ pub struct AttestationStatus { pub next_batch_to_attest: attester::BatchNumber, } -impl ProtoFmt for AttestationStatus { - type Proto = proto::AttestationStatus; - - fn read(r: &Self::Proto) -> anyhow::Result { - Ok(Self { - genesis: read_required(&r.genesis).context("genesis")?, - next_batch_to_attest: attester::BatchNumber( - *required(&r.next_batch_to_attest).context("next_batch_to_attest")?, - ), - }) - } - - fn build(&self) -> Self::Proto { - Self::Proto { - genesis: Some(self.genesis.build()), - next_batch_to_attest: Some(self.next_batch_to_attest.0), - } - } -} - /// L2 block (= miniblock) payload. #[derive(Debug, PartialEq)] pub struct Payload { @@ -128,88 +48,6 @@ pub struct Payload { pub last_in_batch: bool, } -impl ProtoFmt for Payload { - type Proto = proto::Payload; - - fn read(r: &Self::Proto) -> anyhow::Result { - let protocol_version = required(&r.protocol_version) - .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) - .context("protocol_version")?; - let mut transactions = vec![]; - - match protocol_version { - v if v >= ProtocolVersionId::Version25 => { - anyhow::ensure!( - r.transactions.is_empty(), - "transactions should be empty in protocol_version {v}" - ); - for (i, tx) in r.transactions_v25.iter().enumerate() { - transactions.push( - tx.read() - .with_context(|| format!("transactions_v25[{i}]"))?, - ); - } - } - v => { - anyhow::ensure!( - r.transactions_v25.is_empty(), - "transactions_v25 should be empty in protocol_version {v}" - ); - for (i, tx) in r.transactions.iter().enumerate() { - transactions.push(tx.read().with_context(|| format!("transactions[{i}]"))?) 
- } - } - } - - Ok(Self { - protocol_version, - hash: required(&r.hash) - .and_then(|h| parse_h256(h)) - .context("hash")?, - l1_batch_number: L1BatchNumber( - *required(&r.l1_batch_number).context("l1_batch_number")?, - ), - timestamp: *required(&r.timestamp).context("timestamp")?, - l1_gas_price: *required(&r.l1_gas_price).context("l1_gas_price")?, - l2_fair_gas_price: *required(&r.l2_fair_gas_price).context("l2_fair_gas_price")?, - fair_pubdata_price: r.fair_pubdata_price, - virtual_blocks: *required(&r.virtual_blocks).context("virtual_blocks")?, - operator_address: required(&r.operator_address) - .and_then(|a| parse_h160(a)) - .context("operator_address")?, - transactions, - last_in_batch: *required(&r.last_in_batch).context("last_in_batch")?, - }) - } - - fn build(&self) -> Self::Proto { - let mut x = Self::Proto { - protocol_version: Some((self.protocol_version as u16).into()), - hash: Some(self.hash.as_bytes().into()), - l1_batch_number: Some(self.l1_batch_number.0), - timestamp: Some(self.timestamp), - l1_gas_price: Some(self.l1_gas_price), - l2_fair_gas_price: Some(self.l2_fair_gas_price), - fair_pubdata_price: self.fair_pubdata_price, - virtual_blocks: Some(self.virtual_blocks), - operator_address: Some(self.operator_address.as_bytes().into()), - // Transactions are stored in execution order, therefore order is deterministic. - transactions: vec![], - transactions_v25: vec![], - last_in_batch: Some(self.last_in_batch), - }; - match self.protocol_version { - v if v >= ProtocolVersionId::Version25 => { - x.transactions_v25 = self.transactions.iter().map(ProtoRepr::build).collect(); - } - _ => { - x.transactions = self.transactions.iter().map(ProtoRepr::build).collect(); - } - } - x - } -} - impl Payload { pub fn decode(payload: &validator::Payload) -> anyhow::Result { zksync_protobuf::decode(&payload.0) @@ -219,337 +57,3 @@ impl Payload { validator::Payload(zksync_protobuf::encode(self)) } } - -impl ProtoRepr for proto::TransactionV25 { - type Type = Transaction; - - fn read(&self) -> anyhow::Result { - use proto::transaction_v25::T; - let tx = match required(&self.t)? { - T::L1(l1) => abi::Transaction::L1 { - tx: required(&l1.rlp) - .and_then(|x| { - let tokens = ethabi::decode(&[abi::L2CanonicalTransaction::schema()], x) - .context("ethabi::decode()")?; - // Unwrap is safe because `ethabi::decode` does the verification. - let tx = - abi::L2CanonicalTransaction::decode(tokens.into_iter().next().unwrap()) - .context("L2CanonicalTransaction::decode()")?; - Ok(tx) - }) - .context("rlp")? - .into(), - factory_deps: l1.factory_deps.clone(), - eth_block: 0, - }, - T::L2(l2) => abi::Transaction::L2(required(&l2.rlp).context("rlp")?.clone()), - }; - Transaction::from_abi(tx, true) - } - - fn build(tx: &Self::Type) -> Self { - let tx = abi::Transaction::try_from(tx.clone()).unwrap(); - use proto::transaction_v25::T; - Self { - t: Some(match tx { - abi::Transaction::L1 { - tx, factory_deps, .. 
- } => T::L1(proto::L1Transaction { - rlp: Some(ethabi::encode(&[tx.encode()])), - factory_deps, - }), - abi::Transaction::L2(tx) => T::L2(proto::L2Transaction { rlp: Some(tx) }), - }), - } - } -} - -impl ProtoRepr for proto::Transaction { - type Type = Transaction; - - fn read(&self) -> anyhow::Result { - let common_data = required(&self.common_data).context("common_data")?; - let execute = required(&self.execute).context("execute")?; - Ok(Self::Type { - common_data: match common_data { - proto::transaction::CommonData::L1(common_data) => { - anyhow::ensure!( - *required(&common_data.deadline_block) - .context("common_data.deadline_block")? - == 0 - ); - anyhow::ensure!( - required(&common_data.eth_hash) - .and_then(|x| parse_h256(x)) - .context("common_data.eth_hash")? - == H256::default() - ); - ExecuteTransactionCommon::L1(L1TxCommonData { - sender: required(&common_data.sender_address) - .and_then(|x| parse_h160(x)) - .context("common_data.sender_address")?, - serial_id: required(&common_data.serial_id) - .map(|x| PriorityOpId(*x)) - .context("common_data.serial_id")?, - layer_2_tip_fee: required(&common_data.layer_2_tip_fee) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.layer_2_tip_fee")?, - full_fee: required(&common_data.full_fee) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.full_fee")?, - max_fee_per_gas: required(&common_data.max_fee_per_gas) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_fee_per_gas")?, - gas_limit: required(&common_data.gas_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_limit")?, - gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_per_pubdata_limit")?, - op_processing_type: required(&common_data.op_processing_type) - .and_then(|x| { - OpProcessingType::try_from(u8::try_from(*x)?) - .map_err(|_| anyhow!("u8::try_from")) - }) - .context("common_data.op_processing_type")?, - priority_queue_type: required(&common_data.priority_queue_type) - .and_then(|x| { - PriorityQueueType::try_from(u8::try_from(*x)?) 
- .map_err(|_| anyhow!("u8::try_from")) - }) - .context("common_data.priority_queue_type")?, - eth_block: *required(&common_data.eth_block) - .context("common_data.eth_block")?, - canonical_tx_hash: required(&common_data.canonical_tx_hash) - .and_then(|x| parse_h256(x)) - .context("common_data.canonical_tx_hash")?, - to_mint: required(&common_data.to_mint) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.to_mint")?, - refund_recipient: required(&common_data.refund_recipient_address) - .and_then(|x| parse_h160(x)) - .context("common_data.refund_recipient_address")?, - }) - } - proto::transaction::CommonData::L2(common_data) => { - ExecuteTransactionCommon::L2(L2TxCommonData { - nonce: required(&common_data.nonce) - .map(|x| Nonce(*x)) - .context("common_data.nonce")?, - fee: Fee { - gas_limit: required(&common_data.gas_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_limit")?, - max_fee_per_gas: required(&common_data.max_fee_per_gas) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_fee_per_gas")?, - max_priority_fee_per_gas: required( - &common_data.max_priority_fee_per_gas, - ) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_priority_fee_per_gas")?, - gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_per_pubdata_limit")?, - }, - initiator_address: required(&common_data.initiator_address) - .and_then(|x| parse_h160(x)) - .context("common_data.initiator_address")?, - signature: required(&common_data.signature) - .context("common_data.signature")? - .clone(), - transaction_type: required(&common_data.transaction_type) - .and_then(|x| Ok(TransactionType::try_from(*x)?)) - .context("common_data.transaction_type")?, - input: { - match &common_data.input { - None => None, - Some(input) => Some(InputData { - hash: required(&input.hash) - .and_then(|x| parse_h256(x)) - .context("common_data.input.hash")?, - data: required(&input.data) - .context("common_data.input.data")? - .clone(), - }), - } - }, - paymaster_params: { - let params = required(&common_data.paymaster_params)?; - PaymasterParams { - paymaster: required(¶ms.paymaster_address) - .and_then(|x| parse_h160(x)) - .context("common_data.paymaster_params.paymaster_address")?, - paymaster_input: required(¶ms.paymaster_input) - .context("common_data.paymaster_params.paymaster_input")? 
- .clone(), - } - }, - }) - } - proto::transaction::CommonData::ProtocolUpgrade(common_data) => { - ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: required(&common_data.sender_address) - .and_then(|x| parse_h160(x)) - .context("common_data.sender_address")?, - upgrade_id: required(&common_data.upgrade_id) - .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) - .context("common_data.upgrade_id")?, - max_fee_per_gas: required(&common_data.max_fee_per_gas) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_fee_per_gas")?, - gas_limit: required(&common_data.gas_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_limit")?, - gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_per_pubdata_limit")?, - eth_block: *required(&common_data.eth_block) - .context("common_data.eth_block")?, - canonical_tx_hash: required(&common_data.canonical_tx_hash) - .and_then(|x| parse_h256(x)) - .context("common_data.canonical_tx_hash")?, - to_mint: required(&common_data.to_mint) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.to_mint")?, - refund_recipient: required(&common_data.refund_recipient_address) - .and_then(|x| parse_h160(x)) - .context("common_data.refund_recipient_address")?, - }) - } - }, - execute: Execute { - contract_address: execute - .contract_address - .as_ref() - .and_then(|x| parse_h160(x).ok()), - calldata: required(&execute.calldata).context("calldata")?.clone(), - value: required(&execute.value) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("execute.value")?, - factory_deps: execute.factory_deps.clone(), - }, - received_timestamp_ms: 0, // This timestamp is local to the node - raw_bytes: self.raw_bytes.as_ref().map(|x| x.clone().into()), - }) - } - - fn build(this: &Self::Type) -> Self { - let common_data = match &this.common_data { - ExecuteTransactionCommon::L1(data) => { - proto::transaction::CommonData::L1(proto::L1TxCommonData { - sender_address: Some(data.sender.as_bytes().into()), - serial_id: Some(data.serial_id.0), - deadline_block: Some(0), - layer_2_tip_fee: Some(u256_to_h256(data.layer_2_tip_fee).as_bytes().into()), - full_fee: Some(u256_to_h256(data.full_fee).as_bytes().into()), - max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), - gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()), - gas_per_pubdata_limit: Some( - u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), - ), - op_processing_type: Some(data.op_processing_type as u32), - priority_queue_type: Some(data.priority_queue_type as u32), - eth_hash: Some(H256::default().as_bytes().into()), - eth_block: Some(data.eth_block), - canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), - to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), - refund_recipient_address: Some(data.refund_recipient.as_bytes().into()), - }) - } - ExecuteTransactionCommon::L2(data) => { - proto::transaction::CommonData::L2(proto::L2TxCommonData { - nonce: Some(data.nonce.0), - gas_limit: Some(u256_to_h256(data.fee.gas_limit).as_bytes().into()), - max_fee_per_gas: Some(u256_to_h256(data.fee.max_fee_per_gas).as_bytes().into()), - max_priority_fee_per_gas: Some( - u256_to_h256(data.fee.max_priority_fee_per_gas) - .as_bytes() - .into(), - ), - gas_per_pubdata_limit: Some( - u256_to_h256(data.fee.gas_per_pubdata_limit) - .as_bytes() - .into(), 
-                ),
-                initiator_address: Some(data.initiator_address.as_bytes().into()),
-                signature: Some(data.signature.clone()),
-                transaction_type: Some(data.transaction_type as u32),
-                input: data.input.as_ref().map(|input_data| proto::InputData {
-                    data: Some(input_data.data.clone()),
-                    hash: Some(input_data.hash.as_bytes().into()),
-                }),
-                paymaster_params: Some(proto::PaymasterParams {
-                    paymaster_input: Some(data.paymaster_params.paymaster_input.clone()),
-                    paymaster_address: Some(data.paymaster_params.paymaster.as_bytes().into()),
-                }),
-            })
-        }
-        ExecuteTransactionCommon::ProtocolUpgrade(data) => {
-            proto::transaction::CommonData::ProtocolUpgrade(
-                proto::ProtocolUpgradeTxCommonData {
-                    sender_address: Some(data.sender.as_bytes().into()),
-                    upgrade_id: Some(data.upgrade_id as u32),
-                    max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()),
-                    gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()),
-                    gas_per_pubdata_limit: Some(
-                        u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(),
-                    ),
-                    eth_hash: Some(H256::default().as_bytes().into()),
-                    eth_block: Some(data.eth_block),
-                    canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()),
-                    to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()),
-                    refund_recipient_address: Some(data.refund_recipient.as_bytes().into()),
-                },
-            )
-        }
-    };
-    let execute = proto::Execute {
-        contract_address: this.execute.contract_address.map(|x| x.as_bytes().into()),
-        calldata: Some(this.execute.calldata.clone()),
-        value: Some(u256_to_h256(this.execute.value).as_bytes().into()),
-        factory_deps: this.execute.factory_deps.clone(),
-    };
-    Self {
-        common_data: Some(common_data),
-        execute: Some(execute),
-        raw_bytes: this.raw_bytes.as_ref().map(|inner| inner.0.clone()),
-    }
-    }
-}
-
-impl ProtoRepr for proto::AttesterCommittee {
-    type Type = attester::Committee;
-
-    fn read(&self) -> anyhow::Result<Self::Type> {
-        let members: Vec<_> = self
-            .members
-            .iter()
-            .enumerate()
-            .map(|(i, m)| attester::WeightedAttester::read(m).context(i))
-            .collect::<Result<_, _>>()
-            .context("members")?;
-        Self::Type::new(members)
-    }
-
-    fn build(this: &Self::Type) -> Self {
-        Self {
-            members: this.iter().map(|x| x.build()).collect(),
-        }
-    }
-}
diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto
index ab1245f3ef6a..421904bf966b 100644
--- a/core/lib/dal/src/consensus/proto/mod.proto
+++ b/core/lib/dal/src/consensus/proto/mod.proto
@@ -6,6 +6,10 @@ import "zksync/roles/validator.proto";
 import "zksync/roles/attester.proto";
 import "zksync/roles/node.proto";
 
+message BlockMetadata {
+  optional roles.validator.PayloadHash payload_hash = 1; // required
+}
+
 message Payload {
   // zksync-era ProtocolVersionId
   optional uint32 protocol_version = 9; // required; u16
diff --git a/core/lib/dal/src/consensus/testonly.rs b/core/lib/dal/src/consensus/testonly.rs
index 904a4c563d2a..13086323b178 100644
--- a/core/lib/dal/src/consensus/testonly.rs
+++ b/core/lib/dal/src/consensus/testonly.rs
@@ -1,11 +1,17 @@
-use rand::{
-    distributions::{Distribution, Standard},
-    Rng,
-};
+use rand::{distributions::Distribution, Rng};
+use zksync_consensus_utils::EncodeDist;
 
-use super::AttestationStatus;
+use super::*;
 
-impl Distribution<AttestationStatus> for Standard {
+impl Distribution<BlockMetadata> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BlockMetadata {
+        BlockMetadata {
+            payload_hash: rng.gen(),
+        }
+    }
+}
+
+impl Distribution<AttestationStatus> for EncodeDist {
     fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> AttestationStatus {
         AttestationStatus {
             genesis: rng.gen(),
@@ -13,3 +19,16 @@ impl Distribution<AttestationStatus> for Standard {
         }
     }
 }
+
+impl Distribution<GlobalConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> GlobalConfig {
+        GlobalConfig {
+            genesis: rng.gen(),
+            registry_address: Some(rng.gen()),
+            seed_peers: self
+                .sample_range(rng)
+                .map(|_| (rng.gen(), self.sample(rng)))
+                .collect(),
+        }
+    }
+}
diff --git a/core/lib/dal/src/consensus/tests.rs b/core/lib/dal/src/consensus/tests.rs
index 7059f1a74ea0..e8342b7446cc 100644
--- a/core/lib/dal/src/consensus/tests.rs
+++ b/core/lib/dal/src/consensus/tests.rs
@@ -4,7 +4,7 @@ use rand::Rng;
 use zksync_concurrency::ctx;
 use zksync_protobuf::{
     repr::{decode, encode},
-    testonly::{test_encode, test_encode_random},
+    testonly::{test_encode, test_encode_all_formats, FmtConv},
     ProtoRepr,
 };
 use zksync_test_account::Account;
@@ -12,7 +12,7 @@ use zksync_types::{
     web3::Bytes, Execute, ExecuteTransactionCommon, L1BatchNumber, ProtocolVersionId, Transaction,
 };
 
-use super::{proto, AttestationStatus, Payload};
+use super::*;
 use crate::tests::mock_protocol_upgrade_transaction;
 
 fn execute(rng: &mut impl Rng) -> Execute {
@@ -59,7 +59,9 @@ fn payload(rng: &mut impl Rng, protocol_version: ProtocolVersionId) -> Payload {
 fn test_encoding() {
     let ctx = &ctx::test_root(&ctx::RealClock);
     let rng = &mut ctx.rng();
-    test_encode_random::<AttestationStatus>(rng);
+    test_encode_all_formats::<FmtConv<GlobalConfig>>(rng);
+    test_encode_all_formats::<FmtConv<AttestationStatus>>(rng);
+    test_encode_all_formats::<FmtConv<BlockMetadata>>(rng);
     encode_decode::<proto::Transaction, ComparableTransaction>(l1_transaction(rng));
     encode_decode::<proto::Transaction, ComparableTransaction>(l2_transaction(rng));
     encode_decode::<proto::Transaction, ComparableTransaction>(l1_transaction(rng));
diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal/mod.rs
similarity index 79%
rename from core/lib/dal/src/consensus_dal.rs
rename to core/lib/dal/src/consensus_dal/mod.rs
index dd976f22086c..9515e93f2b3c 100644
--- a/core/lib/dal/src/consensus_dal.rs
+++ b/core/lib/dal/src/consensus_dal/mod.rs
@@ -1,16 +1,25 @@
 use anyhow::Context as _;
+use zksync_consensus_crypto::keccak256::Keccak256;
 use zksync_consensus_roles::{attester, validator};
-use zksync_consensus_storage::{BlockStoreState, ReplicaState};
+use zksync_consensus_storage::{BlockStoreState, Last, ReplicaState};
 use zksync_db_connection::{
     connection::Connection,
     error::{DalError, DalResult, SqlxContext},
     instrument::{InstrumentExt, Instrumented},
 };
-use zksync_types::L2BlockNumber;
+use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo;
+use zksync_types::{L1BatchNumber, L2BlockNumber};
 
-pub use crate::consensus::{proto, AttestationStatus, GlobalConfig, Payload};
+pub use crate::consensus::{proto, AttestationStatus, BlockMetadata, GlobalConfig, Payload};
 use crate::{Core, CoreDal};
 
+#[cfg(test)]
+mod tests;
+
+pub fn batch_hash(info: &StoredBatchInfo) -> attester::BatchHash {
+    attester::BatchHash(Keccak256::from_bytes(info.hash().0))
+}
+
 /// Storage access methods for `zksync_core::consensus` module.
 #[derive(Debug)]
 pub struct ConsensusDal<'a, 'c> {
@@ -305,47 +314,63 @@ impl ConsensusDal<'_, '_> {
         Ok(next)
     }
 
-    /// Fetches the last consensus certificate.
+    /// Fetches the block store state.
+    /// The blocks available to consensus either predate genesis or
+    /// have a consensus certificate.
     /// Currently, certificates are NOT generated synchronously with L2 blocks,
-    /// so it might NOT be the certificate for the last L2 block.
-    pub async fn block_certificates_range(&mut self) -> anyhow::Result<BlockStoreState> {
-        // It cannot be older than genesis first block.
-        let mut start = self
+    /// so `BlockStoreState.last` may differ from the last block in storage.
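As a rough illustration of the `first`/`last` semantics described in the doc comment above — the types here are simplified stand-ins, not the actual `zksync_consensus_storage` definitions (the real `Last` carries certificates, not bare block numbers):

```rust
// Simplified stand-ins; assumes block numbers are plain u64 for clarity.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Last {
    PreGenesis(u64), // block predates consensus genesis; no certificate exists
    Final(u64),      // block is covered by a stored consensus certificate
}

#[derive(Debug)]
struct BlockStoreState {
    first: u64,
    last: Option<Last>,
}

// Mirrors the fallback branch of `block_store_state` below: with no certificate
// in storage, the visible range ends at `min(genesis_first_block, next_block) - 1`.
fn pre_genesis_state(first: u64, next_block: u64, genesis_first_block: u64) -> BlockStoreState {
    let next = next_block.min(genesis_first_block);
    BlockStoreState {
        first,
        last: (next > first).then(|| Last::PreGenesis(next - 1)),
    }
}

fn main() {
    // Storage holds blocks 0..=4 (next block is 5); genesis starts at block 10,
    // so everything in storage is visible as pre-genesis.
    let state = pre_genesis_state(0, 5, 10);
    assert_eq!(state.first, 0);
    assert_eq!(state.last, Some(Last::PreGenesis(4)));
}
```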
+    pub async fn block_store_state(&mut self) -> anyhow::Result<BlockStoreState> {
+        let first = self.first_block().await.context("first_block()")?;
+        let cfg = self
             .global_config()
-            .await?
-            .context("genesis()")?
-            .genesis
-            .first_block;
-        start = start.max(self.first_block().await.context("first_block()")?);
-        let row = sqlx::query!(
+            .await
+            .context("global_config()")?
+            .context("global config is missing")?;
+
+        // If there is a cert in storage, then the block range visible to consensus
+        // is [first block, block of last cert].
+        if let Some(row) = sqlx::query!(
             r#"
             SELECT
                 certificate
             FROM
                 miniblocks_consensus
-            WHERE
-                number >= $1
             ORDER BY
                 number DESC
             LIMIT
                 1
             "#,
-            i64::try_from(start.0)?,
         )
         .instrument("block_certificate_range")
         .report_latency()
         .fetch_optional(self.storage)
-        .await?;
-        Ok(BlockStoreState {
-            first: start,
-            last: row
-                .map(|row| {
+        .await?
+        {
+            return Ok(BlockStoreState {
+                first,
+                last: Some(Last::Final(
                     zksync_protobuf::serde::Deserialize {
                         deny_unknown_fields: true,
                     }
-                    .proto_fmt(row.certificate)
-                })
-                .transpose()?,
+                    .proto_fmt(row.certificate)?,
+                )),
+            });
+        }
+
+        // Otherwise it is [first block, min(genesis.first_block - 1, last block)].
+        let next = self
+            .next_block()
+            .await
+            .context("next_block()")?
+            .min(cfg.genesis.first_block);
+        Ok(BlockStoreState {
+            first,
+            // `unwrap()` is ok, because `next > first >= 0`.
+            last: if next > first {
+                Some(Last::PreGenesis(next.prev().unwrap()))
+            } else {
+                None
+            },
         })
     }
 
@@ -461,6 +486,19 @@ impl ConsensusDal<'_, '_> {
             .next())
     }
 
+    /// Fetches L2 block metadata for the given block number.
+    pub async fn block_metadata(
+        &mut self,
+        n: validator::BlockNumber,
+    ) -> anyhow::Result<Option<BlockMetadata>> {
+        let Some(b) = self.block_payload(n).await.context("block_payload()")? else {
+            return Ok(None);
+        };
+        Ok(Some(BlockMetadata {
+            payload_hash: b.encode().hash(),
+        }))
+    }
+
     /// Inserts a certificate for the L2 block `cert.header().number`.
     /// Fails if certificate doesn't match the stored block.
     pub async fn insert_block_certificate(
@@ -558,15 +596,29 @@ impl ConsensusDal<'_, '_> {
         ))
     }
 
+    /// Fetches the L1 batch info for the given number.
+    pub async fn batch_info(
+        &mut self,
+        number: attester::BatchNumber,
+    ) -> anyhow::Result<Option<StoredBatchInfo>> {
+        let n = L1BatchNumber(number.0.try_into().context("overflow")?);
+        Ok(self
+            .storage
+            .blocks_dal()
+            .get_l1_batch_metadata(n)
+            .await
+            .context("get_l1_batch_metadata()")?
+            .map(|x| StoredBatchInfo::from(&x)))
+    }
+
     /// Inserts a certificate for the L1 batch.
     /// Noop if a certificate for the same L1 batch is already present.
     /// Verification against previously stored attester committee is performed.
-    /// Batch hash is not verified - it cannot be performed due to circular dependency on
-    /// `zksync_l1_contract_interface`.
+    /// Batch hash verification is performed.
     pub async fn insert_batch_certificate(
         &mut self,
         cert: &attester::BatchQC,
-    ) -> anyhow::Result<()> {
+    ) -> Result<(), InsertCertificateError> {
         let cfg = self
             .global_config()
             .await
@@ -577,6 +629,16 @@ impl ConsensusDal<'_, '_> {
             .await
             .context("attester_committee()")?
             .context("attester committee is missing")?;
+        let hash = batch_hash(
+            &self
+                .batch_info(cert.message.number)
+                .await
+                .context("batch()")?
+ .context("batch is missing")?, + ); + if cert.message.hash != hash { + return Err(InsertCertificateError::PayloadMismatch); + } cert.verify(cfg.genesis.hash(), &committee) .context("cert.verify()")?; sqlx::query!( @@ -645,6 +707,8 @@ impl ConsensusDal<'_, '_> { (MAX(number) + 1) FROM l1_batches + WHERE + is_sealed ), ( SELECT @@ -711,158 +775,3 @@ impl ConsensusDal<'_, '_> { })) } } - -#[cfg(test)] -mod tests { - use rand::Rng as _; - use zksync_consensus_roles::{attester, validator}; - use zksync_consensus_storage::ReplicaState; - use zksync_types::ProtocolVersion; - - use super::GlobalConfig; - use crate::{ - tests::{create_l1_batch_header, create_l2_block_header}, - ConnectionPool, Core, CoreDal, - }; - - #[tokio::test] - async fn replica_state_read_write() { - let rng = &mut rand::thread_rng(); - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.connection().await.unwrap(); - assert_eq!(None, conn.consensus_dal().global_config().await.unwrap()); - for n in 0..3 { - let setup = validator::testonly::Setup::new(rng, 3); - let mut genesis = (*setup.genesis).clone(); - genesis.fork_number = validator::ForkNumber(n); - let cfg = GlobalConfig { - genesis: genesis.with_hash(), - registry_address: Some(rng.gen()), - seed_peers: [].into(), // TODO: rng.gen() for Host - }; - conn.consensus_dal() - .try_update_global_config(&cfg) - .await - .unwrap(); - assert_eq!( - cfg, - conn.consensus_dal().global_config().await.unwrap().unwrap() - ); - assert_eq!( - ReplicaState::default(), - conn.consensus_dal().replica_state().await.unwrap() - ); - for _ in 0..5 { - let want: ReplicaState = rng.gen(); - conn.consensus_dal().set_replica_state(&want).await.unwrap(); - assert_eq!( - cfg, - conn.consensus_dal().global_config().await.unwrap().unwrap() - ); - assert_eq!(want, conn.consensus_dal().replica_state().await.unwrap()); - } - } - } - - #[tokio::test] - async fn test_batch_certificate() { - let rng = &mut rand::thread_rng(); - let setup = validator::testonly::Setup::new(rng, 3); - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.connection().await.unwrap(); - let cfg = GlobalConfig { - genesis: setup.genesis.clone(), - registry_address: Some(rng.gen()), - seed_peers: [].into(), - }; - conn.consensus_dal() - .try_update_global_config(&cfg) - .await - .unwrap(); - - let mut make_cert = |number: attester::BatchNumber| { - let m = attester::Batch { - genesis: setup.genesis.hash(), - hash: rng.gen(), - number, - }; - let mut sigs = attester::MultiSig::default(); - for k in &setup.attester_keys { - sigs.add(k.public(), k.sign_msg(m.clone()).sig); - } - attester::BatchQC { - message: m, - signatures: sigs, - } - }; - - // Required for inserting l2 blocks - conn.protocol_versions_dal() - .save_protocol_version_with_tx(&ProtocolVersion::default()) - .await - .unwrap(); - - // Insert some mock L2 blocks and L1 batches - let mut block_number = 0; - let mut batch_number = 0; - for _ in 0..3 { - for _ in 0..3 { - block_number += 1; - let l2_block = create_l2_block_header(block_number); - conn.blocks_dal().insert_l2_block(&l2_block).await.unwrap(); - } - batch_number += 1; - let l1_batch = create_l1_batch_header(batch_number); - conn.blocks_dal() - .insert_mock_l1_batch(&l1_batch) - .await - .unwrap(); - conn.blocks_dal() - .mark_l2_blocks_as_executed_in_l1_batch(l1_batch.number) - .await - .unwrap(); - } - - let n = attester::BatchNumber(batch_number.into()); - - // Insert a batch certificate for the last L1 batch. 
- let want = make_cert(n); - conn.consensus_dal() - .upsert_attester_committee(n, setup.genesis.attesters.as_ref().unwrap()) - .await - .unwrap(); - conn.consensus_dal() - .insert_batch_certificate(&want) - .await - .unwrap(); - - // Reinserting a cert should fail. - assert!(conn - .consensus_dal() - .insert_batch_certificate(&make_cert(n)) - .await - .is_err()); - - // Retrieve the latest certificate. - let got_n = conn - .consensus_dal() - .last_batch_certificate_number() - .await - .unwrap() - .unwrap(); - let got = conn - .consensus_dal() - .batch_certificate(got_n) - .await - .unwrap() - .unwrap(); - assert_eq!(got, want); - - // Try insert batch certificate for non-existing batch - assert!(conn - .consensus_dal() - .insert_batch_certificate(&make_cert(n.next())) - .await - .is_err()); - } -} diff --git a/core/lib/dal/src/consensus_dal/tests.rs b/core/lib/dal/src/consensus_dal/tests.rs new file mode 100644 index 000000000000..772e7b2bf5e7 --- /dev/null +++ b/core/lib/dal/src/consensus_dal/tests.rs @@ -0,0 +1,186 @@ +use rand::Rng as _; +use zksync_consensus_roles::{attester, validator}; +use zksync_consensus_storage::ReplicaState; +use zksync_types::{ + block::L1BatchTreeData, + commitment::{L1BatchCommitmentArtifacts, L1BatchCommitmentHash}, + ProtocolVersion, +}; + +use super::*; +use crate::{ + tests::{create_l1_batch_header, create_l2_block_header}, + ConnectionPool, Core, CoreDal, +}; + +#[tokio::test] +async fn replica_state_read_write() { + let rng = &mut rand::thread_rng(); + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + assert_eq!(None, conn.consensus_dal().global_config().await.unwrap()); + for n in 0..3 { + let setup = validator::testonly::Setup::new(rng, 3); + let mut genesis = (*setup.genesis).clone(); + genesis.fork_number = validator::ForkNumber(n); + let cfg = GlobalConfig { + genesis: genesis.with_hash(), + registry_address: Some(rng.gen()), + seed_peers: [].into(), // TODO: rng.gen() for Host + }; + conn.consensus_dal() + .try_update_global_config(&cfg) + .await + .unwrap(); + assert_eq!( + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() + ); + assert_eq!( + ReplicaState::default(), + conn.consensus_dal().replica_state().await.unwrap() + ); + for _ in 0..5 { + let want: ReplicaState = rng.gen(); + conn.consensus_dal().set_replica_state(&want).await.unwrap(); + assert_eq!( + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() + ); + assert_eq!(want, conn.consensus_dal().replica_state().await.unwrap()); + } + } +} + +#[tokio::test] +async fn test_batch_certificate() { + let rng = &mut rand::thread_rng(); + let setup = validator::testonly::Setup::new(rng, 3); + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + let cfg = GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: Some(rng.gen()), + seed_peers: [].into(), + }; + conn.consensus_dal() + .try_update_global_config(&cfg) + .await + .unwrap(); + + let make_cert = |number: attester::BatchNumber, hash: attester::BatchHash| { + let m = attester::Batch { + genesis: setup.genesis.hash(), + hash, + number, + }; + let mut sigs = attester::MultiSig::default(); + for k in &setup.attester_keys { + sigs.add(k.public(), k.sign_msg(m.clone()).sig); + } + attester::BatchQC { + message: m, + signatures: sigs, + } + }; + + // Required for inserting l2 blocks + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + // 
Insert some mock L2 blocks and L1 batches + let mut block_number = 0; + let mut batch_number = 0; + for _ in 0..3 { + for _ in 0..3 { + block_number += 1; + let l2_block = create_l2_block_header(block_number); + conn.blocks_dal().insert_l2_block(&l2_block).await.unwrap(); + } + batch_number += 1; + let l1_batch = create_l1_batch_header(batch_number); + conn.blocks_dal() + .insert_mock_l1_batch(&l1_batch) + .await + .unwrap(); + conn.blocks_dal() + .save_l1_batch_tree_data( + l1_batch.number, + &L1BatchTreeData { + hash: rng.gen(), + rollup_last_leaf_index: rng.gen(), + }, + ) + .await + .unwrap(); + conn.blocks_dal() + .save_l1_batch_commitment_artifacts( + l1_batch.number, + &L1BatchCommitmentArtifacts { + commitment_hash: L1BatchCommitmentHash { + pass_through_data: rng.gen(), + aux_output: rng.gen(), + meta_parameters: rng.gen(), + commitment: rng.gen(), + }, + l2_l1_merkle_root: rng.gen(), + compressed_state_diffs: None, + compressed_initial_writes: None, + compressed_repeated_writes: None, + zkporter_is_available: false, + aux_commitments: None, + }, + ) + .await + .unwrap(); + conn.blocks_dal() + .mark_l2_blocks_as_executed_in_l1_batch(l1_batch.number) + .await + .unwrap(); + } + + let n = attester::BatchNumber(batch_number.into()); + + // Insert a batch certificate for the last L1 batch. + let hash = batch_hash(&conn.consensus_dal().batch_info(n).await.unwrap().unwrap()); + let want = make_cert(n, hash); + conn.consensus_dal() + .upsert_attester_committee(n, setup.genesis.attesters.as_ref().unwrap()) + .await + .unwrap(); + conn.consensus_dal() + .insert_batch_certificate(&want) + .await + .unwrap(); + + // Reinserting a cert should fail. + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n, hash)) + .await + .is_err()); + + // Retrieve the latest certificate. + let got_n = conn + .consensus_dal() + .last_batch_certificate_number() + .await + .unwrap() + .unwrap(); + let got = conn + .consensus_dal() + .batch_certificate(got_n) + .await + .unwrap() + .unwrap(); + assert_eq!(got, want); + + // Try insert batch certificate for non-existing batch + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n.next(), rng.gen())) + .await + .is_err()); +} diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs index 479649f85092..12e41ac780ad 100644 --- a/core/lib/dal/src/models/mod.rs +++ b/core/lib/dal/src/models/mod.rs @@ -1,7 +1,6 @@ pub mod storage_block; -use anyhow::Context as _; use zksync_db_connection::error::SqlxContext; -use zksync_types::{ProtocolVersionId, H160, H256}; +use zksync_types::ProtocolVersionId; mod call; pub mod storage_base_token_ratio; @@ -19,18 +18,6 @@ pub mod storage_verification_request; #[cfg(test)] mod tests; -pub(crate) fn parse_h256(bytes: &[u8]) -> anyhow::Result { - Ok(<[u8; 32]>::try_from(bytes).context("invalid size")?.into()) -} - -fn parse_h256_opt(bytes: Option<&[u8]>) -> anyhow::Result { - parse_h256(bytes.context("missing data")?) -} - -pub(crate) fn parse_h160(bytes: &[u8]) -> anyhow::Result { - Ok(<[u8; 20]>::try_from(bytes).context("invalid size")?.into()) -} - pub(crate) fn parse_protocol_version(raw: i32) -> sqlx::Result { u16::try_from(raw) .decode_column("protocol_version")? 
diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs
index 7e9a9eca9d41..3bb433a05cf8 100644
--- a/core/lib/dal/src/models/storage_block.rs
+++ b/core/lib/dal/src/models/storage_block.rs
@@ -6,7 +6,7 @@ use thiserror::Error;
 use zksync_contracts::BaseSystemContractsHashes;
 use zksync_types::{
     api,
-    block::{L1BatchHeader, L2BlockHeader},
+    block::{L1BatchHeader, L2BlockHeader, UnsealedL1BatchHeader},
     commitment::{L1BatchMetaParameters, L1BatchMetadata},
     fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput},
     l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log},
@@ -53,6 +53,7 @@ pub(crate) struct StorageL1BatchHeader {
     // will be exactly 7 (or 8 in the event of a protocol upgrade) system logs.
     pub system_logs: Vec<Vec<u8>>,
     pub pubdata_input: Option<Vec<u8>>,
+    pub fee_address: Vec<u8>,
 }
 
 impl StorageL1BatchHeader {
@@ -90,6 +91,7 @@ impl StorageL1BatchHeader {
                 .protocol_version
                 .map(|v| (v as u16).try_into().unwrap()),
             pubdata_input: self.pubdata_input,
+            fee_address: Address::from_slice(&self.fee_address),
         }
     }
 }
@@ -152,6 +154,7 @@ pub(crate) struct StorageL1Batch {
     pub events_queue_commitment: Option<Vec<u8>>,
     pub bootloader_initial_content_commitment: Option<Vec<u8>>,
     pub pubdata_input: Option<Vec<u8>>,
+    pub fee_address: Vec<u8>,
 }
 
 impl StorageL1Batch {
@@ -189,6 +192,7 @@ impl StorageL1Batch {
                 .protocol_version
                 .map(|v| (v as u16).try_into().unwrap()),
             pubdata_input: self.pubdata_input,
+            fee_address: Address::from_slice(&self.fee_address),
         }
     }
 }
@@ -263,6 +267,38 @@ impl TryFrom<StorageL1Batch> for L1BatchMetadata {
     }
 }
 
+/// Partial projection of the columns corresponding to an unsealed [`L1BatchHeader`].
+#[derive(Debug, Clone)]
+pub(crate) struct UnsealedStorageL1Batch {
+    pub number: i64,
+    pub timestamp: i64,
+    pub protocol_version: Option<i32>,
+    pub fee_address: Vec<u8>,
+    pub l1_gas_price: i64,
+    pub l2_fair_gas_price: i64,
+    pub fair_pubdata_price: Option<i64>,
+}
+
+impl From<UnsealedStorageL1Batch> for UnsealedL1BatchHeader {
+    fn from(batch: UnsealedStorageL1Batch) -> Self {
+        let protocol_version: Option<ProtocolVersionId> = batch
+            .protocol_version
+            .map(|v| (v as u16).try_into().unwrap());
+        Self {
+            number: L1BatchNumber(batch.number as u32),
+            timestamp: batch.timestamp as u64,
+            protocol_version,
+            fee_address: Address::from_slice(&batch.fee_address),
+            fee_input: BatchFeeInput::for_protocol_version(
+                protocol_version.unwrap_or_else(ProtocolVersionId::last_potentially_undefined),
+                batch.l2_fair_gas_price as u64,
+                batch.fair_pubdata_price.map(|p| p as u64),
+                batch.l1_gas_price as u64,
+            ),
+        }
+    }
+}
+
 #[derive(Debug, Clone, sqlx::FromRow)]
 pub(crate) struct StorageBlockDetails {
     pub number: i64,
diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs
index cf7b76d81633..7a4ebe074fe0 100644
--- a/core/lib/dal/src/models/storage_sync.rs
+++ b/core/lib/dal/src/models/storage_sync.rs
@@ -1,13 +1,11 @@
 use zksync_contracts::BaseSystemContractsHashes;
 use zksync_db_connection::error::SqlxContext;
 use zksync_types::{
-    api::en, Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction, H256,
+    api::en, parse_h160, parse_h256, parse_h256_opt, Address, L1BatchNumber, L2BlockNumber,
+    ProtocolVersionId, Transaction, H256,
 };
 
-use crate::{
-    consensus_dal::Payload,
-    models::{parse_h160, parse_h256, parse_h256_opt, parse_protocol_version},
-};
+use crate::{consensus_dal::Payload, models::parse_protocol_version};
 
 #[derive(Debug, Clone, sqlx::FromRow)]
 pub(crate) struct StorageSyncBlock {
diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs
index 3b500e07a08a..3382d8c836e5 100644
--- a/core/lib/dal/src/protocol_versions_dal.rs
+++ b/core/lib/dal/src/protocol_versions_dal.rs
@@ -381,6 +381,8 @@ impl ProtocolVersionsDal<'_, '_> {
                 protocol_version
             FROM
                 l1_batches
+            WHERE
+                is_sealed
             ORDER BY
                 number DESC
             LIMIT
diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs
index f3a20ac39fa0..10d2cfe61525 100644
--- a/core/lib/dal/src/storage_web3_dal.rs
+++ b/core/lib/dal/src/storage_web3_dal.rs
@@ -15,6 +15,13 @@ use zksync_utils::h256_to_u256;
 
 use crate::{models::storage_block::ResolvedL1BatchForL2Block, Core, CoreDal};
 
+/// Raw bytecode information returned by [`StorageWeb3Dal::get_contract_code_unchecked()`].
+#[derive(Debug)]
+pub struct RawBytecode {
+    pub bytecode_hash: H256,
+    pub bytecode: Vec<u8>,
+}
+
 #[derive(Debug)]
 pub struct StorageWeb3Dal<'a, 'c> {
     pub(crate) storage: &'a mut Connection<'c, Core>,
@@ -178,6 +185,8 @@ impl StorageWeb3Dal<'_, '_> {
                     MAX(number) + 1
                 FROM
                     l1_batches
+                WHERE
+                    is_sealed
             ),
             (
                 SELECT
@@ -232,16 +241,17 @@ impl StorageWeb3Dal<'_, '_> {
         &mut self,
         address: Address,
         block_number: L2BlockNumber,
-    ) -> DalResult<Option<Vec<u8>>> {
+    ) -> DalResult<Option<RawBytecode>> {
        let hashed_key = get_code_key(&address).hashed_key();
        let row = sqlx::query!(
            r#"
            SELECT
+               bytecode_hash,
                bytecode
            FROM
                (
                    SELECT
-                       *
+                       value
                    FROM
                        storage_logs
                    WHERE
@@ -252,7 +262,7 @@ impl StorageWeb3Dal<'_, '_> {
                        storage_logs.operation_number DESC
                    LIMIT
                        1
-               ) t
+               ) deploy_log
                JOIN factory_deps ON value = factory_deps.bytecode_hash
            WHERE
                value != $3
@@ -266,7 +276,11 @@ impl StorageWeb3Dal<'_, '_> {
        .with_arg("block_number", &block_number)
        .fetch_optional(self.storage)
        .await?;
-       Ok(row.map(|row| row.bytecode))
+
+       Ok(row.map(|row| RawBytecode {
+           bytecode_hash: H256::from_slice(&row.bytecode_hash),
+           bytecode: row.bytecode,
+       }))
     }
 
     /// Given bytecode hash, returns bytecode and L2 block number at which it was inserted.
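Callers of `get_contract_code_unchecked` now get the bytecode hash alongside the bytes in one query instead of a bare `Vec<u8>`. A minimal sketch of an adapted caller — the function name and surrounding scaffolding are illustrative, not part of the diff:

```rust
use zksync_dal::{Connection, Core, CoreDal};
use zksync_types::{Address, L2BlockNumber};

// Hypothetical caller; only `get_contract_code_unchecked` and `RawBytecode`
// come from the change above.
async fn log_contract_code(
    conn: &mut Connection<'_, Core>,
    address: Address,
    block: L2BlockNumber,
) -> anyhow::Result<()> {
    match conn
        .storage_web3_dal()
        .get_contract_code_unchecked(address, block)
        .await?
    {
        // Previously only the bytes were returned; the hash now arrives in the
        // same row instead of requiring a second lookup.
        Some(raw) => println!("{} bytes, hash {:?}", raw.bytecode.len(), raw.bytecode_hash),
        None => println!("no code deployed at {address:?}"),
    }
    Ok(())
}
```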
diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index ab5684007d0b..265c61354887 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -35,6 +35,8 @@ impl SyncDal<'_, '_> { (MAX(number) + 1) FROM l1_batches + WHERE + is_sealed ), ( SELECT diff --git a/core/lib/dal/src/vm_runner_dal.rs b/core/lib/dal/src/vm_runner_dal.rs index b12b02186808..df0d3e86b889 100644 --- a/core/lib/dal/src/vm_runner_dal.rs +++ b/core/lib/dal/src/vm_runner_dal.rs @@ -42,6 +42,8 @@ impl VmRunnerDal<'_, '_> { MAX(number) AS "last_batch" FROM l1_batches + WHERE + is_sealed ), processed_batches AS ( @@ -205,6 +207,8 @@ impl VmRunnerDal<'_, '_> { MAX(number) AS "last_batch" FROM l1_batches + WHERE + is_sealed ), processed_batches AS ( diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs index 54419f3b5626..5e788509461d 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ b/core/lib/eth_client/src/clients/http/query.rs @@ -396,16 +396,12 @@ where let chunk_end = (chunk_start + FEE_HISTORY_MAX_REQUEST_CHUNK).min(upto_block); let chunk_size = chunk_end - chunk_start; - let fee_history = EthNamespaceClient::fee_history( - client, - U64::from(chunk_size), - zksync_types::api::BlockNumber::from(chunk_end), - vec![], - ) - .rpc_context("fee_history") - .with_arg("chunk_size", &chunk_size) - .with_arg("block", &chunk_end) - .await?; + let fee_history = client + .fee_history(U64::from(chunk_size).into(), chunk_end.into(), vec![]) + .rpc_context("fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("block", &chunk_end) + .await?; // Check that the lengths are the same. if fee_history.inner.base_fee_per_gas.len() != fee_history.l2_pubdata_price.len() { diff --git a/core/lib/l1_contract_interface/Cargo.toml b/core/lib/l1_contract_interface/Cargo.toml index 8b68df854e71..1aa4c256e0fb 100644 --- a/core/lib/l1_contract_interface/Cargo.toml +++ b/core/lib/l1_contract_interface/Cargo.toml @@ -19,12 +19,14 @@ crypto_codegen.workspace = true # Used to calculate the kzg commitment and proofs kzg.workspace = true +anyhow.workspace = true sha2.workspace = true sha3.workspace = true hex.workspace = true once_cell.workspace = true [dev-dependencies] +rand.workspace = true serde.workspace = true serde_json.workspace = true serde_with = { workspace = true, features = ["base64", "hex"] } diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs index d1ed57e41f2e..aa9872049015 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs @@ -3,4 +3,7 @@ mod commit_batch_info; mod stored_batch_info; +#[cfg(test)] +mod tests; + pub use self::{commit_batch_info::CommitBatchInfo, stored_batch_info::StoredBatchInfo}; diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs index 8373c46e36bb..26f9b30392ea 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs @@ -1,7 +1,8 @@ +use anyhow::Context as _; use zksync_types::{ commitment::L1BatchWithMetadata, - ethabi::{self, Token}, - web3, + ethabi::{self, ParamType, Token}, + parse_h256, web3, web3::contract::Error as ContractError, H256, U256, }; @@ -9,7 +10,7 @@ use zksync_types::{ 
 use crate::Tokenizable;
 
 /// `StoredBatchInfo` from `IExecutor.sol`.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
 pub struct StoredBatchInfo {
     pub batch_number: u64,
     pub batch_hash: H256,
@@ -22,11 +23,35 @@ pub struct StoredBatchInfo {
 }
 
 impl StoredBatchInfo {
+    fn schema() -> Vec<ParamType> {
+        vec![ParamType::Tuple(vec![
+            ParamType::Uint(64),
+            ParamType::FixedBytes(32),
+            ParamType::Uint(64),
+            ParamType::Uint(256),
+            ParamType::FixedBytes(32),
+            ParamType::FixedBytes(32),
+            ParamType::Uint(256),
+            ParamType::FixedBytes(32),
+        ])]
+    }
+
+    /// ABI-encodes the struct.
+    pub fn encode(&self) -> Vec<u8> {
+        ethabi::encode(&[self.clone().into_token()])
+    }
+
+    /// Decodes the struct from its ABI encoding.
+    pub fn decode(encoded: &[u8]) -> anyhow::Result<Self> {
+        let [token] = ethabi::decode_whole(&Self::schema(), encoded)?
+            .try_into()
+            .unwrap();
+        Ok(Self::from_token(token)?)
+    }
+
     /// `_hashStoredBatchInfo` from `Executor.sol`.
     pub fn hash(&self) -> H256 {
-        H256(web3::keccak256(&ethabi::encode(&[self
-            .clone()
-            .into_token()])))
+        H256(web3::keccak256(&self.encode()))
     }
 }
 
@@ -46,11 +71,42 @@ impl From<&L1BatchWithMetadata> for StoredBatchInfo {
 }
 
 impl Tokenizable for StoredBatchInfo {
-    fn from_token(_token: Token) -> Result<Self, ContractError> {
-        // Currently there is no need to decode this struct.
-        // We still want to implement `Tokenizable` trait for it, so that *once* it's needed
-        // the implementation is provided here and not in some other inconsistent way.
-        Err(ContractError::Other("Not implemented".into()))
+    fn from_token(token: Token) -> Result<Self, ContractError> {
+        (|| {
+            let [
+                Token::Uint(batch_number),
+                Token::FixedBytes(batch_hash),
+                Token::Uint(index_repeated_storage_changes),
+                Token::Uint(number_of_layer1_txs),
+                Token::FixedBytes(priority_operations_hash),
+                Token::FixedBytes(l2_logs_tree_root),
+                Token::Uint(timestamp),
+                Token::FixedBytes(commitment),
+            ]: [Token; 8] = token
+                .into_tuple().context("not a tuple")?
+                .try_into().ok().context("bad length")?
+            else { anyhow::bail!("bad format") };
+            Ok(Self {
+                batch_number: batch_number
+                    .try_into()
+                    .ok()
+                    .context("overflow")
+                    .context("batch_number")?,
+                batch_hash: parse_h256(&batch_hash).context("batch_hash")?,
+                index_repeated_storage_changes: index_repeated_storage_changes
+                    .try_into()
+                    .ok()
+                    .context("overflow")
+                    .context("index_repeated_storage_changes")?,
+                number_of_layer1_txs,
+                priority_operations_hash: parse_h256(&priority_operations_hash)
+                    .context("priority_operations_hash")?,
+                l2_logs_tree_root: parse_h256(&l2_logs_tree_root).context("l2_logs_tree_root")?,
+                timestamp,
+                commitment: parse_h256(&commitment).context("commitment")?,
+            })
+        })()
+        .map_err(|err| ContractError::InvalidOutputType(format!("{err:#}")))
     }
 
     fn into_token(self) -> Token {
diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/tests.rs b/core/lib/l1_contract_interface/src/i_executor/structures/tests.rs
new file mode 100644
index 000000000000..0cb8caffb340
--- /dev/null
+++ b/core/lib/l1_contract_interface/src/i_executor/structures/tests.rs
@@ -0,0 +1,32 @@
+use rand::{
+    distributions::{Distribution, Standard},
+    Rng,
+};
+
+use super::*;
+
+impl Distribution<StoredBatchInfo> for Standard {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> StoredBatchInfo {
+        StoredBatchInfo {
+            batch_number: rng.gen(),
+            batch_hash: rng.gen(),
+            index_repeated_storage_changes: rng.gen(),
+            number_of_layer1_txs: rng.gen::<u64>().into(),
+            priority_operations_hash: rng.gen(),
+            l2_logs_tree_root: rng.gen(),
+            timestamp: rng.gen::<u64>().into(),
+            commitment: rng.gen(),
+        }
+    }
+}
+
+/// Test checking encoding and decoding of `StoredBatchInfo`.
+#[test]
+fn test_encoding() {
+    let rng = &mut rand::thread_rng();
+    for _ in 0..10 {
+        let want: StoredBatchInfo = rng.gen();
+        let got = StoredBatchInfo::decode(&want.encode()).unwrap();
+        assert_eq!(want, got);
+    }
+}
diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml
index ab418d24cd12..e49086a6b8b1 100644
--- a/core/lib/multivm/Cargo.toml
+++ b/core/lib/multivm/Cargo.toml
@@ -42,5 +42,6 @@ ethabi.workspace = true
 [dev-dependencies]
 assert_matches.workspace = true
 pretty_assertions.workspace = true
+test-casing.workspace = true
 zksync_test_account.workspace = true
 zksync_eth_signer.workspace = true
diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs
index e171a78e1794..520274c14ae0 100644
--- a/core/lib/multivm/src/lib.rs
+++ b/core/lib/multivm/src/lib.rs
@@ -16,7 +16,7 @@ pub use crate::{
         vm_1_3_2, vm_1_4_1, vm_1_4_2, vm_boojum_integration, vm_fast, vm_latest, vm_m5, vm_m6,
         vm_refunds_enhancement, vm_virtual_blocks,
     },
-    vm_instance::{FastVmInstance, LegacyVmInstance},
+    vm_instance::{is_supported_by_fast_vm, FastVmInstance, LegacyVmInstance},
 };
 
 mod glue;
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/utils.rs
index da4e2f5350f9..7870b1ff7443 100644
--- a/core/lib/multivm/src/versions/vm_1_3_2/utils.rs
+++ b/core/lib/multivm/src/versions/vm_1_3_2/utils.rs
@@ -5,7 +5,7 @@ use zk_evm_1_3_3::{
     vm_state::PrimitiveValue,
     zkevm_opcode_defs::FatPointer,
 };
-use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts};
+use zksync_contracts::BaseSystemContracts;
 use zksync_system_constants::ZKPORTER_IS_AVAILABLE;
 use zksync_types::{Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256};
 use zksync_utils::h256_to_u256;
@@ -221,13 +221,6 @@ pub fn create_test_block_params() -> (BlockContext, BlockProperties) {
     )
 }
 
-pub fn read_bootloader_test_code(test: &str) -> Vec<u8> {
-    read_zbin_bytecode(format!(
"contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )) -} - pub(crate) fn calculate_computational_gas_used< S: WriteStorage, T: PubdataSpentTracer, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs index 5ab5aa0dec92..76ca9bc5dd38 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use ethabi::Contract; use once_cell::sync::Lazy; use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, + load_contract, read_bytecode, read_yul_bytecode, BaseSystemContracts, SystemContractCode, }; use zksync_types::{ utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H160, H256, @@ -64,10 +64,8 @@ pub(crate) fn read_test_contract() -> Vec { } pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); + let artifacts_path = "contracts/system-contracts/bootloader/tests/artifacts/"; + let bootloader_code = read_yul_bytecode(artifacts_path, test); let bootloader_hash = hash_bytecode(&bootloader_code); SystemContractCode { diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 0c20af57e033..39c9b3c56566 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -40,6 +40,7 @@ use crate::{ VmExecutionStatistics, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, VmTrackingContracts, }, + is_supported_by_fast_vm, utils::events::extract_l2tol1logs_from_l1_messenger, vm_fast::{ bootloader_state::utils::{apply_l2_block, apply_pubdata_to_memory}, @@ -104,6 +105,12 @@ pub struct Vm { impl Vm { pub fn custom(batch_env: L1BatchEnv, system_env: SystemEnv, storage: S) -> Self { + assert!( + is_supported_by_fast_vm(system_env.version), + "Protocol version {:?} is not supported by fast VM", + system_env.version + ); + let default_aa_code_hash = system_env .base_system_smart_contracts .default_aa diff --git a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs index ca8157b170d4..34780b73eb05 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs @@ -1,28 +1,47 @@ +use std::collections::HashMap; + use ethabi::Token; -use zksync_contracts::read_bytecode; -use zksync_system_constants::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; -use zksync_types::{get_code_key, get_known_code_key, Execute, H256}; -use zksync_utils::{be_words_to_bytes, bytecode::hash_bytecode, h256_to_u256}; -use zksync_vm_interface::VmInterfaceExt; +use test_casing::{test_casing, Product}; +use zksync_contracts::{load_contract, read_bytecode, SystemContractCode}; +use zksync_system_constants::{ + CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L2_BASE_TOKEN_ADDRESS, +}; +use zksync_test_account::TxType; +use zksync_types::{ + get_code_key, get_known_code_key, + utils::{key_for_eth_balance, storage_key_for_eth_balance}, + AccountTreeId, Address, Execute, StorageKey, H256, U256, +}; +use zksync_utils::{ + be_words_to_bytes, + bytecode::{hash_bytecode, hash_evm_bytecode}, + bytes_to_be_words, h256_to_u256, +}; use crate::{ 
- interface::{storage::InMemoryStorage, TxExecutionMode}, + interface::{ + storage::InMemoryStorage, TxExecutionMode, VmExecutionResultAndLogs, VmInterfaceExt, + }, versions::testonly::default_system_env, - vm_latest::{tests::tester::VmTesterBuilder, utils::hash_evm_bytecode, HistoryEnabled}, + vm_latest::{ + tests::tester::{VmTester, VmTesterBuilder}, + HistoryEnabled, + }, }; const MOCK_DEPLOYER_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockContractDeployer.json"; const MOCK_KNOWN_CODE_STORAGE_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockKnownCodeStorage.json"; +const MOCK_EMULATOR_PATH: &str = + "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockEvmEmulator.json"; +const RECURSIVE_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/NativeRecursiveContract.json"; +const INCREMENTING_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/IncrementingContract.json"; -#[test] -fn tracing_evm_contract_deployment() { +fn override_system_contracts(storage: &mut InMemoryStorage) { let mock_deployer = read_bytecode(MOCK_DEPLOYER_PATH); let mock_deployer_hash = hash_bytecode(&mock_deployer); let mock_known_code_storage = read_bytecode(MOCK_KNOWN_CODE_STORAGE_PATH); let mock_known_code_storage_hash = hash_bytecode(&mock_known_code_storage); - // Override - let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); storage.set_value(get_code_key(&CONTRACT_DEPLOYER_ADDRESS), mock_deployer_hash); storage.set_value( get_known_code_key(&mock_deployer_hash), @@ -38,6 +57,81 @@ fn tracing_evm_contract_deployment() { ); storage.store_factory_dep(mock_deployer_hash, mock_deployer); storage.store_factory_dep(mock_known_code_storage_hash, mock_known_code_storage); +} + +#[derive(Debug)] +struct EvmTestBuilder { + deploy_emulator: bool, + storage: InMemoryStorage, + evm_contract_addresses: Vec
, +} + +impl EvmTestBuilder { + fn new(deploy_emulator: bool, evm_contract_address: Address) -> Self { + Self { + deploy_emulator, + storage: InMemoryStorage::with_system_contracts(hash_bytecode), + evm_contract_addresses: vec![evm_contract_address], + } + } + + fn with_mock_deployer(mut self) -> Self { + override_system_contracts(&mut self.storage); + self + } + + fn with_evm_address(mut self, address: Address) -> Self { + self.evm_contract_addresses.push(address); + self + } + + fn build(self) -> VmTester { + let mock_emulator = read_bytecode(MOCK_EMULATOR_PATH); + let mut storage = self.storage; + let mut system_env = default_system_env(); + if self.deploy_emulator { + let evm_bytecode: Vec<_> = (0..32).collect(); + let evm_bytecode_hash = hash_evm_bytecode(&evm_bytecode); + storage.set_value( + get_known_code_key(&evm_bytecode_hash), + H256::from_low_u64_be(1), + ); + for evm_address in self.evm_contract_addresses { + storage.set_value(get_code_key(&evm_address), evm_bytecode_hash); + } + + system_env.base_system_smart_contracts.evm_emulator = Some(SystemContractCode { + hash: hash_bytecode(&mock_emulator), + code: bytes_to_be_words(mock_emulator), + }); + } else { + let emulator_hash = hash_bytecode(&mock_emulator); + storage.set_value(get_known_code_key(&emulator_hash), H256::from_low_u64_be(1)); + storage.store_factory_dep(emulator_hash, mock_emulator); + + for evm_address in self.evm_contract_addresses { + storage.set_value(get_code_key(&evm_address), emulator_hash); + // Set `isUserSpace` in the emulator storage to `true`, so that it skips emulator-specific checks + storage.set_value( + StorageKey::new(AccountTreeId::new(evm_address), H256::zero()), + H256::from_low_u64_be(1), + ); + } + } + + VmTesterBuilder::new(HistoryEnabled) + .with_system_env(system_env) + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build() + } +} + +#[test] +fn tracing_evm_contract_deployment() { + let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + override_system_contracts(&mut storage); let mut system_env = default_system_env(); // The EVM emulator will not be accessed, so we set it to a dummy value. @@ -51,7 +145,7 @@ fn tracing_evm_contract_deployment() { .build(); let account = &mut vm.rich_accounts[0]; - let args = [Token::Bytes((0..=u8::MAX).collect())]; + let args = [Token::Bytes((0..32).collect())]; let evm_bytecode = ethabi::encode(&args); let expected_bytecode_hash = hash_evm_bytecode(&evm_bytecode); let execute = Execute::for_deploy(expected_bytecode_hash, vec![0; 32], &args); @@ -74,3 +168,343 @@ fn tracing_evm_contract_deployment() { evm_bytecode ); } + +#[test] +fn mock_emulator_basics() { + let called_address = Address::repeat_byte(0x23); + let mut vm = EvmTestBuilder::new(true, called_address).build(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(called_address), + calldata: vec![], + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +const RECIPIENT_ADDRESS: Address = Address::repeat_byte(0x12); + +/// `deploy_emulator = false` here and below tests the mock emulator as an ordinary contract (i.e., sanity-checks its logic). 
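For readers unfamiliar with the `test-casing` dev-dependency added above: `#[test_casing(n, cases)]` expands one parameterized function into `n` separate tests, one per case. Roughly — the generated names below are illustrative, not the crate's actual output:

```rust
// Hand-expanded sketch of what `#[test_casing(2, [false, true])]` produces:
// one #[test] wrapper per case, each calling the parameterized body.
fn mock_emulator_with_payment(deploy_emulator: bool) {
    // ... test body as in the diff below ...
    let _ = deploy_emulator;
}

#[test]
fn mock_emulator_with_payment_case_0() {
    mock_emulator_with_payment(false);
}

#[test]
fn mock_emulator_with_payment_case_1() {
    mock_emulator_with_payment(true);
}
```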
+#[test_casing(2, [false, true])] +#[test] +fn mock_emulator_with_payment(deploy_emulator: bool) { + let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); + let mut vm = EvmTestBuilder::new(deploy_emulator, RECIPIENT_ADDRESS).build(); + + let mut current_balance = U256::zero(); + for i in 1_u64..=5 { + let transferred_value = (1_000_000_000 * i).into(); + let vm_result = test_payment( + &mut vm, + &mock_emulator_abi, + &mut current_balance, + transferred_value, + ); + + let balance_storage_logs = vm_result.logs.storage_logs.iter().filter_map(|log| { + (*log.log.key.address() == L2_BASE_TOKEN_ADDRESS) + .then_some((*log.log.key.key(), h256_to_u256(log.log.value))) + }); + let balances: HashMap<_, _> = balance_storage_logs.collect(); + assert_eq!( + balances[&key_for_eth_balance(&RECIPIENT_ADDRESS)], + current_balance + ); + } +} + +fn test_payment( + vm: &mut VmTester, + mock_emulator_abi: ðabi::Contract, + balance: &mut U256, + transferred_value: U256, +) -> VmExecutionResultAndLogs { + *balance += transferred_value; + let test_payment_fn = mock_emulator_abi.function("testPayment").unwrap(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(RECIPIENT_ADDRESS), + calldata: test_payment_fn + .encode_input(&[Token::Uint(transferred_value), Token::Uint(*balance)]) + .unwrap(), + value: transferred_value, + factory_deps: vec![], + }, + None, + ); + + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); + vm_result +} + +#[test_casing(4, Product(([false, true], [false, true])))] +#[test] +fn mock_emulator_with_recursion(deploy_emulator: bool, is_external: bool) { + let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); + let recipient_address = Address::repeat_byte(0x12); + let mut vm = EvmTestBuilder::new(deploy_emulator, recipient_address).build(); + let account = &mut vm.rich_accounts[0]; + + let test_recursion_fn = mock_emulator_abi + .function(if is_external { + "testExternalRecursion" + } else { + "testRecursion" + }) + .unwrap(); + let mut expected_value = U256::one(); + let depth = 50_u32; + for i in 2..=depth { + expected_value *= i; + } + + let factory_deps = if is_external { + vec![read_bytecode(RECURSIVE_CONTRACT_PATH)] + } else { + vec![] + }; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(recipient_address), + calldata: test_recursion_fn + .encode_input(&[Token::Uint(depth.into()), Token::Uint(expected_value)]) + .unwrap(), + value: 0.into(), + factory_deps, + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} + +#[test] +fn calling_to_mock_emulator_from_native_contract() { + let recipient_address = Address::repeat_byte(0x12); + let mut vm = EvmTestBuilder::new(true, recipient_address).build(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. + let native_contract = read_bytecode(RECURSIVE_CONTRACT_PATH); + let native_contract_abi = load_contract(RECURSIVE_CONTRACT_PATH); + let deploy_tx = account.get_deploy_tx( + &native_contract, + Some(&[Token::Address(recipient_address)]), + TxType::L2, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + // Call from the native contract to the EVM emulator. 
+ let test_fn = native_contract_abi.function("recurse").unwrap(); + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(deploy_tx.address), + calldata: test_fn.encode_input(&[Token::Uint(50.into())]).unwrap(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[test] +fn mock_emulator_with_deployment() { + let contract_address = Address::repeat_byte(0xaa); + let mut vm = EvmTestBuilder::new(true, contract_address) + .with_mock_deployer() + .build(); + let account = &mut vm.rich_accounts[0]; + + let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); + let new_evm_bytecode = vec![0xfe; 96]; + let new_evm_bytecode_hash = hash_evm_bytecode(&new_evm_bytecode); + + let test_fn = mock_emulator_abi.function("testDeploymentAndCall").unwrap(); + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(contract_address), + calldata: test_fn + .encode_input(&[ + Token::FixedBytes(new_evm_bytecode_hash.0.into()), + Token::Bytes(new_evm_bytecode.clone()), + ]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); + + let factory_deps = vm_result.new_known_factory_deps.unwrap(); + assert_eq!( + factory_deps, + HashMap::from([(new_evm_bytecode_hash, new_evm_bytecode)]) + ); +} + +#[test] +fn mock_emulator_with_delegate_call() { + let evm_contract_address = Address::repeat_byte(0xaa); + let other_evm_contract_address = Address::repeat_byte(0xbb); + let mut builder = EvmTestBuilder::new(true, evm_contract_address); + builder.storage.set_value( + storage_key_for_eth_balance(&evm_contract_address), + H256::from_low_u64_be(1_000_000), + ); + builder.storage.set_value( + storage_key_for_eth_balance(&other_evm_contract_address), + H256::from_low_u64_be(2_000_000), + ); + let mut vm = builder.with_evm_address(other_evm_contract_address).build(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. + let native_contract = read_bytecode(INCREMENTING_CONTRACT_PATH); + let native_contract_abi = load_contract(INCREMENTING_CONTRACT_PATH); + let deploy_tx = account.get_deploy_tx(&native_contract, None, TxType::L2); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + let test_fn = native_contract_abi.function("testDelegateCall").unwrap(); + // Delegate to the native contract from EVM. + test_delegate_call(&mut vm, test_fn, evm_contract_address, deploy_tx.address); + // Delegate to EVM from the native contract. + test_delegate_call(&mut vm, test_fn, deploy_tx.address, evm_contract_address); + // Delegate to EVM from EVM. 
+ test_delegate_call( + &mut vm, + test_fn, + evm_contract_address, + other_evm_contract_address, + ); +} + +fn test_delegate_call( + vm: &mut VmTester, + test_fn: ðabi::Function, + from: Address, + to: Address, +) { + let account = &mut vm.rich_accounts[0]; + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(from), + calldata: test_fn.encode_input(&[Token::Address(to)]).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} + +#[test] +fn mock_emulator_with_static_call() { + let evm_contract_address = Address::repeat_byte(0xaa); + let other_evm_contract_address = Address::repeat_byte(0xbb); + let mut builder = EvmTestBuilder::new(true, evm_contract_address); + builder.storage.set_value( + storage_key_for_eth_balance(&evm_contract_address), + H256::from_low_u64_be(1_000_000), + ); + builder.storage.set_value( + storage_key_for_eth_balance(&other_evm_contract_address), + H256::from_low_u64_be(2_000_000), + ); + // Set differing read values for tested contracts. The slot index is defined in the contract. + let value_slot = H256::from_low_u64_be(0x123); + builder.storage.set_value( + StorageKey::new(AccountTreeId::new(evm_contract_address), value_slot), + H256::from_low_u64_be(100), + ); + builder.storage.set_value( + StorageKey::new(AccountTreeId::new(other_evm_contract_address), value_slot), + H256::from_low_u64_be(200), + ); + let mut vm = builder.with_evm_address(other_evm_contract_address).build(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. + let native_contract = read_bytecode(INCREMENTING_CONTRACT_PATH); + let native_contract_abi = load_contract(INCREMENTING_CONTRACT_PATH); + let deploy_tx = account.get_deploy_tx(&native_contract, None, TxType::L2); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + let test_fn = native_contract_abi.function("testStaticCall").unwrap(); + // Call to the native contract from EVM. + test_static_call(&mut vm, test_fn, evm_contract_address, deploy_tx.address, 0); + // Call to EVM from the native contract. + test_static_call( + &mut vm, + test_fn, + deploy_tx.address, + evm_contract_address, + 100, + ); + // Call to EVM from EVM. 
+ test_static_call( + &mut vm, + test_fn, + evm_contract_address, + other_evm_contract_address, + 200, + ); +} + +fn test_static_call( + vm: &mut VmTester, + test_fn: ðabi::Function, + from: Address, + to: Address, + expected_value: u64, +) { + let account = &mut vm.rich_accounts[0]; + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(from), + calldata: test_fn + .encode_input(&[Token::Address(to), Token::Uint(expected_value.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs index c5487379ce31..4d728962febf 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs @@ -1,7 +1,8 @@ use ethabi::Contract; use once_cell::sync::Lazy; use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, + load_contract, read_bytecode, read_yul_bytecode, read_zbin_bytecode, BaseSystemContracts, + SystemContractCode, }; use zksync_types::{ utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, @@ -59,10 +60,8 @@ pub(crate) fn read_test_contract() -> Vec { } pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); + let artifacts_path = "contracts/system-contracts/bootloader/tests/artifacts/"; + let bootloader_code = read_yul_bytecode(artifacts_path, test); let bootloader_hash = hash_bytecode(&bootloader_code); SystemContractCode { diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs index d91ee13a920a..becc4f225276 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs @@ -8,16 +8,14 @@ use zk_evm_1_5_0::{ }, }; use zksync_types::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; -use zksync_utils::{bytes_to_be_words, h256_to_u256}; +use zksync_utils::{bytecode::hash_evm_bytecode, bytes_to_be_words, h256_to_u256}; use zksync_vm_interface::storage::StoragePtr; use super::{traits::VmTracer, utils::read_pointer}; use crate::{ interface::{storage::WriteStorage, tracer::TracerExecutionStatus}, tracers::dynamic::vm_1_5_0::DynTracer, - vm_latest::{ - utils::hash_evm_bytecode, BootloaderState, HistoryMode, SimpleMemory, ZkSyncVmState, - }, + vm_latest::{BootloaderState, HistoryMode, SimpleMemory, ZkSyncVmState}, }; /// Tracer responsible for collecting information about EVM deploys and providing those diff --git a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs index e07d3eda7c4c..aeb66755f514 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs @@ -1,11 +1,7 @@ //! Utility functions for the VM. 
use once_cell::sync::Lazy; -use zk_evm_1_5_0::{ - aux_structures::MemoryPage, - sha2, - zkevm_opcode_defs::{BlobSha256Format, VersionedHashLen32}, -}; +use zk_evm_1_5_0::aux_structures::MemoryPage; use zksync_types::{H256, KNOWN_CODES_STORAGE_ADDRESS}; use zksync_vm_interface::VmEvent; @@ -15,22 +11,6 @@ pub(crate) mod logs; pub mod overhead; pub mod transaction_encoding; -pub(crate) fn hash_evm_bytecode(bytecode: &[u8]) -> H256 { - use sha2::{Digest, Sha256}; - let mut hasher = Sha256::new(); - let len = bytecode.len() as u16; - hasher.update(bytecode); - let result = hasher.finalize(); - - let mut output = [0u8; 32]; - output[..].copy_from_slice(result.as_slice()); - output[0] = BlobSha256Format::VERSION_BYTE; - output[1] = 0; - output[2..4].copy_from_slice(&len.to_be_bytes()); - - H256(output) -} - pub const fn heap_page_from_base(base: MemoryPage) -> MemoryPage { MemoryPage(base.0 + 2) } diff --git a/core/lib/multivm/src/versions/vm_m5/utils.rs b/core/lib/multivm/src/versions/vm_m5/utils.rs index 8c5bca674c69..a38618395b1f 100644 --- a/core/lib/multivm/src/versions/vm_m5/utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/utils.rs @@ -5,7 +5,7 @@ use zk_evm_1_3_1::{ vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer, }; -use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; +use zksync_contracts::BaseSystemContracts; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; @@ -253,13 +253,6 @@ pub fn create_test_block_params() -> (BlockContext, BlockProperties) { ) } -pub fn read_bootloader_test_code(test: &str) -> Vec { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )) -} - /// Log query, which handle initial and repeated writes to the storage #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct StorageLogQuery { diff --git a/core/lib/multivm/src/versions/vm_m6/utils.rs b/core/lib/multivm/src/versions/vm_m6/utils.rs index d9709022fe3c..912a30a4eafc 100644 --- a/core/lib/multivm/src/versions/vm_m6/utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/utils.rs @@ -5,7 +5,7 @@ use zk_evm_1_3_1::{ vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer, }; -use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; +use zksync_contracts::BaseSystemContracts; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; @@ -256,13 +256,6 @@ pub fn create_test_block_params() -> (BlockContext, BlockProperties) { ) } -pub fn read_bootloader_test_code(test: &str) -> Vec { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )) -} - pub(crate) fn calculate_computational_gas_used< S: Storage, T: PubdataSpentTracer, diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index ac5693b61619..897070345232 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -1,6 +1,6 @@ use std::mem; -use zksync_types::{vm::VmVersion, Transaction}; +use zksync_types::{vm::VmVersion, ProtocolVersionId, Transaction}; use zksync_vm2::interface::Tracer; use crate::{ @@ -328,3 +328,11 @@ impl FastVmInstance { Self::Shadowed(ShadowedFastVm::new(l1_batch_env, system_env, storage_view)) } } + +/// Checks whether the protocol version is supported by the fast VM. 
+pub fn is_supported_by_fast_vm(protocol_version: ProtocolVersionId) -> bool {
+    matches!(
+        protocol_version.into(),
+        VmVersion::Vm1_5_0IncreasedBootloaderMemory
+    )
+}
diff --git a/core/lib/protobuf_config/Cargo.toml b/core/lib/protobuf_config/Cargo.toml
index 92d9bd53978c..87a0a63567ba 100644
--- a/core/lib/protobuf_config/Cargo.toml
+++ b/core/lib/protobuf_config/Cargo.toml
@@ -26,6 +26,7 @@ rand.workspace = true
 hex.workspace = true
 secrecy.workspace = true
 tracing.workspace = true
+time.workspace = true
 
 [build-dependencies]
 zksync_protobuf_build.workspace = true
diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs
index 81cad437fe4b..2219b6a82ea8 100644
--- a/core/lib/protobuf_config/src/consensus.rs
+++ b/core/lib/protobuf_config/src/consensus.rs
@@ -148,6 +148,7 @@ impl ProtoRepr for proto::Config {
         };
 
         Ok(Self::Type {
+            port: self.port.and_then(|x| x.try_into().ok()),
             server_addr: required(&self.server_addr)
                 .and_then(|x| Ok(x.parse()?))
                 .context("server_addr")?,
@@ -182,6 +183,7 @@ impl ProtoRepr for proto::Config {
 
     fn build(this: &Self::Type) -> Self {
         Self {
+            port: this.port.map(|x| x.into()),
             server_addr: Some(this.server_addr.to_string()),
             public_addr: Some(this.public_addr.0.clone()),
             max_payload_size: Some(this.max_payload_size.try_into().unwrap()),
diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs
index c89199359aaa..68f7f699de20 100644
--- a/core/lib/protobuf_config/src/lib.rs
+++ b/core/lib/protobuf_config/src/lib.rs
@@ -28,6 +28,7 @@ mod observability;
 mod proof_data_handler;
 pub mod proto;
 mod prover;
+mod prover_autoscaler;
 mod prover_job_monitor;
 mod pruning;
 mod secrets;
@@ -63,24 +64,23 @@ pub fn read_optional_repr<P: ProtoRepr>(field: &Option<P>) -> Option<P::Type> {
         .transpose()
         // This error will be printed only if the config is partially filled; it makes config issues easier to debug.
         .map_err(|err| {
-            tracing::error!("Failed to serialize config: {err}");
+            tracing::error!("Failed to parse config: {err:#}");
             err
         })
         .ok()
         .flatten()
 }
 
-pub fn decode_yaml_repr<T: ProtoRepr>(
+/// Reads a yaml file.
+pub fn read_yaml_repr<T: ProtoRepr>(
     path: &PathBuf,
     deny_unknown_fields: bool,
 ) -> anyhow::Result<T::Type> {
     let yaml = std::fs::read_to_string(path).with_context(|| path.display().to_string())?;
-    let d = serde_yaml::Deserializer::from_str(&yaml);
-    let this: T = zksync_protobuf::serde::Deserialize {
+    zksync_protobuf::serde::Deserialize {
         deny_unknown_fields,
     }
-    .proto(d)?;
-    this.read()
+    .proto_repr_from_yaml::<T>(&yaml)
 }
 
 pub fn encode_yaml_repr<T: ProtoRepr>(value: &T::Type) -> anyhow::Result<Vec<u8>> {
diff --git a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto
new file mode 100644
index 000000000000..e1d11b94d8f1
--- /dev/null
+++ b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto
@@ -0,0 +1,46 @@
+syntax = "proto3";
+
+package zksync.config.prover_autoscaler;
+
+import "zksync/std.proto";
+import "zksync/config/observability.proto";
+
+message ProverAutoscalerConfig {
+  optional std.Duration graceful_shutdown_timeout = 1; // optional
+  optional ProverAutoscalerAgentConfig agent_config = 2; // optional
+  optional ProverAutoscalerScalerConfig scaler_config = 3; // optional
+  optional observability.Observability observability = 4; // optional
+}
+
+message ProverAutoscalerAgentConfig {
+  optional uint32 prometheus_port = 1; // required
+  optional uint32 http_port = 2; // required
+  repeated string namespaces = 3; // optional
+  optional string cluster_name = 4; // optional
+}
+
+message ProtocolVersion {
+  optional string namespace = 1; // required
+  optional string protocol_version = 2; // required
+}
+
+message ClusterPriority {
+  optional string cluster = 1; // required
+  optional uint32 priority = 2; // required
+}
+
+message ProverSpeed {
+  optional string gpu = 1; // required
+  optional uint32 speed = 2; // required
+}
+
+message ProverAutoscalerScalerConfig {
+  optional uint32 prometheus_port = 1; // required
+  optional std.Duration scaler_run_interval = 2; // optional
+  optional string prover_job_monitor_url = 3; // required
+  repeated string agents = 4; // required at least one
+  repeated ProtocolVersion protocol_versions = 5; // repeated at least one
+  repeated ClusterPriority cluster_priorities = 6; // optional
+  repeated ProverSpeed prover_speed = 7; // optional
+  optional uint32 long_pending_duration_s = 8; // optional
+}
diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto
index 92527df739aa..9b0d69e7270c 100644
--- a/core/lib/protobuf_config/src/proto/core/consensus.proto
+++ b/core/lib/protobuf_config/src/proto/core/consensus.proto
@@ -70,6 +70,9 @@ message Config {
   reserved 3;
   reserved "validators";
 
+  // Port to listen on, for incoming TCP connections.
+  optional uint32 port = 12;
+
   // IP:port to listen on, for incoming TCP connections.
   // Use `0.0.0.0:<port>` to listen on all network interfaces (i.e. on all IPs exposed by this VM).
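  // Editor's note (assumption, not stated in the patch): when both fields are
  // set, the new `port` presumably has to agree with the port component of
  // `server_addr`, e.g. `port: 3054` together with `server_addr: "0.0.0.0:3054"`.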
optional string server_addr = 1; // required; IpAddr diff --git a/core/lib/protobuf_config/src/prover_autoscaler.rs b/core/lib/protobuf_config/src/prover_autoscaler.rs new file mode 100644 index 000000000000..f7da099cb829 --- /dev/null +++ b/core/lib/protobuf_config/src/prover_autoscaler.rs @@ -0,0 +1,172 @@ +use anyhow::Context as _; +use time::Duration; +use zksync_config::configs::{self, prover_autoscaler::Gpu}; +use zksync_protobuf::{read_optional, repr::ProtoRepr, required, ProtoFmt}; + +use crate::{proto::prover_autoscaler as proto, read_optional_repr}; + +impl ProtoRepr for proto::ProverAutoscalerConfig { + type Type = configs::prover_autoscaler::ProverAutoscalerConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + graceful_shutdown_timeout: read_optional(&self.graceful_shutdown_timeout) + .context("graceful_shutdown_timeout")? + .unwrap_or(Self::Type::default_graceful_shutdown_timeout()), + agent_config: read_optional_repr(&self.agent_config), + scaler_config: read_optional_repr(&self.scaler_config), + observability: read_optional_repr(&self.observability), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + graceful_shutdown_timeout: Some(ProtoFmt::build(&this.graceful_shutdown_timeout)), + agent_config: this.agent_config.as_ref().map(ProtoRepr::build), + scaler_config: this.scaler_config.as_ref().map(ProtoRepr::build), + observability: this.observability.as_ref().map(ProtoRepr::build), + } + } +} + +impl ProtoRepr for proto::ProverAutoscalerAgentConfig { + type Type = configs::prover_autoscaler::ProverAutoscalerAgentConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + prometheus_port: required(&self.prometheus_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_port")?, + http_port: required(&self.http_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("http_port")?, + namespaces: self.namespaces.to_vec(), + cluster_name: Some("".to_string()), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + prometheus_port: Some(this.prometheus_port.into()), + http_port: Some(this.http_port.into()), + namespaces: this.namespaces.clone(), + cluster_name: this.cluster_name.clone(), + } + } +} + +impl ProtoRepr for proto::ProverAutoscalerScalerConfig { + type Type = configs::prover_autoscaler::ProverAutoscalerScalerConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + prometheus_port: required(&self.prometheus_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_port")?, + scaler_run_interval: read_optional(&self.scaler_run_interval) + .context("scaler_run_interval")? + .unwrap_or(Self::Type::default_scaler_run_interval()), + prover_job_monitor_url: required(&self.prover_job_monitor_url) + .context("prover_job_monitor_url")? 
+ .clone(), + agents: self.agents.to_vec(), + protocol_versions: self + .protocol_versions + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("protocol_versions")?, + cluster_priorities: self + .cluster_priorities + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("cluster_priorities")?, + prover_speed: self + .prover_speed + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("prover_speed")?, + long_pending_duration: match self.long_pending_duration_s { + Some(s) => Duration::seconds(s.into()), + None => Self::Type::default_long_pending_duration(), + }, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + prometheus_port: Some(this.prometheus_port.into()), + scaler_run_interval: Some(ProtoFmt::build(&this.scaler_run_interval)), + prover_job_monitor_url: Some(this.prover_job_monitor_url.clone()), + agents: this.agents.clone(), + protocol_versions: this + .protocol_versions + .iter() + .map(|(k, v)| proto::ProtocolVersion::build(&(k.clone(), v.clone()))) + .collect(), + cluster_priorities: this + .cluster_priorities + .iter() + .map(|(k, v)| proto::ClusterPriority::build(&(k.clone(), *v))) + .collect(), + prover_speed: this + .prover_speed + .iter() + .map(|(k, v)| proto::ProverSpeed::build(&(*k, *v))) + .collect(), + long_pending_duration_s: Some(this.long_pending_duration.whole_seconds() as u32), + } + } +} + +impl ProtoRepr for proto::ProtocolVersion { + type Type = (String, String); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.namespace).context("namespace")?.clone(), + required(&self.protocol_version) + .context("protocol_version")? + .clone(), + )) + } + fn build(this: &Self::Type) -> Self { + Self { + namespace: Some(this.0.clone()), + protocol_version: Some(this.1.clone()), + } + } +} + +impl ProtoRepr for proto::ClusterPriority { + type Type = (String, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.cluster).context("cluster")?.clone(), + *required(&self.priority).context("priority")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + cluster: Some(this.0.clone()), + priority: Some(this.1), + } + } +} + +impl ProtoRepr for proto::ProverSpeed { + type Type = (Gpu, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.gpu).context("gpu")?.parse()?, + *required(&self.speed).context("speed")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + gpu: Some(this.0.to_string()), + speed: Some(this.1), + } + } +} diff --git a/core/lib/protobuf_config/src/tests.rs b/core/lib/protobuf_config/src/tests.rs index d653b9b92bfd..c72bce0bf9a6 100644 --- a/core/lib/protobuf_config/src/tests.rs +++ b/core/lib/protobuf_config/src/tests.rs @@ -2,7 +2,7 @@ use std::{path::PathBuf, str::FromStr}; use zksync_protobuf::testonly::{test_encode_all_formats, ReprConv}; -use crate::{decode_yaml_repr, proto}; +use crate::{proto, read_yaml_repr}; /// Tests config <-> proto (boilerplate) conversions. #[test] @@ -60,14 +60,11 @@ fn test_encoding() { #[test] fn verify_file_parsing() { let base_path = PathBuf::from_str("../../../etc/env/file_based/").unwrap(); - decode_yaml_repr::(&base_path.join("general.yaml"), true) - .unwrap(); + read_yaml_repr::(&base_path.join("general.yaml"), true).unwrap(); // It's allowed to have unknown fields in wallets, e.g. 
we keep private key for fee account - decode_yaml_repr::(&base_path.join("wallets.yaml"), false).unwrap(); - decode_yaml_repr::(&base_path.join("genesis.yaml"), true).unwrap(); - decode_yaml_repr::(&base_path.join("contracts.yaml"), true) - .unwrap(); - decode_yaml_repr::(&base_path.join("secrets.yaml"), true).unwrap(); - decode_yaml_repr::(&base_path.join("external_node.yaml"), true) - .unwrap(); + read_yaml_repr::(&base_path.join("wallets.yaml"), false).unwrap(); + read_yaml_repr::(&base_path.join("genesis.yaml"), true).unwrap(); + read_yaml_repr::(&base_path.join("contracts.yaml"), true).unwrap(); + read_yaml_repr::(&base_path.join("secrets.yaml"), true).unwrap(); + read_yaml_repr::(&base_path.join("external_node.yaml"), true).unwrap(); } diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 8fe192a5f518..28bc1998312b 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -3,7 +3,7 @@ use std::{collections::HashMap, convert::TryInto, fmt::Debug}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, Bytes}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; -use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; +use zksync_object_store::{_reexports::BoxedError, serialize_using_bincode, Bucket, StoredObject}; use zksync_types::{ basic_fri_types::Eip4844Blobs, block::L2BlockExecutionData, witness_block_state::WitnessStorageState, L1BatchNumber, ProtocolVersionId, H256, U256, @@ -151,6 +151,38 @@ pub struct VMRunWitnessInputData { pub witness_block_state: WitnessStorageState, } +// skip_serializing_if for field evm_emulator_code_hash doesn't work fine with bincode, +// so we are implementing custom deserialization for it +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct VMRunWitnessInputDataLegacy { + pub l1_batch_number: L1BatchNumber, + pub used_bytecodes: HashMap>, + pub initial_heap_content: Vec<(usize, U256)>, + pub protocol_version: ProtocolVersionId, + pub bootloader_code: Vec<[u8; 32]>, + pub default_account_code_hash: U256, + pub storage_refunds: Vec, + pub pubdata_costs: Vec, + pub witness_block_state: WitnessStorageState, +} + +impl From for VMRunWitnessInputData { + fn from(value: VMRunWitnessInputDataLegacy) -> Self { + Self { + l1_batch_number: value.l1_batch_number, + used_bytecodes: value.used_bytecodes, + initial_heap_content: value.initial_heap_content, + protocol_version: value.protocol_version, + bootloader_code: value.bootloader_code, + default_account_code_hash: value.default_account_code_hash, + evm_emulator_code_hash: None, + storage_refunds: value.storage_refunds, + pubdata_costs: value.pubdata_costs, + witness_block_state: value.witness_block_state, + } + } +} + impl StoredObject for VMRunWitnessInputData { const BUCKET: Bucket = Bucket::WitnessInput; @@ -160,7 +192,17 @@ impl StoredObject for VMRunWitnessInputData { format!("vm_run_data_{key}.bin") } - serialize_using_bincode!(); + fn serialize(&self) -> Result, BoxedError> { + zksync_object_store::bincode::serialize(self).map_err(Into::into) + } + + fn deserialize(bytes: Vec) -> Result { + zksync_object_store::bincode::deserialize::(&bytes).or_else(|_| { + zksync_object_store::bincode::deserialize::(&bytes) + .map(Into::into) + .map_err(Into::into) + }) + } } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -171,6 +213,25 @@ pub struct WitnessInputData { pub eip_4844_blobs: Eip4844Blobs, } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WitnessInputDataLegacy 
{ + pub vm_run_data: VMRunWitnessInputDataLegacy, + pub merkle_paths: WitnessInputMerklePaths, + pub previous_batch_metadata: L1BatchMetadataHashes, + pub eip_4844_blobs: Eip4844Blobs, +} + +impl From for WitnessInputData { + fn from(value: WitnessInputDataLegacy) -> Self { + Self { + vm_run_data: value.vm_run_data.into(), + merkle_paths: value.merkle_paths, + previous_batch_metadata: value.previous_batch_metadata, + eip_4844_blobs: value.eip_4844_blobs, + } + } +} + impl StoredObject for WitnessInputData { const BUCKET: Bucket = Bucket::WitnessInput; @@ -180,7 +241,17 @@ impl StoredObject for WitnessInputData { format!("witness_inputs_{key}.bin") } - serialize_using_bincode!(); + fn serialize(&self) -> Result, BoxedError> { + zksync_object_store::bincode::serialize(self).map_err(Into::into) + } + + fn deserialize(bytes: Vec) -> Result { + zksync_object_store::bincode::deserialize::(&bytes).or_else(|_| { + zksync_object_store::bincode::deserialize::(&bytes) + .map(Into::into) + .map_err(Into::into) + }) + } } #[derive(Debug, Clone, Serialize, Deserialize)] diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs index 9391c8627573..209ab7c24f98 100644 --- a/core/lib/types/src/api/en.rs +++ b/core/lib/types/src/api/en.rs @@ -64,3 +64,9 @@ pub struct ConsensusGenesis(pub serde_json::Value); /// The wrapped JSON value corresponds to `zksync_dal::consensus::AttestationStatus`. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct AttestationStatus(pub serde_json::Value); + +/// Block metadata that should have been committed to on L1, but it is not. +/// +/// The wrapped JSON value corresponds to `zksync_dal::consensus::BlockMetadata`. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BlockMetadata(pub serde_json::Value); diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index 9c1609bf1756..9211a6f1d8cf 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -65,6 +65,28 @@ pub struct L1BatchHeader { /// Version of protocol used for the L1 batch. pub protocol_version: Option, pub pubdata_input: Option>, + pub fee_address: Address, +} + +impl L1BatchHeader { + pub fn to_unsealed_header(&self, fee_input: BatchFeeInput) -> UnsealedL1BatchHeader { + UnsealedL1BatchHeader { + number: self.number, + timestamp: self.timestamp, + protocol_version: self.protocol_version, + fee_address: self.fee_address, + fee_input, + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct UnsealedL1BatchHeader { + pub number: L1BatchNumber, + pub timestamp: u64, + pub protocol_version: Option, + pub fee_address: Address, + pub fee_input: BatchFeeInput, } /// Holder for the L2 block metadata that is not available from transactions themselves. @@ -132,6 +154,7 @@ impl L1BatchHeader { system_logs: vec![], protocol_version: Some(protocol_version), pubdata_input: Some(vec![]), + fee_address: Default::default(), } } diff --git a/core/lib/utils/src/bytecode.rs b/core/lib/utils/src/bytecode.rs index 48bdb4330207..01cce5bc34d0 100644 --- a/core/lib/utils/src/bytecode.rs +++ b/core/lib/utils/src/bytecode.rs @@ -1,5 +1,6 @@ // FIXME: move to basic_types? +use zk_evm::k256::sha2::{Digest, Sha256}; use zksync_basic_types::H256; use crate::bytes_to_chunks; @@ -40,6 +41,7 @@ pub fn validate_bytecode(code: &[u8]) -> Result<(), InvalidBytecodeError> { Ok(()) } +/// Hashes the provided EraVM bytecode. 
pub fn hash_bytecode(code: &[u8]) -> H256 { let chunked_code = bytes_to_chunks(code); let hash = zk_evm::zkevm_opcode_defs::utils::bytecode_to_code_hash(&chunked_code) @@ -55,3 +57,62 @@ pub fn bytecode_len_in_words(bytecodehash: &H256) -> u16 { pub fn bytecode_len_in_bytes(bytecodehash: H256) -> usize { bytecode_len_in_words(&bytecodehash) as usize * 32 } + +/// Bytecode marker encoded in the first byte of the bytecode hash. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[repr(u8)] +pub enum BytecodeMarker { + /// EraVM bytecode marker (1). + EraVm = 1, + /// EVM bytecode marker (2). + Evm = 2, +} + +impl BytecodeMarker { + /// Parses a marker from the bytecode hash. + pub fn new(bytecode_hash: H256) -> Option { + Some(match bytecode_hash.as_bytes()[0] { + val if val == Self::EraVm as u8 => Self::EraVm, + val if val == Self::Evm as u8 => Self::Evm, + _ => return None, + }) + } +} + +/// Hashes the provided EVM bytecode. The bytecode must be padded to an odd number of 32-byte words; +/// bytecodes stored in the known codes storage satisfy this requirement automatically. +pub fn hash_evm_bytecode(bytecode: &[u8]) -> H256 { + validate_bytecode(bytecode).expect("invalid EVM bytecode"); + + let mut hasher = Sha256::new(); + let len = bytecode.len() as u16; + hasher.update(bytecode); + let result = hasher.finalize(); + + let mut output = [0u8; 32]; + output[..].copy_from_slice(result.as_slice()); + output[0] = BytecodeMarker::Evm as u8; + output[1] = 0; + output[2..4].copy_from_slice(&len.to_be_bytes()); + + H256(output) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytecode_markers_are_valid() { + let bytecode_hash = hash_bytecode(&[0; 32]); + assert_eq!( + BytecodeMarker::new(bytecode_hash), + Some(BytecodeMarker::EraVm) + ); + let bytecode_hash = hash_evm_bytecode(&[0; 32]); + assert_eq!( + BytecodeMarker::new(bytecode_hash), + Some(BytecodeMarker::Evm) + ); + } +} diff --git a/core/lib/utils/src/env.rs b/core/lib/utils/src/env.rs index 5ae07caf1486..8f4aa1da9400 100644 --- a/core/lib/utils/src/env.rs +++ b/core/lib/utils/src/env.rs @@ -19,8 +19,8 @@ pub enum Workspace<'a> { Core(&'a Path), /// `prover` folder. Prover(&'a Path), - /// `toolbox` folder. - Toolbox(&'a Path), + /// ZK Stack CLI folder. + ZkStackCli(&'a Path), } impl Workspace<'static> { @@ -48,7 +48,7 @@ impl Workspace<'static> { impl<'a> Workspace<'a> { const PROVER_DIRECTORY_NAME: &'static str = "prover"; - const TOOLBOX_DIRECTORY_NAME: &'static str = "zk_toolbox"; + const ZKSTACK_CLI_DIRECTORY_NAME: &'static str = "zkstack_cli"; /// Returns the path of the core workspace. /// For `Workspace::None`, considers the current directory to represent core workspace. @@ -56,7 +56,7 @@ impl<'a> Workspace<'a> { match self { Self::None => PathBuf::from("."), Self::Core(path) => path.into(), - Self::Prover(path) | Self::Toolbox(path) => path.parent().unwrap().into(), + Self::Prover(path) | Self::ZkStackCli(path) => path.parent().unwrap().into(), } } @@ -68,11 +68,11 @@ impl<'a> Workspace<'a> { } } - /// Returns the path of the `zk_toolbox`` workspace. - pub fn toolbox(self) -> PathBuf { + /// Returns the path of the ZK Stack CLI workspace. 
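+    // Editor's illustration (an addition, not in the original patch): with a
+    // checkout at `/repo/zksync-era`,
+    //   Workspace::from(Path::new("/repo/zksync-era/zkstack_cli")).core()
+    //     == PathBuf::from("/repo/zksync-era"), and
+    //   Workspace::from(Path::new("/repo/zksync-era")).zkstack_cli()
+    //     == PathBuf::from("/repo/zksync-era/zkstack_cli"),
+    // since `core()` takes the parent of a prover/CLI workspace path and
+    // `zkstack_cli()` joins `ZKSTACK_CLI_DIRECTORY_NAME` onto the core path.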
+ pub fn zkstack_cli(self) -> PathBuf { match self { - Self::Toolbox(path) => path.into(), - _ => self.core().join(Self::TOOLBOX_DIRECTORY_NAME), + Self::ZkStackCli(path) => path.into(), + _ => self.core().join(Self::ZKSTACK_CLI_DIRECTORY_NAME), } } } @@ -81,8 +81,8 @@ impl<'a> From<&'a Path> for Workspace<'a> { fn from(path: &'a Path) -> Self { if path.ends_with(Self::PROVER_DIRECTORY_NAME) { Self::Prover(path) - } else if path.ends_with(Self::TOOLBOX_DIRECTORY_NAME) { - Self::Toolbox(path) + } else if path.ends_with(Self::ZKSTACK_CLI_DIRECTORY_NAME) { + Self::ZkStackCli(path) } else { Self::Core(path) } @@ -154,16 +154,16 @@ mod tests { let workspace = Workspace::locate(); assert_matches!(workspace, Workspace::Core(_)); let core_path = workspace.core(); - // Check if prover and toolbox directories exist. + // Check if prover and ZK Stack CLI directories exist. assert!(workspace.prover().exists()); assert_matches!( Workspace::from(workspace.prover().as_path()), Workspace::Prover(_) ); - assert!(workspace.toolbox().exists()); + assert!(workspace.zkstack_cli().exists()); assert_matches!( - Workspace::from(workspace.toolbox().as_path()), - Workspace::Toolbox(_) + Workspace::from(workspace.zkstack_cli().as_path()), + Workspace::ZkStackCli(_) ); // Prover. @@ -181,17 +181,17 @@ mod tests { Workspace::from(workspace.core().as_path()), Workspace::Core(_) ); - assert!(workspace.toolbox().exists()); + assert!(workspace.zkstack_cli().exists()); assert_matches!( - Workspace::from(workspace.toolbox().as_path()), - Workspace::Toolbox(_) + Workspace::from(workspace.zkstack_cli().as_path()), + Workspace::ZkStackCli(_) ); - // Toolbox. - std::env::set_current_dir(workspace.toolbox()).unwrap(); + // ZK Stack CLI + std::env::set_current_dir(workspace.zkstack_cli()).unwrap(); let workspace_path = locate_workspace_inner().unwrap(); let workspace = Workspace::from(workspace_path.as_path()); - assert_matches!(workspace, Workspace::Toolbox(_)); + assert_matches!(workspace, Workspace::ZkStackCli(_)); assert_eq!(workspace.core(), core_path); assert_matches!( Workspace::from(workspace.core().as_path()), diff --git a/core/lib/vm_executor/Cargo.toml b/core/lib/vm_executor/Cargo.toml index 089c2a9bcca7..a967aaa969ad 100644 --- a/core/lib/vm_executor/Cargo.toml +++ b/core/lib/vm_executor/Cargo.toml @@ -23,3 +23,6 @@ tokio.workspace = true anyhow.workspace = true tracing.workspace = true vise.workspace = true + +[dev-dependencies] +assert_matches.workspace = true diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index 146f0bb4e5c8..bc19086c9692 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -12,6 +12,7 @@ use zksync_multivm::{ ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, VmFactory, VmInterface, VmInterfaceHistoryEnabled, }, + is_supported_by_fast_vm, tracers::CallTracer, vm_fast, vm_latest::HistoryEnabled, @@ -159,6 +160,10 @@ impl BatchVm { storage_ptr: StoragePtr>, mode: FastVmMode, ) -> Self { + if !is_supported_by_fast_vm(system_env.version) { + return Self::Legacy(LegacyVmInstance::new(l1_batch_env, system_env, storage_ptr)); + } + match mode { FastVmMode::Old => { Self::Legacy(LegacyVmInstance::new(l1_batch_env, system_env, storage_ptr)) @@ -443,3 +448,50 @@ impl CommandReceiver { } } } + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + use zksync_multivm::interface::{storage::InMemoryStorage, TxExecutionMode}; + use zksync_types::ProtocolVersionId; + + use 
super::*; + use crate::testonly::{default_l1_batch_env, default_system_env, FAST_VM_MODES}; + + #[test] + fn selecting_vm_for_execution() { + let l1_batch_env = default_l1_batch_env(1); + let mut system_env = SystemEnv { + version: ProtocolVersionId::Version22, + ..default_system_env(TxExecutionMode::VerifyExecute) + }; + let storage = StorageView::new(InMemoryStorage::default()).to_rc_ptr(); + for mode in FAST_VM_MODES { + let vm = BatchVm::<_, ()>::new( + l1_batch_env.clone(), + system_env.clone(), + storage.clone(), + mode, + ); + assert_matches!(vm, BatchVm::Legacy(_)); + } + + system_env.version = ProtocolVersionId::latest(); + let vm = BatchVm::<_, ()>::new( + l1_batch_env.clone(), + system_env.clone(), + storage.clone(), + FastVmMode::Old, + ); + assert_matches!(vm, BatchVm::Legacy(_)); + let vm = BatchVm::<_, ()>::new( + l1_batch_env.clone(), + system_env.clone(), + storage.clone(), + FastVmMode::New, + ); + assert_matches!(vm, BatchVm::Fast(FastVmInstance::Fast(_))); + let vm = BatchVm::<_, ()>::new(l1_batch_env, system_env, storage, FastVmMode::Shadow); + assert_matches!(vm, BatchVm::Fast(FastVmInstance::Shadowed(_))); + } +} diff --git a/core/lib/vm_executor/src/lib.rs b/core/lib/vm_executor/src/lib.rs index 1a0fbb002df9..83edb77fd629 100644 --- a/core/lib/vm_executor/src/lib.rs +++ b/core/lib/vm_executor/src/lib.rs @@ -9,3 +9,5 @@ pub mod batch; pub mod oneshot; mod shared; pub mod storage; +#[cfg(test)] +mod testonly; diff --git a/core/lib/vm_executor/src/testonly.rs b/core/lib/vm_executor/src/testonly.rs new file mode 100644 index 000000000000..5bcd604a4324 --- /dev/null +++ b/core/lib/vm_executor/src/testonly.rs @@ -0,0 +1,45 @@ +use once_cell::sync::Lazy; +use zksync_contracts::BaseSystemContracts; +use zksync_multivm::{ + interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; +use zksync_types::{ + block::L2BlockHasher, fee_model::BatchFeeInput, vm::FastVmMode, Address, L1BatchNumber, + L2BlockNumber, L2ChainId, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, +}; + +static BASE_SYSTEM_CONTRACTS: Lazy = + Lazy::new(BaseSystemContracts::load_from_disk); + +pub(crate) const FAST_VM_MODES: [FastVmMode; 3] = + [FastVmMode::Old, FastVmMode::New, FastVmMode::Shadow]; + +pub(crate) fn default_system_env(execution_mode: TxExecutionMode) -> SystemEnv { + SystemEnv { + zk_porter_available: ZKPORTER_IS_AVAILABLE, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: BASE_SYSTEM_CONTRACTS.clone(), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode, + default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + chain_id: L2ChainId::default(), + } +} + +pub(crate) fn default_l1_batch_env(number: u32) -> L1BatchEnv { + L1BatchEnv { + previous_batch_hash: Some(H256::zero()), + number: L1BatchNumber(number), + timestamp: number.into(), + fee_account: Address::repeat_byte(0x22), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number, + timestamp: number.into(), + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(number - 1)), + max_virtual_blocks_to_create: 1, + }, + fee_input: BatchFeeInput::sensible_l1_pegged_default(), + } +} diff --git a/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs b/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs index dbc942476170..0011f0b138b5 100644 --- a/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs +++ b/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs @@ -1,5 +1,8 @@ use serde::{Deserialize, 
Serialize}; -use zksync_types::{fee_model::BatchFeeInput, Address, L1BatchNumber, H256}; +use zksync_types::{ + block::UnsealedL1BatchHeader, fee_model::BatchFeeInput, Address, L1BatchNumber, + ProtocolVersionId, H256, +}; use super::L2BlockEnv; @@ -21,3 +24,18 @@ pub struct L1BatchEnv { pub enforced_base_fee: Option, pub first_l2_block: L2BlockEnv, } + +impl L1BatchEnv { + pub fn into_unsealed_header( + self, + protocol_version: Option, + ) -> UnsealedL1BatchHeader { + UnsealedL1BatchHeader { + number: self.number, + timestamp: self.timestamp, + protocol_version, + fee_address: self.fee_account, + fee_input: self.fee_input, + } + } +} diff --git a/core/lib/web3_decl/src/namespaces/en.rs b/core/lib/web3_decl/src/namespaces/en.rs index 8a4d2db8c6fe..0f1fd9d34b83 100644 --- a/core/lib/web3_decl/src/namespaces/en.rs +++ b/core/lib/web3_decl/src/namespaces/en.rs @@ -28,6 +28,12 @@ pub trait EnNamespace { #[method(name = "consensusGlobalConfig")] async fn consensus_global_config(&self) -> RpcResult>; + #[method(name = "blockMetadata")] + async fn block_metadata( + &self, + block_number: L2BlockNumber, + ) -> RpcResult>; + /// Lists all tokens created at or before the specified `block_number`. /// /// This method is used by EN after snapshot recovery in order to recover token records. diff --git a/core/lib/web3_decl/src/namespaces/eth.rs b/core/lib/web3_decl/src/namespaces/eth.rs index 9f271d80cbcf..399773b845dc 100644 --- a/core/lib/web3_decl/src/namespaces/eth.rs +++ b/core/lib/web3_decl/src/namespaces/eth.rs @@ -13,7 +13,8 @@ use zksync_types::{ use crate::{ client::{ForWeb3Network, L2}, types::{ - Block, Bytes, Filter, FilterChanges, Index, Log, SyncState, TransactionReceipt, U256, U64, + Block, Bytes, Filter, FilterChanges, Index, Log, SyncState, TransactionReceipt, U64Number, + U256, U64, }, }; @@ -180,7 +181,7 @@ pub trait EthNamespace { #[method(name = "feeHistory")] async fn fee_history( &self, - block_count: U64, + block_count: U64Number, newest_block: BlockNumber, reward_percentiles: Vec, ) -> RpcResult; diff --git a/core/lib/web3_decl/src/types.rs b/core/lib/web3_decl/src/types.rs index 9994d21107be..36ee48a54a1b 100644 --- a/core/lib/web3_decl/src/types.rs +++ b/core/lib/web3_decl/src/types.rs @@ -16,7 +16,9 @@ use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; pub use zksync_types::{ api::{Block, BlockNumber, Log, TransactionReceipt, TransactionRequest}, ethabi, - web3::{BlockHeader, Bytes, CallRequest, FeeHistory, Index, SyncState, TraceFilter, Work}, + web3::{ + BlockHeader, Bytes, CallRequest, FeeHistory, Index, SyncState, TraceFilter, U64Number, Work, + }, Address, Transaction, H160, H256, H64, U256, U64, }; diff --git a/core/node/api_server/Cargo.toml b/core/node/api_server/Cargo.toml index d0723a9d23e7..067b9b3e3722 100644 --- a/core/node/api_server/Cargo.toml +++ b/core/node/api_server/Cargo.toml @@ -61,4 +61,5 @@ zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true assert_matches.workspace = true +const-decoder.workspace = true test-casing.workspace = true diff --git a/core/node/api_server/src/testonly.rs b/core/node/api_server/src/testonly.rs index 8dc7915385a1..45ed802d68f5 100644 --- a/core/node/api_server/src/testonly.rs +++ b/core/node/api_server/src/testonly.rs @@ -2,6 +2,7 @@ use std::{collections::HashMap, iter}; +use const_decoder::Decoder; use zk_evm_1_5_0::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; use zksync_contracts::{ eth_contract, get_loadnext_contract, load_contract, read_bytecode, @@ -26,6 
+27,30 @@ use zksync_types::{ }; use zksync_utils::{address_to_u256, u256_to_h256}; +pub(crate) const RAW_EVM_BYTECODE: &[u8] = &const_decoder::decode!( + Decoder::Hex, + b"00000000000000000000000000000000000000000000000000000000000001266080604052348015\ + 600e575f80fd5b50600436106030575f3560e01c8063816898ff146034578063fb5343f314604c57\ + 5b5f80fd5b604a60048036038101906046919060a6565b6066565b005b6052606f565b604051605d\ + 919060d9565b60405180910390f35b805f8190555050565b5f5481565b5f80fd5b5f819050919050\ + 565b6088816078565b81146091575f80fd5b50565b5f8135905060a0816081565b92915050565b5f\ + 6020828403121560b85760b76074565b5b5f60c3848285016094565b91505092915050565b60d381\ + 6078565b82525050565b5f60208201905060ea5f83018460cc565b9291505056fea2646970667358\ + 221220caca1247066da378f2ec77c310f2ae51576272367b4fa11cc4350af4e9ce4d0964736f6c63\ + 4300081a00330000000000000000000000000000000000000000000000000000" +); +pub(crate) const PROCESSED_EVM_BYTECODE: &[u8] = &const_decoder::decode!( + Decoder::Hex, + b"6080604052348015600e575f80fd5b50600436106030575f3560e01c8063816898ff146034578063\ + fb5343f314604c575b5f80fd5b604a60048036038101906046919060a6565b6066565b005b605260\ + 6f565b604051605d919060d9565b60405180910390f35b805f8190555050565b5f5481565b5f80fd\ + 5b5f819050919050565b6088816078565b81146091575f80fd5b50565b5f8135905060a081608156\ + 5b92915050565b5f6020828403121560b85760b76074565b5b5f60c3848285016094565b91505092\ + 915050565b60d3816078565b82525050565b5f60208201905060ea5f83018460cc565b9291505056\ + fea2646970667358221220caca1247066da378f2ec77c310f2ae51576272367b4fa11cc4350af4e9\ + ce4d0964736f6c634300081a0033" +); + const EXPENSIVE_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; const PRECOMPILES_CONTRACT_PATH: &str = diff --git a/core/node/api_server/src/utils.rs b/core/node/api_server/src/utils.rs index 6769e773dc77..c7a1134682bf 100644 --- a/core/node/api_server/src/utils.rs +++ b/core/node/api_server/src/utils.rs @@ -6,9 +6,33 @@ use std::{ time::{Duration, Instant}, }; +use anyhow::Context; use zksync_dal::{Connection, Core, DalError}; +use zksync_multivm::circuit_sequencer_api_latest::boojum::ethereum_types::U256; use zksync_web3_decl::error::Web3Error; +pub(crate) fn prepare_evm_bytecode(raw: &[u8]) -> anyhow::Result> { + // EVM bytecodes are prefixed with a big-endian `U256` bytecode length. + let bytecode_len_bytes = raw.get(..32).context("length < 32")?; + let bytecode_len = U256::from_big_endian(bytecode_len_bytes); + let bytecode_len: usize = bytecode_len + .try_into() + .map_err(|_| anyhow::anyhow!("length ({bytecode_len}) overflow"))?; + let bytecode = raw.get(32..(32 + bytecode_len)).with_context(|| { + format!( + "prefixed length ({bytecode_len}) exceeds real length ({})", + raw.len() - 32 + ) + })?; + // Since slicing above succeeded, this one is safe. + let padding = &raw[(32 + bytecode_len)..]; + anyhow::ensure!( + padding.iter().all(|&b| b == 0), + "bytecode padding contains non-zero bytes" + ); + Ok(bytecode.to_vec()) +} + /// Opens a readonly transaction over the specified connection. pub(crate) async fn open_readonly_transaction<'r>( conn: &'r mut Connection<'_, Core>, @@ -66,3 +90,15 @@ macro_rules! 
report_filter { ReportFilter::new($interval, &LAST_TIMESTAMP) }}; } + +#[cfg(test)] +mod tests { + use super::*; + use crate::testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}; + + #[test] + fn preparing_evm_bytecode() { + let prepared = prepare_evm_bytecode(RAW_EVM_BYTECODE).unwrap(); + assert_eq!(prepared, PROCESSED_EVM_BYTECODE); + } +} diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs index de7635263735..9f5e54a5f4f7 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs @@ -37,6 +37,15 @@ impl EnNamespaceServer for EnNamespace { .map_err(|err| self.current_method().map_err(err)) } + async fn block_metadata( + &self, + block_number: L2BlockNumber, + ) -> RpcResult> { + self.block_metadata_impl(block_number) + .await + .map_err(|err| self.current_method().map_err(err)) + } + async fn sync_tokens(&self, block_number: Option) -> RpcResult> { self.sync_tokens_impl(block_number) .await diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs index 15528c5b309b..cc2209a35d39 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs @@ -4,7 +4,7 @@ use zksync_types::{ Log, Transaction, TransactionId, TransactionReceipt, TransactionVariant, }, transaction_request::CallRequest, - web3::{Bytes, Index, SyncState}, + web3::{Bytes, Index, SyncState, U64Number}, Address, H256, U256, U64, }; use zksync_web3_decl::{ @@ -260,11 +260,11 @@ impl EthNamespaceServer for EthNamespace { async fn fee_history( &self, - block_count: U64, + block_count: U64Number, newest_block: BlockNumber, reward_percentiles: Vec, ) -> RpcResult { - self.fee_history_impl(block_count, newest_block, reward_percentiles) + self.fee_history_impl(block_count.into(), newest_block, reward_percentiles) .await .map_err(|err| self.current_method().map_err(err)) } diff --git a/core/node/api_server/src/web3/namespaces/en.rs b/core/node/api_server/src/web3/namespaces/en.rs index 721ca985ceb1..a09a0cb92fc7 100644 --- a/core/node/api_server/src/web3/namespaces/en.rs +++ b/core/node/api_server/src/web3/namespaces/en.rs @@ -1,5 +1,6 @@ use anyhow::Context as _; use zksync_config::{configs::EcosystemContracts, GenesisConfig}; +use zksync_consensus_roles::validator; use zksync_dal::{CoreDal, DalError}; use zksync_types::{ api::en, protocol_version::ProtocolSemanticVersion, tokens::TokenInfo, Address, L1BatchNumber, @@ -86,6 +87,36 @@ impl EnNamespace { ))) } + #[tracing::instrument(skip(self))] + pub async fn block_metadata_impl( + &self, + block_number: L2BlockNumber, + ) -> Result, Web3Error> { + let Some(meta) = self + .state + .acquire_connection() + .await? + // unwrap is ok, because we start outermost transaction. + .transaction_builder() + .unwrap() + // run readonly transaction to perform consistent reads. + .set_readonly() + .build() + .await + .context("TransactionBuilder::build()")? + .consensus_dal() + .block_metadata(validator::BlockNumber(block_number.0.into())) + .await? 
+ else { + return Ok(None); + }; + Ok(Some(en::BlockMetadata( + zksync_protobuf::serde::Serialize + .proto_fmt(&meta, serde_json::value::Serializer) + .unwrap(), + ))) + } + pub(crate) fn current_method(&self) -> &MethodTracer { &self.state.current_method } diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index 44362dd098e0..4439fc257cfb 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -12,7 +12,7 @@ use zksync_types::{ web3::{self, Bytes, SyncInfo, SyncState}, AccountTreeId, L2BlockNumber, StorageKey, H256, L2_BASE_TOKEN_ADDRESS, U256, }; -use zksync_utils::u256_to_h256; +use zksync_utils::{bytecode::BytecodeMarker, u256_to_h256}; use zksync_web3_decl::{ error::Web3Error, types::{Address, Block, Filter, FilterChanges, Log, U64}, @@ -21,7 +21,7 @@ use zksync_web3_decl::{ use crate::{ execution_sandbox::BlockArgs, tx_sender::BinarySearchKind, - utils::open_readonly_transaction, + utils::{open_readonly_transaction, prepare_evm_bytecode}, web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, state::RpcState, TypedFilter}, }; @@ -397,7 +397,22 @@ impl EthNamespace { .get_contract_code_unchecked(address, block_number) .await .map_err(DalError::generalize)?; - Ok(contract_code.unwrap_or_default().into()) + let Some(contract_code) = contract_code else { + return Ok(Bytes::default()); + }; + // Check if the bytecode is an EVM bytecode, and if so, pre-process it correspondingly. + let marker = BytecodeMarker::new(contract_code.bytecode_hash); + let prepared_bytecode = if marker == Some(BytecodeMarker::Evm) { + prepare_evm_bytecode(&contract_code.bytecode).with_context(|| { + format!( + "malformed EVM bytecode at address {address:?}, hash = {:?}", + contract_code.bytecode_hash + ) + })? + } else { + contract_code.bytecode + }; + Ok(prepared_bytecode.into()) } pub fn chain_id_impl(&self) -> U64 { @@ -668,7 +683,7 @@ impl EthNamespace { pub async fn fee_history_impl( &self, - block_count: U64, + block_count: u64, newest_block: BlockNumber, reward_percentiles: Vec, ) -> Result { @@ -676,10 +691,7 @@ impl EthNamespace { .set_block_id(BlockId::Number(newest_block)); // Limit `block_count`. 
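         // Editor's note (illustrative): for any `limit >= 1`, the replaced
         // `.min(limit).max(1)` chain and the new `clamp(1, limit)` agree:
         //   0.min(10).max(1) == 0.clamp(1, 10) == 1
         //   5.min(10).max(1) == 5.clamp(1, 10) == 5
         //   99.min(10).max(1) == 99.clamp(1, 10) == 10
         // (`clamp(1, 0)` would panic, but the API config presumably keeps
         // `fee_history_limit >= 1`.)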
- let block_count = block_count - .as_u64() - .min(self.state.api_config.fee_history_limit) - .max(1); + let block_count = block_count.clamp(1, self.state.api_config.fee_history_limit); let mut connection = self.state.acquire_connection().await?; let newest_l2_block = self diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index 632e263c6536..a8d90c281a75 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -21,6 +21,7 @@ use zksync_multivm::interface::{ TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionMetrics, }; +use zksync_node_fee_model::BatchFeeModelInputProvider; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{ create_l1_batch, create_l1_batch_metadata, create_l2_block, create_l2_transaction, @@ -31,17 +32,22 @@ use zksync_system_constants::{ }; use zksync_types::{ api, - block::{pack_block_info, L2BlockHeader}, + block::{pack_block_info, L2BlockHasher, L2BlockHeader}, + fee_model::{BatchFeeInput, FeeParams}, get_nonce_key, l2::L2Tx, storage::get_code_key, + system_contracts::get_system_smart_contracts, tokens::{TokenInfo, TokenMetadata}, tx::IncludedTxLocation, utils::{storage_key_for_eth_balance, storage_key_for_standard_token_balance}, AccountTreeId, Address, L1BatchNumber, Nonce, ProtocolVersionId, StorageKey, StorageLog, H256, U256, U64, }; -use zksync_utils::u256_to_h256; +use zksync_utils::{ + bytecode::{hash_bytecode, hash_evm_bytecode}, + u256_to_h256, +}; use zksync_vm_executor::oneshot::MockOneshotExecutor; use zksync_web3_decl::{ client::{Client, DynClient, L2}, @@ -50,7 +56,7 @@ use zksync_web3_decl::{ http_client::HttpClient, rpc_params, types::{ - error::{ErrorCode, OVERSIZED_RESPONSE_CODE}, + error::{ErrorCode, INVALID_PARAMS_CODE, OVERSIZED_RESPONSE_CODE}, ErrorObjectOwned, }, }, @@ -58,7 +64,10 @@ use zksync_web3_decl::{ }; use super::*; -use crate::web3::testonly::TestServerBuilder; +use crate::{ + testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}, + web3::testonly::TestServerBuilder, +}; mod debug; mod filters; @@ -428,6 +437,14 @@ async fn store_events( Ok((tx_location, events)) } +fn scaled_sensible_fee_input(scale: f64) -> BatchFeeInput { + ::default_batch_fee_input_scaled( + FeeParams::sensible_v1_default(), + scale, + scale, + ) +} + #[derive(Debug)] struct HttpServerBasicsTest; @@ -625,7 +642,7 @@ impl HttpTest for StorageAccessWithSnapshotRecovery { fn storage_initialization(&self) -> StorageInitialization { let address = Address::repeat_byte(1); let code_key = get_code_key(&address); - let code_hash = H256::repeat_byte(2); + let code_hash = hash_bytecode(&[0; 32]); let balance_key = storage_key_for_eth_balance(&address); let logs = vec![ StorageLog::new_write_log(code_key, code_hash), @@ -1102,3 +1119,233 @@ impl HttpTest for GenesisConfigTest { async fn tracing_genesis_config() { test_http_server(GenesisConfigTest).await; } + +#[derive(Debug)] +struct GetBytecodeTest; + +impl GetBytecodeTest { + async fn insert_evm_bytecode( + connection: &mut Connection<'_, Core>, + at_block: L2BlockNumber, + address: Address, + ) -> anyhow::Result<()> { + let evm_bytecode_hash = hash_evm_bytecode(RAW_EVM_BYTECODE); + let code_log = StorageLog::new_write_log(get_code_key(&address), evm_bytecode_hash); + connection + .storage_logs_dal() + .append_storage_logs(at_block, &[code_log]) + .await?; + + let factory_deps = HashMap::from([(evm_bytecode_hash, 
RAW_EVM_BYTECODE.to_vec())]); + connection + .factory_deps_dal() + .insert_factory_deps(at_block, &factory_deps) + .await?; + Ok(()) + } +} + +#[async_trait] +impl HttpTest for GetBytecodeTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let genesis_evm_address = Address::repeat_byte(1); + let mut connection = pool.connection().await?; + Self::insert_evm_bytecode(&mut connection, L2BlockNumber(0), genesis_evm_address).await?; + + for contract in get_system_smart_contracts(false) { + let bytecode = client + .get_code(*contract.account_id.address(), None) + .await?; + assert_eq!(bytecode.0, contract.bytecode); + } + + let bytecode = client.get_code(genesis_evm_address, None).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + + let latest_block_variants = [ + api::BlockNumber::Pending, + api::BlockNumber::Latest, + api::BlockNumber::Committed, + ]; + let latest_block_variants = latest_block_variants.map(api::BlockIdVariant::BlockNumber); + + let genesis_block_variants = [ + api::BlockIdVariant::BlockNumber(api::BlockNumber::Earliest), + api::BlockIdVariant::BlockNumber(api::BlockNumber::Number(0.into())), + api::BlockIdVariant::BlockHashObject(api::BlockHashObject { + block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + }), + ]; + for at_block in latest_block_variants + .into_iter() + .chain(genesis_block_variants) + { + println!("Testing {at_block:?} with genesis EVM code, latest block: 0"); + let bytecode = client.get_code(genesis_evm_address, Some(at_block)).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + } + + // Create another block with an EVM bytecode. + let new_bytecode_address = Address::repeat_byte(2); + let mut connection = pool.connection().await?; + let block_header = store_l2_block(&mut connection, L2BlockNumber(1), &[]).await?; + Self::insert_evm_bytecode(&mut connection, L2BlockNumber(1), new_bytecode_address).await?; + + let bytecode = client.get_code(genesis_evm_address, None).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + let bytecode = client.get_code(new_bytecode_address, None).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + + let new_block_variants = [ + api::BlockIdVariant::BlockNumber(api::BlockNumber::Number(1.into())), + api::BlockIdVariant::BlockHashObject(api::BlockHashObject { + block_hash: block_header.hash, + }), + ]; + for at_block in latest_block_variants.into_iter().chain(new_block_variants) { + println!("Testing {at_block:?} with new EVM code, latest block: 1"); + let bytecode = client + .get_code(new_bytecode_address, Some(at_block)) + .await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + } + for at_block in genesis_block_variants { + println!("Testing {at_block:?} with new EVM code, latest block: 1"); + let bytecode = client + .get_code(new_bytecode_address, Some(at_block)) + .await?; + assert!(bytecode.0.is_empty()); + } + + for at_block in latest_block_variants + .into_iter() + .chain(new_block_variants) + .chain(genesis_block_variants) + { + println!("Testing {at_block:?} with genesis EVM code, latest block: 1"); + let bytecode = client.get_code(genesis_evm_address, Some(at_block)).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + } + Ok(()) + } +} + +#[tokio::test] +async fn getting_bytecodes() { + test_http_server(GetBytecodeTest).await; +} + +#[derive(Debug)] +struct FeeHistoryTest; + +#[async_trait] +impl HttpTest for FeeHistoryTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> 
anyhow::Result<()> { + let mut connection = pool.connection().await?; + let block1 = L2BlockHeader { + batch_fee_input: scaled_sensible_fee_input(1.0), + base_fee_per_gas: 100, + ..create_l2_block(1) + }; + store_custom_l2_block(&mut connection, &block1, &[]).await?; + let block2 = L2BlockHeader { + batch_fee_input: scaled_sensible_fee_input(2.0), + base_fee_per_gas: 200, + ..create_l2_block(2) + }; + store_custom_l2_block(&mut connection, &block2, &[]).await?; + + let all_pubdata_prices = [ + 0, + block1.batch_fee_input.fair_pubdata_price(), + block2.batch_fee_input.fair_pubdata_price(), + ] + .map(U256::from); + + let history = client + .fee_history(1_000.into(), api::BlockNumber::Latest, vec![]) + .await?; + assert_eq!(history.inner.oldest_block, 0.into()); + assert_eq!( + history.inner.base_fee_per_gas, + [0, 100, 200, 200].map(U256::from) // The latest value is duplicated + ); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices); + // Values below are not filled. + assert_eq!(history.inner.gas_used_ratio, [0.0; 3]); + assert_eq!(history.inner.base_fee_per_blob_gas, [U256::zero(); 4]); + assert_eq!(history.inner.blob_gas_used_ratio, [0.0; 3]); + + // Check supplying hexadecimal block count + let hex_history: api::FeeHistory = client + .request( + "eth_feeHistory", + rpc_params!["0xaa", "latest", [] as [f64; 0]], + ) + .await?; + assert_eq!(hex_history, history); + + // ...and explicitly decimal count (which should've been supplied in the first call) for exhaustiveness + let dec_history: api::FeeHistory = client + .request( + "eth_feeHistory", + rpc_params![1_000, "latest", [] as [f64; 0]], + ) + .await?; + assert_eq!(dec_history, history); + + // Check partial histories: blocks 0..=1 + let history = client + .fee_history(1_000.into(), api::BlockNumber::Number(1.into()), vec![]) + .await?; + assert_eq!(history.inner.oldest_block, 0.into()); + assert_eq!( + history.inner.base_fee_per_gas, + [0, 100, 100].map(U256::from) + ); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices[..2]); + + // Blocks 1..=2 + let history = client + .fee_history(2.into(), api::BlockNumber::Latest, vec![]) + .await?; + assert_eq!(history.inner.oldest_block, 1.into()); + assert_eq!( + history.inner.base_fee_per_gas, + [100, 200, 200].map(U256::from) + ); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices[1..]); + + // Blocks 1..=1 + let history = client + .fee_history(1.into(), api::BlockNumber::Number(1.into()), vec![]) + .await?; + assert_eq!(history.inner.oldest_block, 1.into()); + assert_eq!(history.inner.base_fee_per_gas, [100, 100].map(U256::from)); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices[1..2]); + + // Non-existing newest block. 
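+        // Editor's note on the two raw requests above: both the hex "0xaa" and
+        // the decimal 1_000 arguments deserialize into the `U64Number` type that
+        // this diff wires into `eth_feeHistory`, which is why both raw calls
+        // succeed and compare equal to the typed `fee_history` response.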
+ let err = client + .fee_history(1000.into(), api::BlockNumber::Number(100.into()), vec![]) + .await + .unwrap_err(); + assert_matches!( + err, + ClientError::Call(err) if err.code() == INVALID_PARAMS_CODE + ); + Ok(()) + } +} + +#[tokio::test] +async fn getting_fee_history() { + test_http_server(FeeHistoryTest).await; +} diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index e29ea246213b..1f843e06fabf 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -12,14 +12,10 @@ use api::state_override::{OverrideAccount, StateOverride}; use zksync_multivm::interface::{ ExecutionResult, VmExecutionLogs, VmExecutionResultAndLogs, VmRevertReason, }; -use zksync_node_fee_model::BatchFeeModelInputProvider; use zksync_types::{ - api::ApiStorageLog, - fee_model::{BatchFeeInput, FeeParams}, - get_intrinsic_constants, - transaction_request::CallRequest, - K256PrivateKey, L2ChainId, PackedEthSignature, StorageLogKind, StorageLogWithPreviousValue, - U256, + api::ApiStorageLog, fee_model::BatchFeeInput, get_intrinsic_constants, + transaction_request::CallRequest, K256PrivateKey, L2ChainId, PackedEthSignature, + StorageLogKind, StorageLogWithPreviousValue, U256, }; use zksync_utils::u256_to_h256; use zksync_vm_executor::oneshot::MockOneshotExecutor; @@ -42,11 +38,7 @@ impl ExpectedFeeInput { fn expect_for_block(&self, number: api::BlockNumber, scale: f64) { *self.0.lock().unwrap() = match number { api::BlockNumber::Number(number) => create_l2_block(number.as_u32()).batch_fee_input, - _ => ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - scale, - scale, - ), + _ => scaled_sensible_fee_input(scale), }; } @@ -165,12 +157,7 @@ impl HttpTest for CallTest { // Check that the method handler fetches fee inputs for recent blocks. To do that, we create a new block // with a large fee input; it should be loaded by `ApiFeeInputProvider` and override the input provided by the wrapped mock provider. let mut block_header = create_l2_block(2); - block_header.batch_fee_input = - ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - 2.5, - 2.5, - ); + block_header.batch_fee_input = scaled_sensible_fee_input(2.5); store_custom_l2_block(&mut connection, &block_header, &[]).await?; // Fee input is not scaled further as per `ApiFeeInputProvider` implementation self.fee_input.expect_custom(block_header.batch_fee_input); @@ -607,12 +594,7 @@ impl HttpTest for TraceCallTest { // Check that the method handler fetches fee inputs for recent blocks. To do that, we create a new block // with a large fee input; it should be loaded by `ApiFeeInputProvider` and override the input provided by the wrapped mock provider. 
let mut block_header = create_l2_block(2); - block_header.batch_fee_input = - ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - 3.0, - 3.0, - ); + block_header.batch_fee_input = scaled_sensible_fee_input(3.0); store_custom_l2_block(&mut connection, &block_header, &[]).await?; // Fee input is not scaled further as per `ApiFeeInputProvider` implementation self.fee_input.expect_custom(block_header.batch_fee_input); diff --git a/core/node/block_reverter/src/tests.rs b/core/node/block_reverter/src/tests.rs index b29d01af39a4..85d894b7fd57 100644 --- a/core/node/block_reverter/src/tests.rs +++ b/core/node/block_reverter/src/tests.rs @@ -87,6 +87,7 @@ async fn setup_storage(storage: &mut Connection<'_, Core>, storage_logs: &[Stora system_logs: vec![], protocol_version: Some(ProtocolVersionId::latest()), pubdata_input: None, + fee_address: Default::default(), }; storage .blocks_dal() diff --git a/core/node/consensus/src/batch.rs b/core/node/consensus/src/batch.rs deleted file mode 100644 index af38f446c1b3..000000000000 --- a/core/node/consensus/src/batch.rs +++ /dev/null @@ -1,275 +0,0 @@ -//! L1 Batch representation for sending over p2p network. -use anyhow::Context as _; -use zksync_concurrency::{ctx, error::Wrap as _}; -use zksync_consensus_roles::validator; -use zksync_dal::consensus_dal::Payload; -use zksync_l1_contract_interface::i_executor; -use zksync_metadata_calculator::api_server::{TreeApiClient, TreeEntryWithProof}; -use zksync_system_constants as constants; -use zksync_types::{ - abi, - block::{unpack_block_info, L2BlockHasher}, - AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, Transaction, H256, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::storage::ConnectionPool; - -/// Commitment to the last block of a batch. -pub(crate) struct LastBlockCommit { - /// Hash of the `StoredBatchInfo` which is stored on L1. - /// The hashed `StoredBatchInfo` contains a `root_hash` of the L2 state, - /// which contains state of the `SystemContext` contract, - /// which contains enough data to reconstruct the hash - /// of the last L2 block of the batch. - pub(crate) info: H256, -} - -/// Witness proving what is the last block of a batch. -/// Contains the hash and the number of the last block. -pub(crate) struct LastBlockWitness { - info: i_executor::structures::StoredBatchInfo, - protocol_version: ProtocolVersionId, - - current_l2_block_info: TreeEntryWithProof, - tx_rolling_hash: TreeEntryWithProof, - l2_block_hash_entry: TreeEntryWithProof, -} - -/// Commitment to an L1 batch. -pub(crate) struct L1BatchCommit { - pub(crate) number: L1BatchNumber, - pub(crate) this_batch: LastBlockCommit, - pub(crate) prev_batch: LastBlockCommit, -} - -/// L1Batch with witness that can be -/// verified against `L1BatchCommit`. -pub struct L1BatchWithWitness { - pub(crate) blocks: Vec, - pub(crate) this_batch: LastBlockWitness, - pub(crate) prev_batch: LastBlockWitness, -} - -impl LastBlockWitness { - /// Address of the SystemContext contract. - fn system_context_addr() -> AccountTreeId { - AccountTreeId::new(constants::SYSTEM_CONTEXT_ADDRESS) - } - - /// Storage key of the `SystemContext.current_l2_block_info` field. - fn current_l2_block_info_key() -> U256 { - StorageKey::new( - Self::system_context_addr(), - constants::SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ) - .hashed_key_u256() - } - - /// Storage key of the `SystemContext.tx_rolling_hash` field. 
- fn tx_rolling_hash_key() -> U256 { - StorageKey::new( - Self::system_context_addr(), - constants::SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ) - .hashed_key_u256() - } - - /// Storage key of the entry of the `SystemContext.l2BlockHash[]` array, corresponding to l2 - /// block with number i. - fn l2_block_hash_entry_key(i: L2BlockNumber) -> U256 { - let key = h256_to_u256(constants::SYSTEM_CONTEXT_CURRENT_L2_BLOCK_HASHES_POSITION) - + U256::from(i.0) % U256::from(constants::SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES); - StorageKey::new(Self::system_context_addr(), u256_to_h256(key)).hashed_key_u256() - } - - /// Loads a `LastBlockWitness` from storage. - async fn load( - ctx: &ctx::Ctx, - n: L1BatchNumber, - pool: &ConnectionPool, - tree: &dyn TreeApiClient, - ) -> ctx::Result { - let mut conn = pool.connection(ctx).await.wrap("pool.connection()")?; - let batch = conn - .batch(ctx, n) - .await - .wrap("batch()")? - .context("batch not in storage")?; - - let proofs = tree - .get_proofs( - n, - vec![ - Self::current_l2_block_info_key(), - Self::tx_rolling_hash_key(), - ], - ) - .await - .context("get_proofs()")?; - if proofs.len() != 2 { - return Err(anyhow::format_err!("proofs.len()!=2").into()); - } - let current_l2_block_info = proofs[0].clone(); - let tx_rolling_hash = proofs[1].clone(); - let (block_number, _) = unpack_block_info(current_l2_block_info.value.as_bytes().into()); - let prev = L2BlockNumber( - block_number - .checked_sub(1) - .context("L2BlockNumber underflow")? - .try_into() - .context("L2BlockNumber overflow")?, - ); - let proofs = tree - .get_proofs(n, vec![Self::l2_block_hash_entry_key(prev)]) - .await - .context("get_proofs()")?; - if proofs.len() != 1 { - return Err(anyhow::format_err!("proofs.len()!=1").into()); - } - let l2_block_hash_entry = proofs[0].clone(); - Ok(Self { - info: i_executor::structures::StoredBatchInfo::from(&batch), - protocol_version: batch - .header - .protocol_version - .context("missing protocol_version")?, - - current_l2_block_info, - tx_rolling_hash, - l2_block_hash_entry, - }) - } - - /// Verifies the proof against the commit and returns the hash - /// of the last L2 block. - pub(crate) fn verify(&self, comm: &LastBlockCommit) -> anyhow::Result<(L2BlockNumber, H256)> { - // Verify info. - anyhow::ensure!(comm.info == self.info.hash()); - - // Check the protocol version. - anyhow::ensure!( - self.protocol_version >= ProtocolVersionId::Version13, - "unsupported protocol version" - ); - - let (block_number, block_timestamp) = - unpack_block_info(self.current_l2_block_info.value.as_bytes().into()); - let prev = L2BlockNumber( - block_number - .checked_sub(1) - .context("L2BlockNumber underflow")? - .try_into() - .context("L2BlockNumber overflow")?, - ); - - // Verify merkle paths. 
- self.current_l2_block_info - .verify(Self::current_l2_block_info_key(), self.info.batch_hash) - .context("invalid merkle path for current_l2_block_info")?; - self.tx_rolling_hash - .verify(Self::tx_rolling_hash_key(), self.info.batch_hash) - .context("invalid merkle path for tx_rolling_hash")?; - self.l2_block_hash_entry - .verify(Self::l2_block_hash_entry_key(prev), self.info.batch_hash) - .context("invalid merkle path for l2_block_hash entry")?; - - let block_number = L2BlockNumber(block_number.try_into().context("block_number overflow")?); - // Derive hash of the last block - Ok(( - block_number, - L2BlockHasher::hash( - block_number, - block_timestamp, - self.l2_block_hash_entry.value, - self.tx_rolling_hash.value, - self.protocol_version, - ), - )) - } - - /// Last L2 block of the batch. - pub fn last_block(&self) -> validator::BlockNumber { - let (n, _) = unpack_block_info(self.current_l2_block_info.value.as_bytes().into()); - validator::BlockNumber(n) - } -} - -impl L1BatchWithWitness { - /// Loads an `L1BatchWithWitness` from storage. - pub(crate) async fn load( - ctx: &ctx::Ctx, - number: L1BatchNumber, - pool: &ConnectionPool, - tree: &dyn TreeApiClient, - ) -> ctx::Result { - let prev_batch = LastBlockWitness::load(ctx, number - 1, pool, tree) - .await - .with_wrap(|| format!("LastBlockWitness::make({})", number - 1))?; - let this_batch = LastBlockWitness::load(ctx, number, pool, tree) - .await - .with_wrap(|| format!("LastBlockWitness::make({number})"))?; - let mut conn = pool.connection(ctx).await.wrap("connection()")?; - let this = Self { - blocks: conn - .payloads( - ctx, - std::ops::Range { - start: prev_batch.last_block() + 1, - end: this_batch.last_block() + 1, - }, - ) - .await - .wrap("payloads()")?, - prev_batch, - this_batch, - }; - Ok(this) - } - - /// Verifies the L1Batch and witness against the commitment. - /// WARNING: the following fields of the payload are not currently verified: - /// * `l1_gas_price` - /// * `l2_fair_gas_price` - /// * `fair_pubdata_price` - /// * `virtual_blocks` - /// * `operator_address` - /// * `protocol_version` (present both in payload and witness, but neither has a commitment) - pub(crate) fn verify(&self, comm: &L1BatchCommit) -> anyhow::Result<()> { - let (last_number, last_hash) = self.this_batch.verify(&comm.this_batch)?; - let (mut prev_number, mut prev_hash) = self.prev_batch.verify(&comm.prev_batch)?; - anyhow::ensure!( - self.prev_batch - .info - .batch_number - .checked_add(1) - .context("batch_number overflow")? - == u64::from(comm.number.0) - ); - anyhow::ensure!(self.this_batch.info.batch_number == u64::from(comm.number.0)); - for (i, b) in self.blocks.iter().enumerate() { - anyhow::ensure!(b.l1_batch_number == comm.number); - anyhow::ensure!(b.protocol_version == self.this_batch.protocol_version); - anyhow::ensure!(b.last_in_batch == (i + 1 == self.blocks.len())); - prev_number += 1; - let mut hasher = L2BlockHasher::new(prev_number, b.timestamp, prev_hash); - for t in &b.transactions { - // Reconstruct transaction by converting it back and forth to `abi::Transaction`. - // This allows us to verify that the transaction actually matches the transaction - // hash. - // TODO: make consensus payload contain `abi::Transaction` instead. - // TODO: currently the payload doesn't contain the block number, which is - // annoying. Consider adding it to payload. 
- let t2 = Transaction::from_abi(abi::Transaction::try_from(t.clone())?, true)?; - anyhow::ensure!(t == &t2); - hasher.push_tx_hash(t.hash()); - } - prev_hash = hasher.finalize(self.this_batch.protocol_version); - anyhow::ensure!(prev_hash == b.hash); - } - anyhow::ensure!(prev_hash == last_hash); - anyhow::ensure!(prev_number == last_number); - Ok(()) - } -} diff --git a/core/node/consensus/src/config.rs b/core/node/consensus/src/config.rs index 3584d533f662..4ad7a551ab42 100644 --- a/core/node/consensus/src/config.rs +++ b/core/node/consensus/src/config.rs @@ -169,7 +169,6 @@ pub(super) fn executor( server_addr: cfg.server_addr, public_addr: net::Host(cfg.public_addr.0.clone()), max_payload_size: cfg.max_payload_size, - max_batch_size: cfg.max_batch_size, node_key: node_key(secrets) .context("node_key")? .context("missing node_key")?, @@ -184,6 +183,5 @@ pub(super) fn executor( gossip_static_outbound, rpc, debug_page, - batch_poll_interval: time::Duration::seconds(1), }) } diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index e4be8d9d6876..518a7ebb29aa 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -4,7 +4,7 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; -use zksync_consensus_storage::{BatchStore, BlockStore}; +use zksync_consensus_storage::{BlockStore, PersistentBlockStore as _}; use zksync_dal::consensus_dal; use zksync_node_sync::{fetcher::FetchedBlock, sync_action::ActionQueueSender, SyncState}; use zksync_types::L2BlockNumber; @@ -21,6 +21,10 @@ use crate::{ storage::{self, ConnectionPool}, }; +/// If less than TEMPORARY_FETCHER_THRESHOLD certificates are missing, +/// the temporary fetcher will stop fetching blocks. +pub(crate) const TEMPORARY_FETCHER_THRESHOLD: u64 = 10; + /// External node. pub(super) struct EN { pub(super) pool: ConnectionPool, @@ -32,8 +36,13 @@ impl EN { /// Task running a consensus node for the external node. /// It may be a validator, but it cannot be a leader (cannot propose blocks). /// - /// NOTE: Before starting the consensus node it fetches all the blocks + /// If `enable_pregenesis` is false, + /// before starting the consensus node it fetches all the blocks /// older than consensus genesis from the main node using json RPC. + /// NOTE: currently `enable_pregenesis` is hardcoded to `false` in `era.rs`. + /// True is used only in tests. Once the `block_metadata` RPC is enabled everywhere + /// this flag should be removed and fetching pregenesis blocks will always be done + /// over the gossip network. pub async fn run( self, ctx: &ctx::Ctx, @@ -41,6 +50,7 @@ impl EN { cfg: ConsensusConfig, secrets: ConsensusSecrets, build_version: Option, + enable_pregenesis: bool, ) -> anyhow::Result<()> { let attester = config::attester_key(&secrets).context("attester_key")?; @@ -72,13 +82,15 @@ impl EN { drop(conn); // Fetch blocks before the genesis. - self.fetch_blocks( - ctx, - &mut payload_queue, - Some(global_config.genesis.first_block), - ) - .await - .wrap("fetch_blocks()")?; + if !enable_pregenesis { + self.fetch_blocks( + ctx, + &mut payload_queue, + Some(global_config.genesis.first_block), + ) + .await + .wrap("fetch_blocks()")?; + } // Monitor the genesis of the main node. // If it changes, it means that a hard fork occurred and we need to reset the consensus state. @@ -102,21 +114,35 @@ impl EN { // Run consensus component. 
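// The TEMPORARY_FETCHER_THRESHOLD constant introduced above gates when the temporary
// JSON-RPC fetcher (added further below) gives way to p2p syncing: fetching continues
// only while the certified prefix trails the fetch position by more than the threshold.
// A tiny standalone sketch of that stopping condition (function name is illustrative):

const TEMPORARY_FETCHER_THRESHOLD: u64 = 10;

// `persisted_next` is the first block still missing a certificate;
// `target` is the next block the fetcher would pull.
fn should_keep_fetching(persisted_next: u64, target: u64) -> bool {
    persisted_next + TEMPORARY_FETCHER_THRESHOLD < target
}

fn main() {
    assert!(should_keep_fetching(0, 100)); // far behind: keep using JSON-RPC
    assert!(!should_keep_fetching(95, 100)); // within 10 blocks: let p2p take over
}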
// External nodes have a payload queue which they use to fetch data from the main node. - let (store, runner) = Store::new(ctx, self.pool.clone(), Some(payload_queue)) - .await - .wrap("Store::new()")?; + let (store, runner) = Store::new( + ctx, + self.pool.clone(), + Some(payload_queue), + Some(self.client.clone()), + ) + .await + .wrap("Store::new()")?; s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + // Run the temporary fetcher until the certificates are backfilled. + // Temporary fetcher should be removed once json RPC syncing is fully deprecated. + s.spawn_bg({ + let store = store.clone(); + async { + let store = store; + self.temporary_block_fetcher(ctx, &store).await?; + tracing::info!( + "temporary block fetcher finished, switching to p2p fetching only" + ); + Ok(()) + } + }); + let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) .await .wrap("BlockStore::new()")?; s.spawn_bg(async { Ok(runner.run(ctx).await?) }); - let (batch_store, runner) = BatchStore::new(ctx, Box::new(store.clone())) - .await - .wrap("BatchStore::new()")?; - s.spawn_bg(async { Ok(runner.run(ctx).await?) }); - let attestation = Arc::new(attestation::Controller::new(attester)); s.spawn_bg(self.run_attestation_controller( ctx, @@ -127,7 +153,6 @@ impl EN { let executor = executor::Executor { config: config::executor(&cfg, &secrets, &global_config, build_version)?, block_store, - batch_store, validator: config::validator_key(&secrets) .context("validator_key")? .map(|key| executor::Validator { @@ -210,10 +235,13 @@ impl EN { "waiting for hash of batch {:?}", status.next_batch_to_attest ); - let hash = self - .pool - .wait_for_batch_hash(ctx, status.next_batch_to_attest) - .await?; + let hash = consensus_dal::batch_hash( + &self + .pool + .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?, + ); let Some(committee) = registry .attester_committee_for( ctx, @@ -348,8 +376,42 @@ impl EN { } } + /// Fetches blocks from the main node directly, until the certificates + /// are backfilled. This allows for smooth transition from json RPC to p2p block syncing. + pub(crate) async fn temporary_block_fetcher( + &self, + ctx: &ctx::Ctx, + store: &Store, + ) -> ctx::Result<()> { + const MAX_CONCURRENT_REQUESTS: usize = 30; + scope::run!(ctx, |ctx, s| async { + let (send, mut recv) = ctx::channel::bounded(MAX_CONCURRENT_REQUESTS); + s.spawn(async { + let Some(mut next) = store.next_block(ctx).await? else { + return Ok(()); + }; + while store.persisted().borrow().next().0 + TEMPORARY_FETCHER_THRESHOLD < next.0 { + let n = L2BlockNumber(next.0.try_into().context("overflow")?); + self.sync_state.wait_for_main_node_block(ctx, n).await?; + send.send(ctx, s.spawn(self.fetch_block(ctx, n))).await?; + next = next.next(); + } + drop(send); + Ok(()) + }); + while let Ok(block) = recv.recv_or_disconnected(ctx).await? { + store + .queue_next_fetched_block(ctx, block.join(ctx).await?) + .await + .wrap("queue_next_fetched_block()")?; + } + Ok(()) + }) + .await + } + /// Fetches blocks from the main node in range `[cursor.next()..end)`. 
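// The `temporary_block_fetcher` added above pipelines up to MAX_CONCURRENT_REQUESTS
// block fetches through a bounded channel: fetches run concurrently, but results are
// consumed strictly in block-number order. A minimal self-contained tokio sketch of
// that pattern (the real code uses zksync_concurrency's ctx/scope; every name below
// is illustrative, not the actual API):

use tokio::{sync::mpsc, task::JoinHandle};

const MAX_CONCURRENT_REQUESTS: usize = 30;

// Stand-in for a JSON-RPC block fetch.
async fn fetch_block(n: u64) -> u64 {
    n
}

#[tokio::main]
async fn main() {
    // The channel's capacity bounds how many fetches can be in flight at once.
    let (send, mut recv) = mpsc::channel::<JoinHandle<u64>>(MAX_CONCURRENT_REQUESTS);
    let producer = tokio::spawn(async move {
        for n in 0..100u64 {
            // Spawn the fetch immediately; block on `send` only when the pipeline is full.
            send.send(tokio::spawn(fetch_block(n))).await.unwrap();
        }
        // Dropping `send` here closes the channel and ends the consumer loop.
    });
    let mut expected = 0;
    while let Some(handle) = recv.recv().await {
        // Awaiting handles in the order they were sent preserves block order.
        assert_eq!(handle.await.unwrap(), expected);
        expected += 1;
    }
    producer.await.unwrap();
}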
- pub(super) async fn fetch_blocks( + async fn fetch_blocks( &self, ctx: &ctx::Ctx, queue: &mut storage::PayloadQueue, @@ -363,7 +425,7 @@ impl EN { s.spawn(async { let send = send; while end.map_or(true, |end| next < end) { - let n = L2BlockNumber(next.0.try_into().unwrap()); + let n = L2BlockNumber(next.0.try_into().context("overflow")?); self.sync_state.wait_for_main_node_block(ctx, n).await?; send.send(ctx, s.spawn(self.fetch_block(ctx, n))).await?; next = next.next(); diff --git a/core/node/consensus/src/era.rs b/core/node/consensus/src/era.rs index 3150f839680e..916b7cdd89a5 100644 --- a/core/node/consensus/src/era.rs +++ b/core/node/consensus/src/era.rs @@ -59,8 +59,18 @@ pub async fn run_external_node( is_validator = secrets.validator_key.is_some(), "running external node" ); - en.run(ctx, actions, cfg, secrets, Some(build_version)) - .await + // We will enable it once the main node on all envs supports + // `block_metadata()` JSON RPC method. + let enable_pregenesis = false; + en.run( + ctx, + actions, + cfg, + secrets, + Some(build_version), + enable_pregenesis, + ) + .await } None => { tracing::info!("running fetcher"); diff --git a/core/node/consensus/src/lib.rs b/core/node/consensus/src/lib.rs index ff9cdf865281..8bf078120aa9 100644 --- a/core/node/consensus/src/lib.rs +++ b/core/node/consensus/src/lib.rs @@ -6,10 +6,6 @@ use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; mod abi; -// Currently `batch` module is only used in tests, -// but will be used in production once batch syncing is implemented in consensus. -#[allow(unused)] -mod batch; mod config; mod en; pub mod era; diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index f80bfe58954c..5abbdc3503b3 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -5,7 +5,7 @@ use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; -use zksync_consensus_storage::{BatchStore, BlockStore}; +use zksync_consensus_storage::BlockStore; use zksync_dal::consensus_dal; use crate::{ @@ -43,7 +43,7 @@ pub async fn run_main_node( } // The main node doesn't have a payload queue as it produces all the L2 blocks itself. 
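// A few hunks above, `fetch_blocks` replaces a panicking `.unwrap()` with
// `.context("overflow")?` when narrowing a u64 consensus block number into the
// u32-backed L2BlockNumber. A minimal sketch of that checked-narrowing pattern
// (names are illustrative):

use anyhow::Context as _;

fn to_l2_block_number(n: u64) -> anyhow::Result<u32> {
    // `try_from` fails for n > u32::MAX; attach context instead of panicking.
    u32::try_from(n).context("overflow")
}

fn main() -> anyhow::Result<()> {
    assert_eq!(to_l2_block_number(42)?, 42);
    assert!(to_l2_block_number(u64::from(u32::MAX) + 1).is_err());
    Ok(())
}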
- let (store, runner) = Store::new(ctx, pool.clone(), None) + let (store, runner) = Store::new(ctx, pool.clone(), None, None) .await .wrap("Store::new()")?; s.spawn_bg(runner.run(ctx)); @@ -67,11 +67,6 @@ pub async fn run_main_node( .wrap("BlockStore::new()")?; s.spawn_bg(runner.run(ctx)); - let (batch_store, runner) = BatchStore::new(ctx, Box::new(store.clone())) - .await - .wrap("BatchStore::new()")?; - s.spawn_bg(runner.run(ctx)); - let attestation = Arc::new(attestation::Controller::new(attester)); s.spawn_bg(run_attestation_controller( ctx, @@ -83,7 +78,6 @@ pub async fn run_main_node( let executor = executor::Executor { config: config::executor(&cfg, &secrets, &global_config, None)?, block_store, - batch_store, validator: Some(executor::Validator { key: validator_key, replica_store: Box::new(store.clone()), @@ -135,9 +129,10 @@ async fn run_attestation_controller( "waiting for hash of batch {:?}", status.next_batch_to_attest ); - let hash = pool - .wait_for_batch_hash(ctx, status.next_batch_to_attest) + let info = pool + .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) .await?; + let hash = consensus_dal::batch_hash(&info); let Some(committee) = registry .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) .await diff --git a/core/node/consensus/src/registry/tests.rs b/core/node/consensus/src/registry/tests.rs index 935cd6738918..89afc20e1d57 100644 --- a/core/node/consensus/src/registry/tests.rs +++ b/core/node/consensus/src/registry/tests.rs @@ -1,5 +1,5 @@ use rand::Rng as _; -use zksync_concurrency::{ctx, scope}; +use zksync_concurrency::{ctx, scope, time}; use zksync_consensus_roles::{attester, validator::testonly::Setup}; use zksync_test_account::Account; use zksync_types::ProtocolVersionId; @@ -7,6 +7,8 @@ use zksync_types::ProtocolVersionId; use super::*; use crate::storage::ConnectionPool; +const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500); + /// Test checking that parsing logic matches the abi specified in the json file. #[test] fn test_consensus_registry_abi() { @@ -73,10 +75,12 @@ async fn test_attester_committee() { node.push_block(&txs).await; node.seal_batch().await; - pool.wait_for_batch(ctx, node.last_batch()).await?; + pool.wait_for_batch_info(ctx, node.last_batch(), POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?; // Read the attester committee using the vm. 
- let batch = attester::BatchNumber(node.last_batch().0.into()); + let batch = attester::BatchNumber(node.last_batch().0); assert_eq!( Some(committee), registry diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index 0f9d7c8527f3..c30398498a94 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -1,18 +1,18 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, time}; -use zksync_consensus_crypto::keccak256::Keccak256; use zksync_consensus_roles::{attester, attester::BatchNumber, validator}; -use zksync_consensus_storage::{self as storage, BatchStoreState}; -use zksync_dal::{consensus_dal, consensus_dal::Payload, Core, CoreDal, DalError}; +use zksync_consensus_storage as storage; +use zksync_dal::{ + consensus_dal::{AttestationStatus, BlockMetadata, GlobalConfig, Payload}, + Core, CoreDal, DalError, +}; use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; use zksync_node_sync::{fetcher::IoCursorExt as _, ActionQueueSender, SyncState}; use zksync_state_keeper::io::common::IoCursor; -use zksync_types::{ - commitment::L1BatchWithMetadata, fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber, -}; +use zksync_types::{fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber}; use zksync_vm_executor::oneshot::{BlockInfo, ResolvedBlockInfo}; -use super::{InsertCertificateError, PayloadQueue}; +use super::PayloadQueue; use crate::config; /// Context-aware `zksync_dal::ConnectionPool` wrapper. @@ -54,24 +54,24 @@ impl ConnectionPool { /// Waits for the `number` L1 batch hash. #[tracing::instrument(skip_all)] - pub async fn wait_for_batch_hash( + pub async fn wait_for_batch_info( &self, ctx: &ctx::Ctx, number: attester::BatchNumber, - ) -> ctx::Result { - const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500); + interval: time::Duration, + ) -> ctx::Result { loop { - if let Some(hash) = self + if let Some(info) = self .connection(ctx) .await .wrap("connection()")? - .batch_hash(ctx, number) + .batch_info(ctx, number) .await - .with_wrap(|| format!("batch_hash({number})"))? + .with_wrap(|| format!("batch_info({number})"))? { - return Ok(hash); + return Ok(info); } - ctx.sleep(POLL_INTERVAL).await?; + ctx.sleep(interval).await?; } } } @@ -109,16 +109,23 @@ impl<'a> Connection<'a> { .map_err(DalError::generalize)?) } - /// Wrapper for `consensus_dal().block_payloads()`. - pub async fn payloads( + pub async fn batch_info( &mut self, ctx: &ctx::Ctx, - numbers: std::ops::Range, - ) -> ctx::Result> { + n: attester::BatchNumber, + ) -> ctx::Result> { + Ok(ctx.wait(self.0.consensus_dal().batch_info(n)).await??) + } + + /// Wrapper for `consensus_dal().block_metadata()`. + pub async fn block_metadata( + &mut self, + ctx: &ctx::Ctx, + number: validator::BlockNumber, + ) -> ctx::Result> { Ok(ctx - .wait(self.0.consensus_dal().block_payloads(numbers)) - .await? - .map_err(DalError::generalize)?) + .wait(self.0.consensus_dal().block_metadata(number)) + .await??) } /// Wrapper for `consensus_dal().block_certificate()`. @@ -138,7 +145,7 @@ impl<'a> Connection<'a> { &mut self, ctx: &ctx::Ctx, cert: &validator::CommitQC, - ) -> Result<(), InsertCertificateError> { + ) -> Result<(), super::InsertCertificateError> { Ok(ctx .wait(self.0.consensus_dal().insert_block_certificate(cert)) .await??) 
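// `wait_for_batch_info` above polls the DAL until the batch info shows up, sleeping
// `interval` between attempts. A generic tokio sketch of the same wait-until-Some
// loop, detached from the zksync_concurrency context machinery (names illustrative):

use std::time::Duration;

// Polls `probe` until it yields Some, sleeping `interval` between attempts.
async fn wait_until_some<T, F, Fut>(mut probe: F, interval: Duration) -> T
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Option<T>>,
{
    loop {
        if let Some(value) = probe().await {
            return value;
        }
        tokio::time::sleep(interval).await;
    }
}

#[tokio::main]
async fn main() {
    use std::sync::atomic::{AtomicU32, Ordering};
    static CALLS: AtomicU32 = AtomicU32::new(0);
    // Succeeds on the third poll, mimicking batch info that appears later.
    let info = wait_until_some(
        || async { (CALLS.fetch_add(1, Ordering::SeqCst) >= 2).then_some(42u32) },
        Duration::from_millis(10),
    )
    .await;
    assert_eq!(info, 42);
}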
@@ -151,20 +158,10 @@ impl<'a> Connection<'a> { &mut self, ctx: &ctx::Ctx, cert: &attester::BatchQC, - ) -> Result<(), InsertCertificateError> { - use consensus_dal::InsertCertificateError as E; - let want_hash = self - .batch_hash(ctx, cert.message.number) - .await - .wrap("batch_hash()")? - .ok_or(E::MissingPayload)?; - if want_hash != cert.message.hash { - return Err(E::PayloadMismatch.into()); - } + ) -> Result<(), super::InsertCertificateError> { Ok(ctx .wait(self.0.consensus_dal().insert_batch_certificate(cert)) - .await? - .map_err(E::Other)?) + .await??) } /// Wrapper for `consensus_dal().upsert_attester_committee()`. @@ -203,37 +200,6 @@ impl<'a> Connection<'a> { .context("sqlx")?) } - /// Wrapper for `consensus_dal().batch_hash()`. - pub async fn batch_hash( - &mut self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result> { - let n = L1BatchNumber(number.0.try_into().context("overflow")?); - let Some(meta) = ctx - .wait(self.0.blocks_dal().get_l1_batch_metadata(n)) - .await? - .context("get_l1_batch_metadata()")? - else { - return Ok(None); - }; - Ok(Some(attester::BatchHash(Keccak256::from_bytes( - StoredBatchInfo::from(&meta).hash().0, - )))) - } - - /// Wrapper for `blocks_dal().get_l1_batch_metadata()`. - pub async fn batch( - &mut self, - ctx: &ctx::Ctx, - number: L1BatchNumber, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.blocks_dal().get_l1_batch_metadata(number)) - .await? - .context("get_l1_batch_metadata()")?) - } - /// Wrapper for `FetcherCursor::new()`. pub async fn new_payload_queue( &mut self, @@ -249,10 +215,7 @@ impl<'a> Connection<'a> { } /// Wrapper for `consensus_dal().global_config()`. - pub async fn global_config( - &mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result> { + pub async fn global_config(&mut self, ctx: &ctx::Ctx) -> ctx::Result> { Ok(ctx.wait(self.0.consensus_dal().global_config()).await??) } @@ -260,7 +223,7 @@ impl<'a> Connection<'a> { pub async fn try_update_global_config( &mut self, ctx: &ctx::Ctx, - cfg: &consensus_dal::GlobalConfig, + cfg: &GlobalConfig, ) -> ctx::Result<()> { Ok(ctx .wait(self.0.consensus_dal().try_update_global_config(cfg)) @@ -273,14 +236,14 @@ impl<'a> Connection<'a> { Ok(ctx.wait(self.0.consensus_dal().next_block()).await??) } - /// Wrapper for `consensus_dal().block_certificates_range()`. + /// Wrapper for `consensus_dal().block_store_state()`. #[tracing::instrument(skip_all)] - pub(crate) async fn block_certificates_range( + pub(crate) async fn block_store_state( &mut self, ctx: &ctx::Ctx, ) -> ctx::Result { Ok(ctx - .wait(self.0.consensus_dal().block_certificates_range()) + .wait(self.0.consensus_dal().block_store_state()) .await??) } @@ -305,7 +268,7 @@ impl<'a> Connection<'a> { } tracing::info!("Performing a hard fork of consensus."); - let new = consensus_dal::GlobalConfig { + let new = GlobalConfig { genesis: validator::GenesisRaw { chain_id: spec.chain_id, fork_number: old.as_ref().map_or(validator::ForkNumber(0), |old| { @@ -334,38 +297,35 @@ impl<'a> Connection<'a> { &mut self, ctx: &ctx::Ctx, number: validator::BlockNumber, - ) -> ctx::Result> { - let Some(justification) = self - .block_certificate(ctx, number) - .await - .wrap("block_certificate()")? - else { + ) -> ctx::Result> { + let Some(payload) = self.payload(ctx, number).await.wrap("payload()")? else { return Ok(None); }; - let payload = self - .payload(ctx, number) + if let Some(justification) = self + .block_certificate(ctx, number) .await - .wrap("payload()")? 
- .context("L2 block disappeared from storage")?; - - Ok(Some(validator::FinalBlock { - payload: payload.encode(), - justification, - })) - } + .wrap("block_certificate()")? + { + return Ok(Some( + validator::FinalBlock { + payload: payload.encode(), + justification, + } + .into(), + )); + } - /// Wrapper for `blocks_dal().get_sealed_l1_batch_number()`. - #[tracing::instrument(skip_all)] - pub async fn get_last_batch_number( - &mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.blocks_dal().get_sealed_l1_batch_number()) - .await? - .context("get_sealed_l1_batch_number()")? - .map(|nr| attester::BatchNumber(nr.0 as u64))) + Ok(Some( + validator::PreGenesisBlock { + number, + payload: payload.encode(), + // We won't use justification until it is possible to verify + // payload against the L1 batch commitment. + justification: validator::Justification(vec![]), + } + .into(), + )) } /// Wrapper for `blocks_dal().get_l2_block_range_of_l1_batch()`. @@ -388,83 +348,11 @@ impl<'a> Connection<'a> { })) } - /// Construct the [attester::SyncBatch] for a given batch number. - pub async fn get_batch( - &mut self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result> { - let Some((min, max)) = self - .get_l2_block_range_of_l1_batch(ctx, number) - .await - .context("get_l2_block_range_of_l1_batch()")? - else { - return Ok(None); - }; - - let payloads = self.payloads(ctx, min..max).await.wrap("payloads()")?; - let payloads = payloads.into_iter().map(|p| p.encode()).collect(); - - // TODO: Fill out the proof when we have the stateless L1 batch validation story finished. - // It is supposed to be a Merkle proof that the rolling hash of the batch has been included - // in the L1 system contract state tree. It is *not* the Ethereum state root hash, so producing - // it can be done without an L1 client, which is only required for validation. - let batch = attester::SyncBatch { - number, - payloads, - proof: Vec::new(), - }; - - Ok(Some(batch)) - } - - /// Construct the [storage::BatchStoreState] which contains the earliest batch and the last available [attester::SyncBatch]. - #[tracing::instrument(skip_all)] - pub async fn batches_range(&mut self, ctx: &ctx::Ctx) -> ctx::Result { - let first = self - .0 - .blocks_dal() - .get_earliest_l1_batch_number() - .await - .context("get_earliest_l1_batch_number()")?; - - let first = if first.is_some() { - first - } else { - self.0 - .snapshot_recovery_dal() - .get_applied_snapshot_status() - .await - .context("get_earliest_l1_batch_number()")? - .map(|s| s.l1_batch_number) - }; - - // TODO: In the future when we start filling in the `SyncBatch::proof` field, - // we can only run `get_batch` expecting `Some` result on numbers where the - // L1 state root hash is already available, so that we can produce some - // Merkle proof that the rolling hash of the L2 blocks in the batch has - // been included in the L1 state tree. At that point we probably can't - // call `get_last_batch_number` here, but something that indicates that - // the hashes/commitments on the L1 batch are ready and the thing has - // been included in L1; that potentially requires an API client as well. - let last = self - .get_last_batch_number(ctx) - .await - .context("get_last_batch_number()")?; - - Ok(BatchStoreState { - first: first - .map(|n| attester::BatchNumber(n.0 as u64)) - .unwrap_or(attester::BatchNumber(0)), - last, - }) - } - /// Wrapper for `consensus_dal().attestation_status()`. 
pub async fn attestation_status( &mut self, ctx: &ctx::Ctx, - ) -> ctx::Result> { + ) -> ctx::Result> { Ok(ctx .wait(self.0.consensus_dal().attestation_status()) .await? diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index cb8e039d7d01..7267d7e1c822 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -1,15 +1,18 @@ use std::sync::Arc; use anyhow::Context as _; -use tokio::sync::watch::Sender; use tracing::Instrument; use zksync_concurrency::{ctx, error::Wrap as _, scope, sync, time}; use zksync_consensus_bft::PayloadManager; -use zksync_consensus_roles::{attester, attester::BatchNumber, validator}; -use zksync_consensus_storage::{self as storage, BatchStoreState}; +use zksync_consensus_roles::validator; +use zksync_consensus_storage::{self as storage}; use zksync_dal::consensus_dal::{self, Payload}; use zksync_node_sync::fetcher::{FetchedBlock, FetchedTransaction}; use zksync_types::L2BlockNumber; +use zksync_web3_decl::{ + client::{DynClient, L2}, + namespaces::EnNamespaceClient as _, +}; use super::{Connection, PayloadQueue}; use crate::storage::{ConnectionPool, InsertCertificateError}; @@ -46,7 +49,7 @@ fn to_fetched_block( } /// Wrapper of `ConnectionPool` implementing `ReplicaStore`, `PayloadManager`, -/// `PersistentBlockStore` and `PersistentBatchStore`. +/// `PersistentBlockStore`. /// /// Contains queues to save Quorum Certificates received over gossip to the store /// as and when the payload they are over becomes available. @@ -59,8 +62,8 @@ pub(crate) struct Store { block_certificates: ctx::channel::UnboundedSender, /// Range of L2 blocks for which we have a QC persisted. blocks_persisted: sync::watch::Receiver, - /// Range of L1 batches we have persisted. - batches_persisted: sync::watch::Receiver, + /// Main node client. None if this node is the main node. 
+    client: Option<Box<DynClient<L2>>>,
 }
 
 struct PersistedBlockState(sync::watch::Sender<storage::BlockStoreState>);
@@ -69,7 +72,6 @@ struct PersistedBlockState(sync::watch::Sender<storage::BlockStoreState>);
 pub struct StoreRunner {
     pool: ConnectionPool,
     blocks_persisted: PersistedBlockState,
-    batches_persisted: sync::watch::Sender<BatchStoreState>,
     block_certificates: ctx::channel::UnboundedReceiver<validator::CommitQC>,
 }
@@ -78,22 +80,15 @@ impl Store {
         ctx: &ctx::Ctx,
         pool: ConnectionPool,
         payload_queue: Option<PayloadQueue>,
+        client: Option<Box<DynClient<L2>>>,
     ) -> ctx::Result<(Store, StoreRunner)> {
         let mut conn = pool.connection(ctx).await.wrap("connection()")?;
 
         // Initial state of persisted blocks
-        let blocks_persisted = conn
-            .block_certificates_range(ctx)
-            .await
-            .wrap("block_certificates_range()")?;
-
-        // Initial state of persisted batches
-        let batches_persisted = conn.batches_range(ctx).await.wrap("batches_range()")?;
-
+        let blocks_persisted = conn.block_store_state(ctx).await.wrap("block_store_state()")?;
         drop(conn);
 
         let blocks_persisted = sync::watch::channel(blocks_persisted).0;
-        let batches_persisted = sync::watch::channel(batches_persisted).0;
         let (block_certs_send, block_certs_recv) = ctx::channel::unbounded();
 
         Ok((
@@ -102,12 +97,11 @@ impl Store {
                 block_certificates: block_certs_send,
                 block_payloads: Arc::new(sync::Mutex::new(payload_queue)),
                 blocks_persisted: blocks_persisted.subscribe(),
-                batches_persisted: batches_persisted.subscribe(),
+                client,
             },
             StoreRunner {
                 pool,
                 blocks_persisted: PersistedBlockState(blocks_persisted),
-                batches_persisted,
                 block_certificates: block_certs_recv,
             },
         ))
@@ -117,6 +111,30 @@ impl Store {
     async fn conn(&self, ctx: &ctx::Ctx) -> ctx::Result<Connection<'_>> {
         self.pool.connection(ctx).await.wrap("connection")
     }
+
+    /// Number of the next block to queue.
+    pub(crate) async fn next_block(
+        &self,
+        ctx: &ctx::Ctx,
+    ) -> ctx::OrCanceled<Option<validator::BlockNumber>> {
+        Ok(sync::lock(ctx, &self.block_payloads)
+            .await?
+            .as_ref()
+            .map(|p| p.next()))
+    }
+
+    /// Queues the next block.
+    pub(crate) async fn queue_next_fetched_block(
+        &self,
+        ctx: &ctx::Ctx,
+        block: FetchedBlock,
+    ) -> ctx::Result<()> {
+        let mut payloads = sync::lock(ctx, &self.block_payloads).await?.into_async();
+        if let Some(payloads) = &mut *payloads {
+            payloads.send(block).await.context("payloads.send()")?;
+        }
+        Ok(())
+    }
 }
 
 impl PersistedBlockState {
@@ -125,7 +143,7 @@ impl PersistedBlockState {
     /// If `persisted.first` is moved forward, it means that blocks have been pruned.
     /// If `persisted.last` is moved forward, it means that new blocks with certificates have been
     /// persisted.
-    #[tracing::instrument(skip_all, fields(first = %new.first, last = ?new.last.as_ref().map(|l| l.message.proposal.number)))]
+    #[tracing::instrument(skip_all, fields(first = %new.first, next = ?new.next()))]
     fn update(&self, new: storage::BlockStoreState) {
         self.0.send_if_modified(|p| {
             if &new == p {
                 return false;
             }
@@ -139,10 +157,11 @@ impl PersistedBlockState {
         });
     }
 
-    /// Checks if the given certificate is exactly the next one that should
-    /// be persisted.
+    /// Checks if the given certificate should eventually be persisted.
+    /// The block store state is a range of blocks for which we already have
+    /// certificates, so only certs for blocks past that range are still needed.
     fn should_be_persisted(&self, cert: &validator::CommitQC) -> bool {
-        self.0.borrow().next() == cert.header().number
+        self.0.borrow().next() <= cert.header().number
     }
 
     /// Appends the `cert` to `persisted` range.
@@ -152,7 +171,7 @@ impl PersistedBlockState { if p.next() != cert.header().number { return false; } - p.last = Some(cert); + p.last = Some(storage::Last::Final(cert)); true }); } @@ -163,7 +182,6 @@ impl StoreRunner { let StoreRunner { pool, blocks_persisted, - batches_persisted, mut block_certificates, } = self; @@ -176,13 +194,13 @@ impl StoreRunner { ) -> ctx::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(1); - let range = pool + let state = pool .connection(ctx) .await? - .block_certificates_range(ctx) + .block_store_state(ctx) .await - .wrap("block_certificates_range()")?; - blocks_persisted.update(range); + .wrap("block_store_state()")?; + blocks_persisted.update(state); ctx.sleep(POLL_INTERVAL).await?; Ok(()) @@ -195,60 +213,6 @@ impl StoreRunner { } }); - #[tracing::instrument(skip_all, fields(l1_batch = %next_batch_number))] - async fn gossip_sync_batches_iteration( - ctx: &ctx::Ctx, - pool: &ConnectionPool, - next_batch_number: &mut BatchNumber, - batches_persisted: &Sender, - ) -> ctx::Result<()> { - const POLL_INTERVAL: time::Duration = time::Duration::seconds(1); - - let mut conn = pool.connection(ctx).await?; - if let Some(last_batch_number) = conn - .get_last_batch_number(ctx) - .await - .wrap("last_batch_number()")? - { - if last_batch_number >= *next_batch_number { - let range = conn.batches_range(ctx).await.wrap("batches_range()")?; - *next_batch_number = last_batch_number.next(); - tracing::info_span!("batches_persisted_send").in_scope(|| { - batches_persisted.send_replace(range); - }); - } - } - ctx.sleep(POLL_INTERVAL).await?; - - Ok(()) - } - - // NOTE: Running this update loop will trigger the gossip of `SyncBatches` which is currently - // pointless as there is no proof and we have to ignore them. We can disable it, but bear in - // mind that any node which gossips the availability will cause pushes and pulls in the consensus. - s.spawn::<()>(async { - // Loop updating `batches_persisted` whenever a new L1 batch is available in the database. - // We have to do this because the L1 batch is produced as L2 blocks are executed, - // which can happen on a different machine or in a different process, so we can't rely on some - // DAL method updating this memory construct. However I'm not sure that `BatchStoreState` - // really has to contain the full blown last batch, or whether it could have for example - // just the number of it. We can't just use the `attester::BatchQC`, which would make it - // analogous to the `BlockStoreState`, because the `SyncBatch` mechanism is for catching - // up with L1 batches from peers _without_ the QC, based on L1 inclusion proofs instead. - // Nevertheless since the `SyncBatch` contains all transactions for all L2 blocks, - // we can try to make it less frequent by querying just the last batch number first. - let mut next_batch_number = { batches_persisted.borrow().next() }; - loop { - gossip_sync_batches_iteration( - ctx, - &pool, - &mut next_batch_number, - &batches_persisted, - ) - .await?; - } - }); - #[tracing::instrument(skip_all)] async fn insert_block_certificates_iteration( ctx: &ctx::Ctx, @@ -339,7 +303,7 @@ impl storage::PersistentBlockStore for Store { &self, ctx: &ctx::Ctx, number: validator::BlockNumber, - ) -> ctx::Result { + ) -> ctx::Result { Ok(self .conn(ctx) .await? @@ -348,6 +312,41 @@ impl storage::PersistentBlockStore for Store { .context("not found")?) 
} + async fn verify_pregenesis_block( + &self, + ctx: &ctx::Ctx, + block: &validator::PreGenesisBlock, + ) -> ctx::Result<()> { + // We simply ask the main node for the payload hash and compare it against the received + // payload. + let meta = match &self.client { + None => self + .conn(ctx) + .await? + .block_metadata(ctx, block.number) + .await? + .context("metadata not in storage")?, + Some(client) => { + let meta = ctx + .wait(client.block_metadata(L2BlockNumber( + block.number.0.try_into().context("overflow")?, + ))) + .await? + .context("block_metadata()")? + .context("metadata not available")?; + zksync_protobuf::serde::Deserialize { + deny_unknown_fields: false, + } + .proto_fmt(&meta.0) + .context("deserialize()")? + } + }; + if meta.payload_hash != block.payload.hash() { + return Err(anyhow::format_err!("payload hash mismatch").into()); + } + Ok(()) + } + /// If actions queue is set (and the block has not been stored yet), /// the block will be translated into a sequence of actions. /// The received actions should be fed @@ -356,19 +355,21 @@ impl storage::PersistentBlockStore for Store { /// `store_next_block()` call will wait synchronously for the L2 block. /// Once the L2 block is observed in storage, `store_next_block()` will store a cert for this /// L2 block. - async fn queue_next_block( - &self, - ctx: &ctx::Ctx, - block: validator::FinalBlock, - ) -> ctx::Result<()> { + async fn queue_next_block(&self, ctx: &ctx::Ctx, block: validator::Block) -> ctx::Result<()> { let mut payloads = sync::lock(ctx, &self.block_payloads).await?.into_async(); + let (p, j) = match &block { + validator::Block::Final(block) => (&block.payload, Some(&block.justification)), + validator::Block::PreGenesis(block) => (&block.payload, None), + }; if let Some(payloads) = &mut *payloads { payloads - .send(to_fetched_block(block.number(), &block.payload).context("to_fetched_block")?) + .send(to_fetched_block(block.number(), p).context("to_fetched_block")?) .await - .context("payload_queue.send()")?; + .context("payloads.send()")?; + } + if let Some(justification) = j { + self.block_certificates.send(justification.clone()); } - self.block_certificates.send(block.justification); Ok(()) } } @@ -455,43 +456,3 @@ impl PayloadManager for Store { Ok(()) } } - -#[async_trait::async_trait] -impl storage::PersistentBatchStore for Store { - /// Range of batches persisted in storage. - fn persisted(&self) -> sync::watch::Receiver { - self.batches_persisted.clone() - } - - /// Returns the batch with the given number. - async fn get_batch( - &self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result> { - self.conn(ctx) - .await? - .get_batch(ctx, number) - .await - .wrap("get_batch") - } - - /// Queue the batch to be persisted in storage. - /// - /// The caller [BatchStore] ensures that this is only called when the batch is the next expected one. - async fn queue_next_batch( - &self, - _ctx: &ctx::Ctx, - _batch: attester::SyncBatch, - ) -> ctx::Result<()> { - // Currently the gossiping of `SyncBatch` and the `BatchStoreState` is unconditionally started by the `Network::run_stream` in consensus, - // and as long as any node reports new batches available by updating the `PersistentBatchStore::persisted` here, the other nodes - // will start pulling the corresponding batches, which will end up being passed to this method. - // If we return an error here or panic, it will stop the whole consensus task tree due to the way scopes work, so instead just return immediately. 
- // In the future we have to validate the proof agains the L1 state root hash, which IIUC we can't do just yet. - - // Err(anyhow::format_err!("unimplemented: queue_next_batch should not be called until we have the stateless L1 batch story completed.").into()) - - Ok(()) - } -} diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs index 5817e766c6b4..2aed011d23cf 100644 --- a/core/node/consensus/src/storage/testonly.rs +++ b/core/node/consensus/src/storage/testonly.rs @@ -7,8 +7,8 @@ use zksync_dal::CoreDal as _; use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams}; use zksync_node_test_utils::{recover, snapshot, Snapshot}; use zksync_types::{ - commitment::L1BatchWithMetadata, protocol_version::ProtocolSemanticVersion, - system_contracts::get_system_smart_contracts, L1BatchNumber, L2BlockNumber, ProtocolVersionId, + protocol_version::ProtocolSemanticVersion, system_contracts::get_system_smart_contracts, + L1BatchNumber, L2BlockNumber, ProtocolVersionId, }; use super::{Connection, ConnectionPool}; @@ -102,28 +102,6 @@ impl ConnectionPool { Ok(()) } - /// Waits for the `number` L1 batch. - pub async fn wait_for_batch( - &self, - ctx: &ctx::Ctx, - number: L1BatchNumber, - ) -> ctx::Result { - const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50); - loop { - if let Some(payload) = self - .connection(ctx) - .await - .wrap("connection()")? - .batch(ctx, number) - .await - .wrap("batch()")? - { - return Ok(payload); - } - ctx.sleep(POLL_INTERVAL).await?; - } - } - /// Takes a storage snapshot at the last sealed L1 batch. pub(crate) async fn snapshot(&self, ctx: &ctx::Ctx) -> ctx::Result { let mut conn = self.connection(ctx).await.wrap("connection()")?; @@ -152,21 +130,32 @@ impl ConnectionPool { Self(pool) } - /// Waits for `want_last` block to have certificate then fetches all L2 blocks with certificates. - pub async fn wait_for_block_certificates( + /// Waits for `want_last` block then fetches all L2 blocks with certificates. + pub async fn wait_for_blocks( &self, ctx: &ctx::Ctx, want_last: validator::BlockNumber, - ) -> ctx::Result> { - self.wait_for_block_certificate(ctx, want_last).await?; + ) -> ctx::Result> { + const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100); + let state = loop { + let state = self + .connection(ctx) + .await + .wrap("connection()")? + .block_store_state(ctx) + .await + .wrap("block_store_state()")?; + tracing::info!("state.next() = {}", state.next()); + if state.next() > want_last { + break state; + } + ctx.sleep(POLL_INTERVAL).await?; + }; + + assert_eq!(want_last.next(), state.next()); let mut conn = self.connection(ctx).await.wrap("connection()")?; - let range = conn - .block_certificates_range(ctx) - .await - .wrap("certificates_range()")?; - assert_eq!(want_last.next(), range.next()); - let mut blocks: Vec = vec![]; - for i in range.first.0..range.next().0 { + let mut blocks: Vec = vec![]; + for i in state.first.0..state.next().0 { let i = validator::BlockNumber(i); let block = conn.block(ctx, i).await.context("block()")?.unwrap(); blocks.push(block); @@ -174,13 +163,13 @@ impl ConnectionPool { Ok(blocks) } - /// Same as `wait_for_certificates`, but additionally verifies all the blocks against genesis. - pub async fn wait_for_block_certificates_and_verify( + /// Same as `wait_for_blocks`, but additionally verifies all certificates. 
+    pub async fn wait_for_blocks_and_verify_certs(
         &self,
         ctx: &ctx::Ctx,
         want_last: validator::BlockNumber,
-    ) -> ctx::Result<Vec<validator::FinalBlock>> {
-        let blocks = self.wait_for_block_certificates(ctx, want_last).await?;
+    ) -> ctx::Result<Vec<validator::Block>> {
+        let blocks = self.wait_for_blocks(ctx, want_last).await?;
         let cfg = self
             .connection(ctx)
             .await
             .wrap("connection()")?
             .global_config(ctx)
             .await
             .wrap("genesis()")?
             .context("genesis is missing")?;
         for block in &blocks {
-            block.verify(&cfg.genesis).context(block.number())?;
+            if let validator::Block::Final(block) = block {
+                block.verify(&cfg.genesis).context(block.number())?;
+            }
         }
         Ok(blocks)
     }
@@ -228,19 +219,11 @@ impl ConnectionPool {
         let registry = registry::Registry::new(cfg.genesis.clone(), self.clone()).await;
         for i in first.0..want_last.0 {
             let i = attester::BatchNumber(i);
-            let hash = conn
-                .batch_hash(ctx, i)
-                .await
-                .wrap("batch_hash()")?
-                .context("hash missing")?;
             let cert = conn
                 .batch_certificate(ctx, i)
                 .await
                 .wrap("batch_certificate")?
                 .context("cert missing")?;
-            if cert.message.hash != hash {
-                return Err(anyhow::format_err!("cert[{i:?}]: hash mismatch").into());
-            }
             let committee = registry
                 .attester_committee_for(ctx, registry_addr, i)
                 .await
                 .wrap("attester_committee_for()")?
                 .context("committee not specified")?;
@@ -255,28 +238,30 @@ impl ConnectionPool {
     pub async fn prune_batches(
         &self,
         ctx: &ctx::Ctx,
-        last_batch: L1BatchNumber,
+        last_batch: attester::BatchNumber,
     ) -> ctx::Result<()> {
         let mut conn = self.connection(ctx).await.context("connection()")?;
-        let (_, last_block) = ctx
-            .wait(
-                conn.0
-                    .blocks_dal()
-                    .get_l2_block_range_of_l1_batch(last_batch),
-            )
-            .await?
-            .context("get_l2_block_range_of_l1_batch()")?
-            .context("batch not found")?;
-        conn.0
-            .pruning_dal()
-            .soft_prune_batches_range(last_batch, last_block)
-            .await
-            .context("soft_prune_batches_range()")?;
-        conn.0
-            .pruning_dal()
-            .hard_prune_batches_range(last_batch, last_block)
+        let (_, last_block) = conn
+            .get_l2_block_range_of_l1_batch(ctx, last_batch)
             .await
-            .context("hard_prune_batches_range()")?;
+            .wrap("get_l2_block_range_of_l1_batch()")?
+            .context("batch not found")?;
+        let last_batch = L1BatchNumber(last_batch.0.try_into().context("overflow")?);
+        let last_block = L2BlockNumber(last_block.0.try_into().context("overflow")?);
+        ctx.wait(
+            conn.0
+                .pruning_dal()
+                .soft_prune_batches_range(last_batch, last_block),
+        )
+        .await?
+        .context("soft_prune_batches_range()")?;
+        ctx.wait(
+            conn.0
+                .pruning_dal()
+                .hard_prune_batches_range(last_batch, last_block),
+        )
+        .await?
+ .context("hard_prune_batches_range()")?; Ok(()) } } diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 04a2dfbc0835..98c0d6b08131 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -16,10 +16,7 @@ use zksync_consensus_crypto::TextFmt as _; use zksync_consensus_network as network; use zksync_consensus_roles::{attester, validator, validator::testonly::Setup}; use zksync_dal::{CoreDal, DalError}; -use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; -use zksync_metadata_calculator::{ - LazyAsyncTreeReader, MetadataCalculator, MetadataCalculatorConfig, -}; +use zksync_metadata_calculator::{MetadataCalculator, MetadataCalculatorConfig}; use zksync_node_api_server::web3::{state::InternalApiConfig, testonly::TestServerBuilder}; use zksync_node_genesis::GenesisParams; use zksync_node_sync::{ @@ -49,9 +46,8 @@ use zksync_types::{ use zksync_web3_decl::client::{Client, DynClient, L2}; use crate::{ - batch::{L1BatchCommit, L1BatchWithWitness, LastBlockCommit}, en, - storage::ConnectionPool, + storage::{ConnectionPool, Store}, }; /// Fake StateKeeper for tests. @@ -70,7 +66,6 @@ pub(super) struct StateKeeper { sync_state: SyncState, addr: sync::watch::Receiver>, pool: ConnectionPool, - tree_reader: LazyAsyncTreeReader, } #[derive(Clone)] @@ -78,6 +73,7 @@ pub(super) struct ConfigSet { net: network::Config, pub(super) config: config::ConsensusConfig, pub(super) secrets: config::ConsensusSecrets, + pub(super) enable_pregenesis: bool, } impl ConfigSet { @@ -87,11 +83,17 @@ impl ConfigSet { config: make_config(&net, None), secrets: make_secrets(&net, None), net, + enable_pregenesis: self.enable_pregenesis, } } } -pub(super) fn new_configs(rng: &mut impl Rng, setup: &Setup, seed_peers: usize) -> Vec { +pub(super) fn new_configs( + rng: &mut impl Rng, + setup: &Setup, + seed_peers: usize, + pregenesis: bool, +) -> Vec { let net_cfgs = network::testonly::new_configs(rng, setup, 0); let genesis_spec = config::GenesisSpec { chain_id: setup.genesis.chain_id.0.try_into().unwrap(), @@ -131,6 +133,7 @@ pub(super) fn new_configs(rng: &mut impl Rng, setup: &Setup, seed_peers: usize) config: make_config(&net, Some(genesis_spec.clone())), secrets: make_secrets(&net, setup.attester_keys.get(i).cloned()), net, + enable_pregenesis: pregenesis, }) .collect() } @@ -154,6 +157,7 @@ fn make_config( genesis_spec: Option, ) -> config::ConsensusConfig { config::ConsensusConfig { + port: Some(cfg.server_addr.port()), server_addr: *cfg.server_addr, public_addr: config::Host(cfg.public_addr.0.clone()), max_payload_size: usize::MAX, @@ -215,10 +219,11 @@ impl StateKeeper { .wait(IoCursor::for_fetcher(&mut conn.0)) .await? .context("IoCursor::new()")?; - let pending_batch = ctx - .wait(conn.0.blocks_dal().pending_batch_exists()) + let batch_sealed = ctx + .wait(conn.0.blocks_dal().get_unsealed_l1_batch()) .await? - .context("pending_batch_exists()")?; + .context("get_unsealed_l1_batch()")? 
+ .is_none(); let (actions_sender, actions_queue) = ActionQueue::new(); let addr = sync::watch::channel(None).0; let sync_state = SyncState::default(); @@ -248,20 +253,18 @@ impl StateKeeper { let metadata_calculator = MetadataCalculator::new(config, None, pool.0.clone()) .await .context("MetadataCalculator::new()")?; - let tree_reader = metadata_calculator.tree_reader(); Ok(( Self { protocol_version, last_batch: cursor.l1_batch, last_block: cursor.next_l2_block - 1, last_timestamp: cursor.prev_l2_block_timestamp, - batch_sealed: !pending_batch, + batch_sealed, next_priority_op: PriorityOpId(1), actions_sender, sync_state: sync_state.clone(), addr: addr.subscribe(), pool: pool.clone(), - tree_reader, }, StateKeeperRunner { actions_queue, @@ -369,51 +372,14 @@ impl StateKeeper { } /// Batch of the `last_block`. - pub fn last_batch(&self) -> L1BatchNumber { - self.last_batch + pub fn last_batch(&self) -> attester::BatchNumber { + attester::BatchNumber(self.last_batch.0.into()) } /// Last L1 batch that has been sealed and will have /// metadata computed eventually. - pub fn last_sealed_batch(&self) -> L1BatchNumber { - self.last_batch - (!self.batch_sealed) as u32 - } - - /// Loads a commitment to L1 batch directly from the database. - // TODO: ideally, we should rather fake fetching it from Ethereum. - // We can use `zksync_eth_client::clients::MockEthereum` for that, - // which implements `EthInterface`. It should be enough to use - // `MockEthereum.with_call_handler()`. - pub async fn load_batch_commit( - &self, - ctx: &ctx::Ctx, - number: L1BatchNumber, - ) -> ctx::Result { - // TODO: we should mock the `eth_sender` as well. - let mut conn = self.pool.connection(ctx).await?; - let this = conn.batch(ctx, number).await?.context("missing batch")?; - let prev = conn - .batch(ctx, number - 1) - .await? - .context("missing batch")?; - Ok(L1BatchCommit { - number, - this_batch: LastBlockCommit { - info: StoredBatchInfo::from(&this).hash(), - }, - prev_batch: LastBlockCommit { - info: StoredBatchInfo::from(&prev).hash(), - }, - }) - } - - /// Loads an `L1BatchWithWitness`. - pub async fn load_batch_with_witness( - &self, - ctx: &ctx::Ctx, - n: L1BatchNumber, - ) -> ctx::Result { - L1BatchWithWitness::load(ctx, n, &self.pool, &self.tree_reader).await + pub fn last_sealed_batch(&self) -> attester::BatchNumber { + attester::BatchNumber((self.last_batch.0 - (!self.batch_sealed) as u32).into()) } /// Connects to the json RPC endpoint exposed by the state keeper. @@ -455,6 +421,40 @@ impl StateKeeper { .await } + pub async fn run_temporary_fetcher( + self, + ctx: &ctx::Ctx, + client: Box>, + ) -> ctx::Result<()> { + scope::run!(ctx, |ctx, s| async { + let payload_queue = self + .pool + .connection(ctx) + .await + .wrap("connection()")? + .new_payload_queue(ctx, self.actions_sender, self.sync_state.clone()) + .await + .wrap("new_payload_queue()")?; + let (store, runner) = Store::new( + ctx, + self.pool.clone(), + Some(payload_queue), + Some(client.clone()), + ) + .await + .wrap("Store::new()")?; + s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + en::EN { + pool: self.pool.clone(), + client, + sync_state: self.sync_state.clone(), + } + .temporary_block_fetcher(ctx, &store) + .await + }) + .await + } + /// Runs consensus node for the external node. 
pub async fn run_consensus( self, @@ -473,6 +473,7 @@ impl StateKeeper { cfgs.config, cfgs.secrets, cfgs.net.build_version, + cfgs.enable_pregenesis, ) .await } diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index 35d849ae6169..2701a986e9e9 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -1,6 +1,6 @@ use anyhow::Context as _; use rand::Rng as _; -use test_casing::test_casing; +use test_casing::{test_casing, Product}; use tracing::Instrument as _; use zksync_concurrency::{ctx, error::Wrap, scope}; use zksync_consensus_roles::{ @@ -9,10 +9,10 @@ use zksync_consensus_roles::{ }; use zksync_dal::consensus_dal; use zksync_test_account::Account; -use zksync_types::{L1BatchNumber, ProtocolVersionId}; +use zksync_types::ProtocolVersionId; use zksync_web3_decl::namespaces::EnNamespaceClient as _; -use super::VERSIONS; +use super::{POLL_INTERVAL, PREGENESIS, VERSIONS}; use crate::{ mn::run_main_node, registry::{testonly, Registry}, @@ -34,13 +34,13 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); // Setup nontrivial genesis. - while sk.last_sealed_batch() < L1BatchNumber(3) { + while sk.last_sealed_batch() < attester::BatchNumber(3) { sk.push_random_blocks(rng, account, 10).await; } let mut setup = SetupSpec::new(rng, 3); setup.first_block = sk.last_block(); let first_batch = sk.last_batch(); - let setup = Setup::from(setup); + let setup = Setup::from_spec(rng, setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; conn.try_update_global_config( ctx, @@ -54,7 +54,9 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { .wrap("try_update_global_config()")?; // Make sure that the first_batch is actually sealed. sk.seal_batch().await; - pool.wait_for_batch(ctx, first_batch).await?; + pool.wait_for_batch_info(ctx, first_batch, POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?; // Connect to API endpoint. let api = sk.connect(ctx).await?; @@ -77,18 +79,18 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { let status = fetch_status().await?; assert_eq!( status.next_batch_to_attest, - attester::BatchNumber(first_batch.0.into()) + attester::BatchNumber(first_batch.0) ); tracing::info!("Insert a cert"); { let mut conn = pool.connection(ctx).await?; let number = status.next_batch_to_attest; - let hash = conn.batch_hash(ctx, number).await?.unwrap(); + let info = conn.batch_info(ctx, number).await?.unwrap(); let gcfg = conn.global_config(ctx).await?.unwrap(); let m = attester::Batch { number, - hash, + hash: consensus_dal::batch_hash(&info), genesis: gcfg.genesis.hash(), }; let mut sigs = attester::MultiSig::default(); @@ -124,9 +126,9 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { // Test running a couple of attesters (which are also validators). // Main node is expected to collect all certificates. // External nodes are expected to just vote for the batch. 
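// In `test_attestation_status_api` above, every attester signs the same
// `attester::Batch` message and the signatures are folded into a MultiSig before the
// certificate is inserted. A dependency-free sketch of that collect-then-check-quorum
// shape (the types and the >2/3 rule below are stand-ins, not the real consensus API):

use std::collections::HashMap;

type PubKey = u32; // stand-in for a real public key
type Sig = String; // stand-in for a real signature

#[derive(Default)]
struct MultiSig {
    sigs: HashMap<PubKey, Sig>,
}

impl MultiSig {
    fn add(&mut self, key: PubKey, sig: Sig) {
        self.sigs.insert(key, sig);
    }

    // Treat the certificate as complete once strictly more than 2/3 of the
    // committee has signed (an assumed quorum rule, for illustration only).
    fn has_quorum(&self, committee: &[PubKey]) -> bool {
        let signed = committee.iter().filter(|k| self.sigs.contains_key(k)).count();
        signed * 3 > committee.len() * 2
    }
}

fn main() {
    let committee = [1, 2, 3, 4];
    let mut sigs = MultiSig::default();
    for key in [1, 2, 3] {
        sigs.add(key, format!("sig-by-{key}"));
    }
    assert!(sigs.has_quorum(&committee)); // 3 of 4 signed: quorum reached
}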
-#[test_casing(2, VERSIONS)] +#[test_casing(4, Product((VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_multiple_attesters(version: ProtocolVersionId) { +async fn test_multiple_attesters(version: ProtocolVersionId, pregenesis: bool) { const NODES: usize = 4; zksync_concurrency::testonly::abort_on_panic(); @@ -135,7 +137,7 @@ async fn test_multiple_attesters(version: ProtocolVersionId) { let account = &mut Account::random(); let to_fund = &[account.address]; let setup = Setup::new(rng, 4); - let mut cfgs = new_configs(rng, &setup, NODES); + let mut cfgs = new_configs(rng, &setup, NODES, pregenesis); scope::run!(ctx, |ctx, s| async { let validator_pool = ConnectionPool::test(false, version).await; let (mut validator, runner) = StateKeeper::new(ctx, validator_pool.clone()).await?; @@ -235,7 +237,7 @@ async fn test_multiple_attesters(version: ProtocolVersionId) { } tracing::info!("Wait for the batches to be attested"); - let want_last = attester::BatchNumber(validator.last_sealed_batch().0.into()); + let want_last = attester::BatchNumber(validator.last_sealed_batch().0); validator_pool .wait_for_batch_certificates_and_verify(ctx, want_last, Some(registry_addr)) .await?; diff --git a/core/node/consensus/src/tests/batch.rs b/core/node/consensus/src/tests/batch.rs deleted file mode 100644 index f0cae7f2c02e..000000000000 --- a/core/node/consensus/src/tests/batch.rs +++ /dev/null @@ -1,124 +0,0 @@ -use test_casing::{test_casing, Product}; -use zksync_concurrency::{ctx, scope}; -use zksync_consensus_roles::validator; -use zksync_test_account::Account; -use zksync_types::{L1BatchNumber, ProtocolVersionId}; - -use super::{FROM_SNAPSHOT, VERSIONS}; -use crate::{storage::ConnectionPool, testonly}; - -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] -#[tokio::test] -async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersionId) { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - let pool = ConnectionPool::test(from_snapshot, version).await; - let account = &mut Account::random(); - - // Fill storage with unsigned L2 blocks and L1 batches in a way that the - // last L1 batch is guaranteed to have some L2 blocks executed in it. - scope::run!(ctx, |ctx, s| async { - // Start state keeper. - let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run(ctx)); - - for _ in 0..3 { - for _ in 0..2 { - sk.push_random_block(rng, account).await; - } - sk.seal_batch().await; - } - sk.push_random_block(rng, account).await; - - pool.wait_for_payload(ctx, sk.last_block()).await?; - - Ok(()) - }) - .await - .unwrap(); - - // Now we can try to retrieve the batch. - scope::run!(ctx, |ctx, _s| async { - let mut conn = pool.connection(ctx).await?; - let batches = conn.batches_range(ctx).await?; - let last = batches.last.expect("last is set"); - let (min, max) = conn - .get_l2_block_range_of_l1_batch(ctx, last) - .await? - .unwrap(); - - let last_batch = conn - .get_batch(ctx, last) - .await? 
- .expect("last batch can be retrieved"); - - assert_eq!( - last_batch.payloads.len(), - (max.0 - min.0) as usize, - "all block payloads present" - ); - - let first_payload = last_batch - .payloads - .first() - .expect("last batch has payloads"); - - let want_payload = conn.payload(ctx, min).await?.expect("payload is in the DB"); - let want_payload = want_payload.encode(); - - assert_eq!( - first_payload, &want_payload, - "first payload is the right number" - ); - - anyhow::Ok(()) - }) - .await - .unwrap(); -} - -/// Tests that generated L1 batch witnesses can be verified successfully. -/// TODO: add tests for verification failures. -#[test_casing(2, VERSIONS)] -#[tokio::test] -async fn test_batch_witness(version: ProtocolVersionId) { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - let account = &mut Account::random(); - let to_fund = &[account.address]; - - scope::run!(ctx, |ctx, s| async { - let pool = ConnectionPool::from_genesis(version).await; - let (mut node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run_real(ctx, to_fund)); - - tracing::info!("analyzing storage"); - { - let mut conn = pool.connection(ctx).await.unwrap(); - let mut n = validator::BlockNumber(0); - while let Some(p) = conn.payload(ctx, n).await? { - tracing::info!("block[{n}] = {p:?}"); - n = n + 1; - } - } - - // Seal a bunch of batches. - node.push_random_blocks(rng, account, 10).await; - node.seal_batch().await; - pool.wait_for_batch(ctx, node.last_sealed_batch()).await?; - // We can verify only 2nd batch onward, because - // batch witness verifies parent of the last block of the - // previous batch (and 0th batch contains only 1 block). - for n in 2..=node.last_sealed_batch().0 { - let n = L1BatchNumber(n); - let batch_with_witness = node.load_batch_with_witness(ctx, n).await?; - let commit = node.load_batch_commit(ctx, n).await?; - batch_with_witness.verify(&commit)?; - } - Ok(()) - }) - .await - .unwrap(); -} diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 52abe3c810c5..8da17cfba8ac 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -2,29 +2,121 @@ use anyhow::Context as _; use rand::Rng as _; use test_casing::{test_casing, Product}; use tracing::Instrument as _; -use zksync_concurrency::{ctx, error::Wrap as _, scope}; +use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_config::configs::consensus as config; use zksync_consensus_crypto::TextFmt as _; use zksync_consensus_roles::{ node, validator, validator::testonly::{Setup, SetupSpec}, }; -use zksync_consensus_storage::BlockStore; +use zksync_consensus_storage::{BlockStore, PersistentBlockStore}; use zksync_dal::consensus_dal; use zksync_test_account::Account; use zksync_types::ProtocolVersionId; +use zksync_web3_decl::namespaces::EnNamespaceClient as _; use crate::{ + en::TEMPORARY_FETCHER_THRESHOLD, mn::run_main_node, storage::{ConnectionPool, Store}, testonly, }; mod attestation; -mod batch; const VERSIONS: [ProtocolVersionId; 2] = [ProtocolVersionId::latest(), ProtocolVersionId::next()]; const FROM_SNAPSHOT: [bool; 2] = [true, false]; +const PREGENESIS: [bool; 2] = [true, false]; +const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500); + +#[test_casing(2, VERSIONS)] +#[tokio::test] +async fn test_verify_pregenesis_block(version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = 
&ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + let account = &mut Account::random(); + let mut setup = SetupSpec::new(rng, 3); + setup.first_block = validator::BlockNumber(1000); + let setup = Setup::from_spec(rng, setup); + let cfg = consensus_dal::GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: None, + seed_peers: [].into(), + }; + + scope::run!(ctx, |ctx, s| async { + tracing::info!("Start state keeper."); + let pool = ConnectionPool::test(/*from_snapshot=*/ false, version).await; + pool.connection(ctx) + .await + .wrap("connection()")? + .try_update_global_config(ctx, &cfg) + .await + .wrap("try_update_global_config()")?; + let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + + tracing::info!("Populate storage with a bunch of blocks."); + sk.push_random_blocks(rng, account, 5).await; + sk.seal_batch().await; + let blocks: Vec<_> = pool + .wait_for_blocks(ctx, sk.last_block()) + .await + .context("wait_for_blocks()")? + .into_iter() + .map(|b| match b { + validator::Block::PreGenesis(b) => b, + _ => panic!(), + }) + .collect(); + assert!(!blocks.is_empty()); + + tracing::info!("Create another store"); + let pool = ConnectionPool::test(/*from_snapshot=*/ false, version).await; + pool.connection(ctx) + .await + .wrap("connection()")? + .try_update_global_config(ctx, &cfg) + .await + .wrap("try_update_global_config()")?; + let (store, runner) = Store::new( + ctx, + pool.clone(), + None, + Some(sk.connect(ctx).await.unwrap()), + ) + .await + .unwrap(); + s.spawn_bg(runner.run(ctx)); + + tracing::info!("All the blocks from the main node should be valid."); + for b in &blocks { + store.verify_pregenesis_block(ctx, b).await.unwrap(); + } + tracing::info!("Malformed blocks should not be valid"); + for b in &blocks { + let mut p = consensus_dal::Payload::decode(&b.payload).unwrap(); + // Arbitrary small change. + p.timestamp = rng.gen(); + store + .verify_pregenesis_block( + ctx, + &validator::PreGenesisBlock { + number: b.number, + justification: b.justification.clone(), + payload: p.encode(), + }, + ) + .await + .unwrap_err(); + } + + Ok(()) + }) + .await + .unwrap(); +} #[test_casing(2, VERSIONS)] #[tokio::test] @@ -36,7 +128,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { let account = &mut Account::random(); // Fill storage with unsigned L2 blocks. - // Fetch a suffix of blocks that we will generate (fake) certs for. + // Fetch a suffix of blocks that we will generate certs for. let want = scope::run!(ctx, |ctx, s| async { // Start state keeper. let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; @@ -44,8 +136,9 @@ async fn test_validator_block_store(version: ProtocolVersionId) { sk.push_random_blocks(rng, account, 10).await; pool.wait_for_payload(ctx, sk.last_block()).await?; let mut setup = SetupSpec::new(rng, 3); - setup.first_block = validator::BlockNumber(4); - let mut setup = Setup::from(setup); + setup.first_block = validator::BlockNumber(0); + setup.first_pregenesis_block = setup.first_block; + let mut setup = Setup::from_spec(rng, setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; conn.try_update_global_config( ctx, @@ -75,7 +168,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { // Insert blocks one by one and check the storage state. 
for (i, block) in want.iter().enumerate() { scope::run!(ctx, |ctx, s| async { - let (store, runner) = Store::new(ctx, pool.clone(), None).await.unwrap(); + let (store, runner) = Store::new(ctx, pool.clone(), None, None).await.unwrap(); s.spawn_bg(runner.run(ctx)); let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())).await.unwrap(); @@ -85,10 +178,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { .wait_until_persisted(ctx, block.number()) .await .unwrap(); - let got = pool - .wait_for_block_certificates(ctx, block.number()) - .await - .unwrap(); + let got = pool.wait_for_blocks(ctx, block.number()).await.unwrap(); assert_eq!(want[..=i], got); Ok(()) }) @@ -100,14 +190,14 @@ async fn test_validator_block_store(version: ProtocolVersionId) { // In the current implementation, consensus certificates are created asynchronously // for the L2 blocks constructed by the StateKeeper. This means that consensus actor // is effectively just back filling the consensus certificates for the L2 blocks in storage. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_validator(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { @@ -149,9 +239,9 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { tracing::info!("Verify all certificates"); pool - .wait_for_block_certificates_and_verify(ctx, sk.last_block()) + .wait_for_blocks_and_verify_certs(ctx, sk.last_block()) .await - .context("wait_for_block_certificates_and_verify()")?; + .context("wait_for_blocks_and_verify_certs()")?; Ok(()) }) .await @@ -164,14 +254,14 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { } // Test running a validator node and 2 full nodes recovered from different snapshots. -#[test_casing(2, VERSIONS)] +#[test_casing(4, Product((VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { +async fn test_nodes_from_various_snapshots(version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { @@ -226,15 +316,15 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { tracing::info!("produce more blocks and compare storages"); validator.push_random_blocks(rng, account, 5).await; let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?; // node stores should be suffixes for validator store. 
for got in [ node_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?, node_pool2 - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?, ] { assert_eq!(want[want.len() - got.len()..], got[..]); @@ -245,14 +335,14 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { .unwrap(); } -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let mut validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let mut validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -304,12 +394,12 @@ async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId) { validator.push_random_blocks(rng, account, 5).await; let want_last = validator.last_block(); let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await?; assert_eq!( want, node_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await? ); Ok(()) @@ -322,16 +412,16 @@ async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId) { // Test running a validator node and a couple of full nodes. // Validator is producing signed blocks and fetchers are expected to fetch // them directly or indirectly. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { const NODES: usize = 2; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let account = &mut Account::random(); // topology: @@ -391,13 +481,15 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { // Note that block from before and after genesis have to be fetched. validator.push_random_blocks(rng, account, 5).await; let want_last = validator.last_block(); + tracing::info!("Waiting for the validator to produce block {want_last}."); let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await?; + tracing::info!("Waiting for the nodes to fetch block {want_last}."); for pool in &node_pools { assert_eq!( want, - pool.wait_for_block_certificates_and_verify(ctx, want_last) + pool.wait_for_blocks_and_verify_certs(ctx, want_last) .await? 
); } @@ -408,16 +500,16 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { } // Test running external node (non-leader) validators. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { const NODES: usize = 3; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, NODES); - let cfgs = testonly::new_configs(rng, &setup, 1); + let cfgs = testonly::new_configs(rng, &setup, 1, pregenesis); let account = &mut Account::random(); // Run all nodes in parallel. @@ -475,12 +567,12 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { main_node.push_random_blocks(rng, account, 5).await; let want_last = main_node.last_block(); let want = main_node_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await?; for pool in &ext_node_pools { assert_eq!( want, - pool.wait_for_block_certificates_and_verify(ctx, want_last) + pool.wait_for_blocks_and_verify_certs(ctx, want_last) .await? ); } @@ -491,14 +583,18 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { } // Test fetcher back filling missing certs. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_p2p_fetcher_backfill_certs( + from_snapshot: bool, + version: ProtocolVersionId, + pregenesis: bool, +) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -555,10 +651,10 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg)); validator.push_random_blocks(rng, account, 3).await; let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?; let got = node_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?; assert_eq!(want, got); Ok(()) @@ -571,14 +667,144 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV .unwrap(); } -#[test_casing(2, VERSIONS)] +// Test temporary fetcher fetching blocks if a lot of certs are missing. +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[tokio::test] +async fn test_temporary_fetcher(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + // We force certs to be missing on EN by having 1 of the validators permanently offline. + // This way no blocks will be finalized at all, so no one will have certs. 
+ let setup = Setup::new(rng, 2); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); + + scope::run!(ctx, |ctx, s| async { + tracing::info!("Spawn validator."); + let validator_pool = ConnectionPool::test(from_snapshot, version).await; + let (mut validator, runner) = + testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); + // API server needs at least 1 L1 batch to start. + validator.seal_batch().await; + let client = validator.connect(ctx).await?; + + // Wait for the consensus to be initialized. + while ctx.wait(client.consensus_global_config()).await??.is_none() { + ctx.sleep(time::Duration::milliseconds(100)).await?; + } + + let node_pool = ConnectionPool::test(from_snapshot, version).await; + + tracing::info!("Run centralized fetcher, so that there is a lot of certs missing."); + scope::run!(ctx, |ctx, s| async { + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(node.run_fetcher(ctx, client.clone())); + validator + .push_random_blocks(rng, account, TEMPORARY_FETCHER_THRESHOLD as usize + 1) + .await; + node_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + Ok(()) + }) + .await + .unwrap(); + + tracing::info!( + "Run p2p fetcher. Blocks should be fetched by the temporary fetcher anyway." + ); + scope::run!(ctx, |ctx, s| async { + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); + validator.push_random_blocks(rng, account, 5).await; + node_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + Ok(()) + }) + .await + .unwrap(); + Ok(()) + }) + .await + .unwrap(); +} + +// Test that temporary fetcher terminates once enough blocks have certs. +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_with_pruning(version: ProtocolVersionId) { +async fn test_temporary_fetcher_termination(from_snapshot: bool, version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + let setup = Setup::new(rng, 1); + let pregenesis = true; + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); + + scope::run!(ctx, |ctx, s| async { + tracing::info!("Spawn validator."); + let validator_pool = ConnectionPool::test(from_snapshot, version).await; + let (mut validator, runner) = + testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); + // API server needs at least 1 L1 batch to start. + validator.seal_batch().await; + let client = validator.connect(ctx).await?; + + let node_pool = ConnectionPool::test(from_snapshot, version).await; + + // Run the EN so the consensus is initialized on EN and wait for it to sync. 
+ scope::run!(ctx, |ctx, s| async { + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); + validator.push_random_blocks(rng, account, 5).await; + node_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + Ok(()) + }) + .await + .unwrap(); + + // Run the temporary fetcher. It should terminate immediately, since EN is synced. + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + node.run_temporary_fetcher(ctx, client).await?; + + Ok(()) + }) + .await + .unwrap(); +} + +#[test_casing(4, Product((VERSIONS,PREGENESIS)))] +#[tokio::test] +async fn test_with_pruning(version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -642,27 +868,28 @@ async fn test_with_pruning(version: ProtocolVersionId) { validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; validator_pool - .wait_for_batch(ctx, validator.last_sealed_batch()) - .await?; + .wait_for_batch_info(ctx, validator.last_sealed_batch(), POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?; // The main node is not supposed to be pruned. In particular `ConsensusDal::attestation_status` // does not look for where the last prune happened at, and thus if we prune the block genesis // points at, we might never be able to start the Executor. 
tracing::info!("Wait until the external node has all the batches we want to prune"); node_pool - .wait_for_batch(ctx, to_prune.next()) + .wait_for_batch_info(ctx, to_prune.next(), POLL_INTERVAL) .await - .context("wait_for_batch()")?; + .wrap("wait_for_batch_info()")?; tracing::info!("Prune some blocks and sync more"); node_pool .prune_batches(ctx, to_prune) .await - .context("prune_batches")?; + .wrap("prune_batches")?; validator.push_random_blocks(rng, account, 5).await; node_pool - .wait_for_block_certificates(ctx, validator.last_block()) + .wait_for_blocks(ctx, validator.last_block()) .await - .context("wait_for_block_certificates()")?; + .wrap("wait_for_blocks()")?; Ok(()) }) .await diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index 5c17add2e987..3e4c0ee30b94 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -392,6 +392,7 @@ pub async fn create_genesis_l1_batch( base_system_contracts.hashes(), protocol_version.minor, ); + let batch_fee_input = BatchFeeInput::pubdata_independent(0, 0, 0); let genesis_l2_block_header = L2BlockHeader { number: L2BlockNumber(0), @@ -402,7 +403,7 @@ pub async fn create_genesis_l1_batch( fee_account_address: Default::default(), base_fee_per_gas: 0, gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(protocol_version.minor.into()), - batch_fee_input: BatchFeeInput::l1_pegged(0, 0), + batch_fee_input, base_system_contracts_hashes: base_system_contracts.hashes(), protocol_version: Some(protocol_version.minor), virtual_blocks: 0, @@ -418,7 +419,11 @@ pub async fn create_genesis_l1_batch( .await?; transaction .blocks_dal() - .insert_l1_batch( + .insert_l1_batch(genesis_l1_batch_header.to_unsealed_header(batch_fee_input)) + .await?; + transaction + .blocks_dal() + .mark_l1_batch_as_sealed( &genesis_l1_batch_header, &[], BlockGasCount::default(), diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index 7687595740a8..10fb2925015f 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -15,6 +15,7 @@ use zksync_state_keeper::{ updates::UpdatesManager, }; use zksync_types::{ + block::UnsealedL1BatchHeader, protocol_upgrade::ProtocolUpgradeTx, protocol_version::{ProtocolSemanticVersion, VersionPatch}, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, @@ -200,6 +201,14 @@ impl StateKeeperIO for ExternalIO { cursor.l1_batch ) })?; + storage + .blocks_dal() + .ensure_unsealed_l1_batch_exists( + l1_batch_env + .clone() + .into_unsealed_header(Some(system_env.version)), + ) + .await?; let data = load_pending_batch(&mut storage, system_env, l1_batch_env) .await .with_context(|| { @@ -236,6 +245,19 @@ impl StateKeeperIO for ExternalIO { "L2 block number mismatch: expected {}, got {first_l2_block_number}", cursor.next_l2_block ); + + self.pool + .connection_tagged("sync_layer") + .await? 
+ .blocks_dal() + .insert_l1_batch(UnsealedL1BatchHeader { + number: cursor.l1_batch, + timestamp: params.first_l2_block.timestamp, + protocol_version: None, + fee_address: params.operator_address, + fee_input: params.fee_input, + }) + .await?; return Ok(Some(params)); } other => { diff --git a/core/node/node_sync/src/fetcher.rs b/core/node/node_sync/src/fetcher.rs index 51b9f7c7a060..3f8558ed0ac5 100644 --- a/core/node/node_sync/src/fetcher.rs +++ b/core/node/node_sync/src/fetcher.rs @@ -114,8 +114,8 @@ impl IoCursorExt for IoCursor { let mut this = Self::new(storage).await?; // It's important to know whether we have opened a new batch already or just sealed the previous one. // Depending on it, we must either insert `OpenBatch` item into the queue, or not. - let was_new_batch_open = storage.blocks_dal().pending_batch_exists().await?; - if !was_new_batch_open { + let unsealed_batch = storage.blocks_dal().get_unsealed_l1_batch().await?; + if unsealed_batch.is_none() { this.l1_batch -= 1; // Should continue from the last L1 batch present in the storage } Ok(this) @@ -201,3 +201,35 @@ impl IoCursorExt for IoCursor { new_actions } } + +#[cfg(test)] +mod tests { + use zksync_dal::{ConnectionPool, Core, CoreDal}; + use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; + use zksync_state_keeper::io::IoCursor; + use zksync_types::{block::UnsealedL1BatchHeader, L1BatchNumber}; + + use crate::fetcher::IoCursorExt; + + #[tokio::test] + async fn io_cursor_recognizes_empty_unsealed_batch() -> anyhow::Result<()> { + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + insert_genesis_batch(&mut conn, &GenesisParams::mock()) + .await + .unwrap(); + conn.blocks_dal() + .insert_l1_batch(UnsealedL1BatchHeader { + number: L1BatchNumber(1), + timestamp: 1, + protocol_version: None, + fee_address: Default::default(), + fee_input: Default::default(), + }) + .await?; + + let io_cursor = IoCursor::for_fetcher(&mut conn).await?; + assert_eq!(io_cursor.l1_batch, L1BatchNumber(1)); + Ok(()) + } +} diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index 108283122bce..229f54132f76 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -14,8 +14,8 @@ use zksync_mempool::L2TxFilter; use zksync_multivm::{interface::Halt, utils::derive_base_fee_and_gas_per_pubdata}; use zksync_node_fee_model::BatchFeeModelInputProvider; use zksync_types::{ - protocol_upgrade::ProtocolUpgradeTx, utils::display_timestamp, Address, L1BatchNumber, - L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, U256, + block::UnsealedL1BatchHeader, protocol_upgrade::ProtocolUpgradeTx, utils::display_timestamp, + Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, U256, }; // TODO (SMA-1206): use seconds instead of milliseconds. use zksync_utils::time::millis_since_epoch; @@ -133,6 +133,15 @@ impl StateKeeperIO for MempoolIO { gas_per_pubdata: gas_per_pubdata as u32, }; + storage + .blocks_dal() + .ensure_unsealed_l1_batch_exists( + l1_batch_env + .clone() + .into_unsealed_header(Some(system_env.version)), + ) + .await?; + Ok(( cursor, Some(PendingBatchData { @@ -148,6 +157,30 @@ impl StateKeeperIO for MempoolIO { cursor: &IoCursor, max_wait: Duration, ) -> anyhow::Result> { + // Check if there is an existing unsealed batch + if let Some(unsealed_storage_batch) = self + .pool + .connection_tagged("state_keeper") + .await? 
+ .blocks_dal() + .get_unsealed_l1_batch() + .await? + { + return Ok(Some(L1BatchParams { + protocol_version: unsealed_storage_batch + .protocol_version + .expect("unsealed batch is missing protocol version"), + validation_computational_gas_limit: self.validation_computational_gas_limit, + operator_address: unsealed_storage_batch.fee_address, + fee_input: unsealed_storage_batch.fee_input, + first_l2_block: L2BlockParams { + timestamp: unsealed_storage_batch.timestamp, + // This value is effectively ignored by the protocol. + virtual_blocks: 1, + }, + })); + } + let deadline = Instant::now() + max_wait; // Block until at least one transaction in the mempool can match the filter (or timeout happens). @@ -191,6 +224,19 @@ impl StateKeeperIO for MempoolIO { continue; } + self.pool + .connection_tagged("state_keeper") + .await? + .blocks_dal() + .insert_l1_batch(UnsealedL1BatchHeader { + number: cursor.l1_batch, + timestamp, + protocol_version: Some(protocol_version), + fee_address: self.fee_account, + fee_input: self.filter.fee_input, + }) + .await?; + return Ok(Some(L1BatchParams { protocol_version, validation_computational_gas_limit: self.validation_computational_gas_limit, diff --git a/core/node/state_keeper/src/io/mod.rs b/core/node/state_keeper/src/io/mod.rs index f8106fd2423b..0fc5ebb6c082 100644 --- a/core/node/state_keeper/src/io/mod.rs +++ b/core/node/state_keeper/src/io/mod.rs @@ -42,7 +42,7 @@ pub struct PendingBatchData { pub(crate) pending_l2_blocks: Vec, } -#[derive(Debug, Copy, Clone, Default)] +#[derive(Debug, Copy, Clone, Default, PartialEq)] pub struct L2BlockParams { /// The timestamp of the L2 block. pub timestamp: u64, @@ -58,7 +58,7 @@ pub struct L2BlockParams { } /// Parameters for a new L1 batch returned by [`StateKeeperIO::wait_for_new_batch_params()`]. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct L1BatchParams { /// Protocol version for the new L1 batch. pub protocol_version: ProtocolVersionId, diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index 97340d6496ab..3e11285e11f1 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -397,7 +397,7 @@ mod tests { let mut output_handler = OutputHandler::new(Box::new(persistence)) .with_handler(Box::new(TreeWritesPersistence::new(pool.clone()))); tokio::spawn(l2_block_sealer.run()); - execute_mock_batch(&mut output_handler).await; + execute_mock_batch(&mut output_handler, &pool).await; // Check that L2 block #1 and L1 batch #1 are persisted. 
let mut storage = pool.connection().await.unwrap(); @@ -446,9 +446,19 @@ mod tests { assert_eq!(actual_index, expected_index); } - async fn execute_mock_batch(output_handler: &mut OutputHandler) -> H256 { + async fn execute_mock_batch( + output_handler: &mut OutputHandler, + pool: &ConnectionPool, + ) -> H256 { let l1_batch_env = default_l1_batch_env(1, 1, Address::random()); let mut updates = UpdatesManager::new(&l1_batch_env, &default_system_env()); + pool.connection() + .await + .unwrap() + .blocks_dal() + .insert_l1_batch(l1_batch_env.into_unsealed_header(None)) + .await + .unwrap(); let tx = create_transaction(10, 100); let tx_hash = tx.hash(); @@ -533,7 +543,7 @@ mod tests { let mut output_handler = OutputHandler::new(Box::new(persistence)); tokio::spawn(l2_block_sealer.run()); - let tx_hash = execute_mock_batch(&mut output_handler).await; + let tx_hash = execute_mock_batch(&mut output_handler, &pool).await; // Check that the transaction is persisted. let mut storage = pool.connection().await.unwrap(); diff --git a/core/node/state_keeper/src/io/seal_logic/mod.rs b/core/node/state_keeper/src/io/seal_logic/mod.rs index 0dae7fae908a..5859d27786d9 100644 --- a/core/node/state_keeper/src/io/seal_logic/mod.rs +++ b/core/node/state_keeper/src/io/seal_logic/mod.rs @@ -132,6 +132,7 @@ impl UpdatesManager { protocol_version: Some(self.protocol_version()), system_logs: finished_batch.final_execution_state.system_logs.clone(), pubdata_input: finished_batch.pubdata_input.clone(), + fee_address: self.fee_account_address, }; let final_bootloader_memory = finished_batch @@ -141,7 +142,7 @@ impl UpdatesManager { transaction .blocks_dal() - .insert_l1_batch( + .mark_l1_batch_as_sealed( &l1_batch, &final_bootloader_memory, self.pending_l1_gas_count(), diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index e2a90f30691b..566eebf7ab72 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -556,3 +556,87 @@ async fn different_timestamp_for_l2_blocks_in_same_batch(commitment_mode: L1Batc .expect("no new L2 block params"); assert!(l2_block_params.timestamp > current_timestamp); } + +#[test_casing(2, COMMITMENT_MODES)] +#[tokio::test] +async fn continue_unsealed_batch_on_restart(commitment_mode: L1BatchCommitmentMode) { + let connection_pool = ConnectionPool::::test_pool().await; + let tester = Tester::new(commitment_mode); + tester.genesis(&connection_pool).await; + let mut storage = connection_pool.connection().await.unwrap(); + + let (mut mempool, mut mempool_guard) = + tester.create_test_mempool_io(connection_pool.clone()).await; + let (cursor, _) = mempool.initialize().await.unwrap(); + + // Insert a transaction into the mempool in order to open a new batch. 
+ let tx_filter = l2_tx_filter( + &tester.create_batch_fee_input_provider().await, + ProtocolVersionId::latest().into(), + ) + .await + .unwrap(); + let tx = tester.insert_tx( + &mut mempool_guard, + tx_filter.fee_per_gas, + tx_filter.gas_per_pubdata, + ); + storage + .transactions_dal() + .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .await + .unwrap(); + + let old_l1_batch_params = mempool + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .expect("no batch params generated"); + + // Restart + drop((mempool, mempool_guard, cursor)); + let (mut mempool, _) = tester.create_test_mempool_io(connection_pool.clone()).await; + let (cursor, _) = mempool.initialize().await.unwrap(); + + let new_l1_batch_params = mempool + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .expect("no batch params generated"); + + assert_eq!(old_l1_batch_params, new_l1_batch_params); +} + +#[test_casing(2, COMMITMENT_MODES)] +#[tokio::test] +async fn insert_unsealed_batch_on_init(commitment_mode: L1BatchCommitmentMode) { + let connection_pool = ConnectionPool::::test_pool().await; + let mut tester = Tester::new(commitment_mode); + tester.genesis(&connection_pool).await; + let fee_input = BatchFeeInput::pubdata_independent(55, 555, 5555); + let tx_result = tester + .insert_l2_block(&connection_pool, 1, 5, fee_input) + .await; + tester + .insert_sealed_batch(&connection_pool, 1, &[tx_result]) + .await; + // Pre-insert L2 block without its unsealed L1 batch counterpart + tester.set_timestamp(2); + tester + .insert_l2_block(&connection_pool, 2, 5, fee_input) + .await; + + let (mut mempool, _) = tester.create_test_mempool_io(connection_pool.clone()).await; + // Initialization is supposed to recognize that the current L1 batch is not present in the DB and + // insert it itself. 
+ let (cursor, _) = mempool.initialize().await.unwrap(); + + // Make sure we are able to fetch the newly inserted batch's params + let l1_batch_params = mempool + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .expect("no batch params generated"); + assert_eq!(l1_batch_params.fee_input, fee_input); + assert_eq!(l1_batch_params.first_l2_block.timestamp, 2); +} diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index 02170283e94b..062fc426e8cc 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -25,7 +25,7 @@ use zksync_node_test_utils::{ use zksync_types::{ block::L2BlockHeader, commitment::L1BatchCommitmentMode, - fee_model::{BatchFeeInput, FeeModelConfig, FeeModelConfigV1}, + fee_model::{BatchFeeInput, FeeModelConfig, FeeModelConfigV2}, l2::L2Tx, protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, system_contracts::get_system_smart_contracts, @@ -97,8 +97,13 @@ impl Tester { MainNodeFeeInputProvider::new( gas_adjuster, Arc::new(NoOpRatioProvider::default()), - FeeModelConfig::V1(FeeModelConfigV1 { + FeeModelConfig::V2(FeeModelConfigV2 { minimal_l2_gas_price: self.minimal_l2_gas_price(), + compute_overhead_part: 1.0, + pubdata_overhead_part: 1.0, + batch_overhead_l1_gas: 10, + max_gas_per_batch: 500_000_000_000, + max_pubdata_per_batch: 100_000_000_000, }), ) } @@ -116,8 +121,13 @@ impl Tester { let batch_fee_input_provider = MainNodeFeeInputProvider::new( gas_adjuster, Arc::new(NoOpRatioProvider::default()), - FeeModelConfig::V1(FeeModelConfigV1 { + FeeModelConfig::V2(FeeModelConfigV2 { minimal_l2_gas_price: self.minimal_l2_gas_price(), + compute_overhead_part: 1.0, + pubdata_overhead_part: 1.0, + batch_overhead_l1_gas: 10, + max_gas_per_batch: 500_000_000_000, + max_pubdata_per_batch: 100_000_000_000, }), ); diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index 0cebc5d8b471..6211755eb156 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -32,7 +32,7 @@ pub mod l2_block_updates; #[derive(Debug)] pub struct UpdatesManager { batch_timestamp: u64, - fee_account_address: Address, + pub fee_account_address: Address, batch_fee_input: BatchFeeInput, base_fee_per_gas: u64, base_system_contract_hashes: BaseSystemContractsHashes, diff --git a/core/tests/recovery-test/src/index.ts b/core/tests/recovery-test/src/index.ts index 462404af6065..8567be6d6d30 100644 --- a/core/tests/recovery-test/src/index.ts +++ b/core/tests/recovery-test/src/index.ts @@ -84,9 +84,9 @@ export async function getExternalNodeHealth(url: string) { } } -export async function dropNodeData(env: { [key: string]: string }, useZkSupervisor?: boolean, chain?: string) { - if (useZkSupervisor) { - let cmd = 'zk_inception external-node init'; +export async function dropNodeData(env: { [key: string]: string }, useZkStack?: boolean, chain?: string) { + if (useZkStack) { + let cmd = 'zkstack external-node init'; cmd += chain ? ` --chain ${chain}` : ''; await executeNodeCommand(env, cmd); } else { @@ -176,7 +176,7 @@ export class NodeProcess { logsFile: FileHandle | string, pathToHome: string, components: NodeComponents = NodeComponents.STANDARD, - useZkInception?: boolean, + useZkStack?: boolean, chain?: string ) { const logs = typeof logsFile === 'string' ? 
await fs.open(logsFile, 'a') : logsFile; @@ -186,7 +186,7 @@ export class NodeProcess { stdio: ['ignore', logs.fd, logs.fd], cwd: pathToHome, env, - useZkInception, + useZkStack, chain }); diff --git a/core/tests/recovery-test/src/utils.ts b/core/tests/recovery-test/src/utils.ts index 98c6b6d4405c..c60f5603f17d 100644 --- a/core/tests/recovery-test/src/utils.ts +++ b/core/tests/recovery-test/src/utils.ts @@ -48,19 +48,19 @@ export function runExternalNodeInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; env?: Parameters[0]['env']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception external-node run'; + if (useZkStack) { + command = 'zkstack external-node run'; command += chain ? ` --chain ${chain}` : ''; } else { command = 'zk external-node --'; diff --git a/core/tests/recovery-test/tests/snapshot-recovery.test.ts b/core/tests/recovery-test/tests/snapshot-recovery.test.ts index cadf146c5226..eca0da78d782 100644 --- a/core/tests/recovery-test/tests/snapshot-recovery.test.ts +++ b/core/tests/recovery-test/tests/snapshot-recovery.test.ts @@ -458,10 +458,10 @@ async function decompressGzip(filePath: string): Promise { }); } -async function createSnapshot(zkSupervisor: boolean) { +async function createSnapshot(useZkStack: boolean) { let command = ''; - if (zkSupervisor) { - command = `zk_supervisor snapshot create`; + if (useZkStack) { + command = `zkstack dev snapshot create`; command += ` --chain ${fileConfig.chain}`; } else { command = `zk run snapshots-creator`; diff --git a/core/tests/revert-test/tests/utils.ts b/core/tests/revert-test/tests/utils.ts index ea8a45b97c37..fe5cb40799a4 100644 --- a/core/tests/revert-test/tests/utils.ts +++ b/core/tests/revert-test/tests/utils.ts @@ -51,19 +51,19 @@ export function runServerInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; env?: Parameters[0]['env']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception server'; + if (useZkStack) { + command = 'zkstack server'; if (chain) { command += ` --chain ${chain}`; } @@ -78,19 +78,19 @@ export function runExternalNodeInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; env?: Parameters[0]['env']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception external-node run'; + if (useZkStack) { + command = 'zkstack external-node run'; command += chain ? 
` --chain ${chain}` : ''; } else { command = 'zk external-node'; @@ -334,7 +334,7 @@ export class NodeSpawner { stdio: ['ignore', logs, logs], cwd: pathToHome, env: env, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); @@ -362,7 +362,7 @@ export class NodeSpawner { stdio: ['ignore', logs, logs], cwd: pathToHome, env, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); diff --git a/core/tests/ts-integration/src/utils.ts b/core/tests/ts-integration/src/utils.ts index 7f67a9bcde30..28996e2015c8 100644 --- a/core/tests/ts-integration/src/utils.ts +++ b/core/tests/ts-integration/src/utils.ts @@ -14,21 +14,21 @@ export function runServerInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: ProcessEnvOptions['cwd']; env?: ProcessEnvOptions['env']; - useZkInception?: boolean; + useZkStack?: boolean; newL1GasPrice?: string; newPubdataPrice?: string; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception server'; + if (useZkStack) { + command = 'zkstack server'; if (chain) { command += ` --chain ${chain}`; } @@ -280,7 +280,7 @@ export class NodeSpawner { stdio: ['ignore', logs, logs], cwd: pathToHome, env: env, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index 43a029bc2648..b1856eab0ec3 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -409,10 +409,10 @@ testFees('Test fees', function () { }); afterAll(async () => { - await testMaster.deinitialize(); + await mainNodeSpawner.killAndSpawnMainNode(); // Returning the pubdata price to the default one // Spawning with no options restores defaults. 
- await mainNodeSpawner.killAndSpawnMainNode(); + await testMaster.deinitialize(); __ZKSYNC_TEST_CONTEXT_OWNER__.setL2NodePid(mainNodeSpawner.mainNode!.proc.pid!); }); }); diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index 2e223b9d7441..79a690a15802 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -64,9 +64,21 @@ describe('Upgrade test', function () { complexUpgraderAddress = '0x000000000000000000000000000000000000800f'; if (fileConfig.loadFromFile) { - const generalConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'general.yaml' }); - const contractsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'contracts.yaml' }); - const secretsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'secrets.yaml' }); + const generalConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'general.yaml' + }); + const contractsConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'contracts.yaml' + }); + const secretsConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'secrets.yaml' + }); ethProviderAddress = secretsConfig.l1.l1_rpc_url; web3JsonRpc = generalConfig.api.web3_json_rpc.http_url; @@ -89,7 +101,11 @@ describe('Upgrade test', function () { alice = tester.emptyWallet(); if (fileConfig.loadFromFile) { - const chainWalletConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'wallets.yaml' }); + const chainWalletConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'wallets.yaml' + }); adminGovWallet = new ethers.Wallet(chainWalletConfig.governor.private_key, alice._providerL1()); @@ -144,7 +160,7 @@ describe('Upgrade test', function () { components: serverComponents, stdio: ['ignore', logs, logs], cwd: pathToHome, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); // Server may need some time to recompile if it's a cold run, so wait for it. 
@@ -220,8 +236,15 @@ describe('Upgrade test', function () { }); step('Send l1 tx for saving new bootloader', async () => { - const path = `${pathToHome}/contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul.zbin`; - const bootloaderCode = ethers.hexlify(fs.readFileSync(path)); + const path = `${pathToHome}/contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul/playground_batch.yul.zbin`; + let bootloaderCode; + if (fs.existsSync(path)) { + bootloaderCode = '0x'.concat(fs.readFileSync(path).toString()); + } else { + const legacyPath = `${pathToHome}/contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul.zbin`; + bootloaderCode = ethers.hexlify(fs.readFileSync(legacyPath)); + } + bootloaderHash = ethers.hexlify(zksync.utils.hashBytecode(bootloaderCode)); const txHandle = await tester.syncWallet.requestExecute({ contractAddress: ethers.ZeroAddress, @@ -354,7 +377,7 @@ describe('Upgrade test', function () { components: serverComponents, stdio: ['ignore', logs, logs], cwd: pathToHome, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); await utils.sleep(10); diff --git a/core/tests/upgrade-test/tests/utils.ts b/core/tests/upgrade-test/tests/utils.ts index 7a7829caf86b..9f130c1e5565 100644 --- a/core/tests/upgrade-test/tests/utils.ts +++ b/core/tests/upgrade-test/tests/utils.ts @@ -7,19 +7,19 @@ export function runServerInBackground({ components, stdio, cwd, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters[0]['cwd']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }) { let command = ''; - if (useZkInception) { - command = 'zk_inception server'; + if (useZkStack) { + command = 'zkstack server'; command += chain ? ` --chain ${chain}` : ''; } else { command = 'cd $ZKSYNC_HOME && cargo run --bin zksync_server --release --'; @@ -71,8 +71,8 @@ export interface Contracts { stateTransitonManager: any; } -export function initContracts(pathToHome: string, zkToolbox: boolean): Contracts { - if (zkToolbox) { +export function initContracts(pathToHome: string, zkStack: boolean): Contracts { + if (zkStack) { const CONTRACTS_FOLDER = `${pathToHome}/contracts`; return { l1DefaultUpgradeAbi: new ethers.Interface( diff --git a/docker/Makefile b/docker/Makefile index c469587c8ffd..4e0ca51f904e 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -55,7 +55,7 @@ check-tools: check-nodejs check-yarn check-rust check-sqlx-cli check-docker chec # Check that contracts are checkout properly check-contracts: - @if [ ! -d ../contracts/l1-contracts/lib/forge-std/foundry.toml ] || [ -z "$$(ls -A ../contracts/l1-contracts/lib/forge-std/foundry.toml)" ]; then \ + @if [ -z "$$(ls -A ../contracts/l1-contracts/lib/forge-std/foundry.toml)" ]; then \ echo "l1-contracts git submodule is missing. 
Please re-download repo with 'git clone --recurse-submodules https://github.com/matter-labs/zksync-era.git'"; \ exit 1; \ fi @@ -65,9 +65,9 @@ check-contracts: prepare-contracts: check-tools check-contracts @cd ../ && \ export ZKSYNC_HOME=$$(pwd) && \ - export PATH=$$PATH:$${ZKSYNC_HOME}/bin && \ - zkt || true && \ - zk_supervisor contracts && \ + export PATH=$$PATH:$${ZKSYNC_HOME}/bin:$${ZKSYNC_HOME}/zkstack_cli/zkstackup && \ + zkstackup -g --local || true && \ + zkstack dev contracts && \ mkdir -p contracts/l1-contracts/artifacts # Download setup-key diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 7943dae835af..5688db2e3f52 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -91,6 +91,10 @@ RUN mkdir -p /etc/vyper-bin/0.3.10 \ && wget -O vyper0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10%2Bcommit.91361694.linux \ && mv vyper0.3.10 /etc/vyper-bin/0.3.10/vyper \ && chmod +x /etc/vyper-bin/0.3.10/vyper +RUN mkdir -p /etc/vyper-bin/0.4.0 \ + && wget -O vyper0.4.0 https://github.com/vyperlang/vyper/releases/download/v0.4.0/vyper.0.4.0+commit.e9db8d9f.linux \ + && mv vyper0.4.0 /etc/vyper-bin/0.4.0/vyper \ + && chmod +x /etc/vyper-bin/0.4.0/vyper COPY --from=builder /usr/src/zksync/target/release/zksync_contract_verifier /usr/bin/ COPY contracts/system-contracts/bootloader/build/artifacts/ /contracts/system-contracts/bootloader/build/artifacts/ diff --git a/docker/prover-autoscaler/Dockerfile b/docker/prover-autoscaler/Dockerfile new file mode 100644 index 000000000000..246e8099ffd3 --- /dev/null +++ b/docker/prover-autoscaler/Dockerfile @@ -0,0 +1,25 @@ +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder + +ARG DEBIAN_FRONTEND=noninteractive + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + +WORKDIR /usr/src/zksync +COPY . . 
+ +RUN cd prover && cargo build --release --bin zksync_prover_autoscaler + +FROM ghcr.io/matter-labs/zksync-runtime-base:latest + +COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_autoscaler /usr/bin/ + +ENTRYPOINT ["/usr/bin/zksync_prover_autoscaler"] diff --git a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile b/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile index 0c0fd7a9bb3d..90f089ba8bd4 100644 --- a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile +++ b/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile @@ -104,7 +104,7 @@ RUN wget -c https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2 && \ # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" +ENV PATH="${ZKSYNC_HOME}/bin:${ZKSYNC_HOME}/zkstack_cli/zkstackup:${HOME}/.local/bin:${PATH}" ENV CI=1 RUN cargo install sccache ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache diff --git a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile b/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile index 5bd569b7d20b..b6b023a5b7f4 100644 --- a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile +++ b/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile @@ -93,7 +93,7 @@ RUN wget -c https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksol # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" +ENV PATH="${ZKSYNC_HOME}/bin:${ZKSYNC_HOME}/zkstack_cli/zkstackup:${HOME}/.local/bin:${PATH}" ENV CI=1 RUN cargo install sccache ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile index 53e532653111..c04e5720e4d7 100644 --- a/docker/zk-environment/Dockerfile +++ b/docker/zk-environment/Dockerfile @@ -164,7 +164,7 @@ RUN wget -c https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2 && \ # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" +ENV PATH="${ZKSYNC_HOME}/bin:${ZKSYNC_HOME}/zkstack_cli/zkstackup:${HOME}/.local/bin:${PATH}" ENV CI=1 ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache diff --git a/docs/guides/external-node/00_quick_start.md b/docs/guides/external-node/00_quick_start.md index 67a1b89eef51..07e52085cf4f 100644 --- a/docs/guides/external-node/00_quick_start.md +++ b/docs/guides/external-node/00_quick_start.md @@ -6,6 +6,8 @@ Install `docker compose` and `Docker` ## Running ZKsync node locally +These commands start a ZKsync node locally inside Docker. + To start a mainnet instance, run: ```sh @@ -34,9 +36,10 @@ cd docker-compose-examples sudo docker compose --file testnet-external-node-docker-compose.yml down --volumes ``` -You can see the status of the node (after recovery) in [local grafana dashboard](http://localhost:3000/dashboards). +### Observability -Those commands start ZKsync node locally inside docker. +You can see the status of the node (after recovery) in the [local Grafana dashboard](http://localhost:3000/dashboards). You +can also access a debug page with more information about the node [here](http://localhost:5000). The HTTP JSON-RPC API can be accessed on port `3060` and WebSocket API can be accessed on port `3061`.
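Editor's note: a quick way to verify those two endpoints are serving is a minimal sketch like the one below, assuming ethers v6 is installed and the default ports from the compose examples above; it is an illustration, not part of the patch.

```ts
import { ethers } from 'ethers';

// Defaults from the docker-compose examples; adjust if you remapped the ports.
const HTTP_URL = 'http://localhost:3060';
const WS_URL = 'ws://localhost:3061';

async function main() {
    // Standard eth_* JSON-RPC methods are served over HTTP on port 3060.
    const http = new ethers.JsonRpcProvider(HTTP_URL);
    console.log('L2 block number:', await http.getBlockNumber());

    // The WebSocket endpoint on port 3061 additionally supports subscriptions.
    const ws = new ethers.WebSocketProvider(WS_URL);
    ws.on('block', (n) => console.log('new L2 block:', n));
}

main().catch(console.error);
```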
diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json index be869ead40b2..74b4b8228016 100644 --- a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json +++ b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json @@ -24,7 +24,7 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 3, + "id": 2, "links": [], "liveNow": false, "panels": [ @@ -1005,7 +1005,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "description": "Rate of RPC client requests, in packets per second.", + "description": "Rate of RPC client requests, in requests per second.", "fieldConfig": { "defaults": { "color": { @@ -1054,7 +1054,7 @@ } ] }, - "unit": "pps" + "unit": "reqps" }, "overrides": [] }, @@ -1098,7 +1098,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "description": "Rate of RPC server responses, in packets per second.", + "description": "Rate of RPC server responses, in requests per second.", "fieldConfig": { "defaults": { "color": { @@ -1147,7 +1147,7 @@ } ] }, - "unit": "pps" + "unit": "reqps" }, "overrides": [] }, @@ -1202,6 +1202,6 @@ "timezone": "", "title": "Consensus", "uid": "STAAEORNk", - "version": 4, + "version": 2, "weekStart": "" } \ No newline at end of file diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json index d7177ae802ef..0b3cb681e3be 100644 --- a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json +++ b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json @@ -24,7 +24,7 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 2, + "id": 1, "links": [], "liveNow": false, "panels": [ @@ -103,13 +103,49 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 0, + "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" } }, "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, "unit": "bytes" }, "overrides": [] @@ -123,18 +159,11 @@ "id": 2, "options": { "legend": { + "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, - "pieType": "pie", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, "tooltip": { "mode": "single", "sort": "none" @@ -167,7 +196,7 @@ } ], "title": "Total disk space usage", - "type": "piechart" + "type": "timeseries" }, { "datasource": { @@ -409,6 +438,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "description": "Shows the batch numbers on the local node and the server node.", "fieldConfig": { "defaults": { "color": { @@ -421,7 +451,7 @@ "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", - "fillOpacity": 
33, + "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, @@ -470,13 +500,13 @@ "x": 12, "y": 16 }, - "id": 4, + "id": 39, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": false + "showLegend": true }, "tooltip": { "mode": "single", @@ -489,14 +519,28 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "editorMode": "builder", "exemplar": true, - "expr": "sum by (stage) (external_node_sync_lag)", + "expr": "sum by(stage) (external_node_fetcher_l1_batch{stage=\"open\"})", "interval": "", - "legendFormat": "", + "legendFormat": "Server", + "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "zksync_consensus_storage_batch_store_next_persisted_batch", + "hide": false, + "legendFormat": "Local", + "range": true, + "refId": "B" } ], - "title": "Sync lag (blocks)", + "title": "L1 batch sync lag", "transformations": [], "type": "timeseries" }, @@ -546,8 +590,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -598,7 +641,6 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "description": "The percentage of transactions that are being reverted or that are succeeding.", "fieldConfig": { "defaults": { "color": { @@ -610,8 +652,8 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "drawStyle": "bars", - "fillOpacity": 0, + "drawStyle": "line", + "fillOpacity": 33, "gradientMode": "none", "hideFrom": { "legend": false, @@ -619,16 +661,19 @@ "viz": false }, "lineInterpolation": "linear", - "lineWidth": 2, + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", - "spanNulls": false, + "spanNulls": true, "stacking": { "group": "A", - "mode": "percent" + "mode": "none" }, "thresholdsStyle": { "mode": "off" @@ -639,8 +684,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -657,13 +701,13 @@ "x": 12, "y": 24 }, - "id": 38, + "id": 4, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": true + "showLegend": false }, "tooltip": { "mode": "single", @@ -676,14 +720,15 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "builder", - "expr": "sum by(status) (increase(server_state_keeper_tx_execution_result[1h]))", - "legendFormat": "__auto", - "range": true, + "exemplar": true, + "expr": "sum by (stage) (external_node_sync_lag)", + "interval": "", + "legendFormat": "", "refId": "A" } ], - "title": "Transactions execution status (%)", + "title": "L2 blocks sync lag", + "transformations": [], "type": "timeseries" }, { @@ -731,8 +776,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -778,6 +822,98 @@ "title": "Avg number of transactions in L2 block", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "The percentage of transactions that are being reverted or that are succeeding.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, 
+ "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "percent" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 32 + }, + "id": 38, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "sum by(status) (increase(server_state_keeper_tx_execution_result[1h]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Transactions execution status (%)", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", @@ -823,8 +959,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -839,7 +974,7 @@ "h": 8, "w": 12, "x": 12, - "y": 32 + "y": 40 }, "id": 34, "options": { @@ -886,6 +1021,6 @@ "timezone": "", "title": "General", "uid": "1", - "version": 9, + "version": 3, "weekStart": "" } \ No newline at end of file diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml index 65f33c78b0e0..fac65298bbc0 100644 --- a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml +++ b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml @@ -5,6 +5,7 @@ providers: orgId: 1 folder: '' type: file + allowUiUpdates: true disableDeletion: false updateIntervalSeconds: 10 # How often Grafana will scan for changed dashboards options: diff --git a/docs/specs/README.md b/docs/specs/README.md index 1f163bf7845f..d0b087ae93e3 100644 --- a/docs/specs/README.md +++ b/docs/specs/README.md @@ -33,4 +33,4 @@ 1. [ZK Chain ecosystem](./zk_chains/README.md) - [Overview](./zk_chains/overview.md) - [Shared Bridge](./zk_chains/shared_bridge.md) - - [Hyperbridges](./zk_chains/hyperbridges.md) + - [Interop](./zk_chains/interop.md) diff --git a/docs/specs/zk_chains/README.md b/docs/specs/zk_chains/README.md index 4de575899dd1..ce0a7c311a2f 100644 --- a/docs/specs/zk_chains/README.md +++ b/docs/specs/zk_chains/README.md @@ -2,4 +2,4 @@ - [Overview](./overview.md) - [Shared Bridge](./shared_bridge.md) -- [Hyperbridges](./hyperbridges.md) +- [Interop](./interop.md) diff --git a/docs/specs/zk_chains/gateway.md b/docs/specs/zk_chains/gateway.md new file mode 100644 index 000000000000..f4ee68e242e6 --- /dev/null +++ b/docs/specs/zk_chains/gateway.md @@ -0,0 +1 @@ +# Gateway diff --git a/docs/specs/zk_chains/hyperbridges.md b/docs/specs/zk_chains/hyperbridges.md deleted file mode 100644 index 614fe61427e3..000000000000 --- a/docs/specs/zk_chains/hyperbridges.md +++ /dev/null @@ -1,41 +0,0 @@ -# Hyperbridges - -## Introduction - -In the Shared bridge document we described how the L1 smart contracts work to support multiple chains, and we emphasized -that the core feature is hyperbridging, but we did not outline the hyperbridges themselves. 
This is because hyperbridges
-are mostly L2 contracts. In this document we describe what hyperbridges are, and specify the necessary infrastructure. -
-### Hyperbridge description
-
-Hyperbridges are trustless and cheap general native bridges between ZK Chains, allowing cross-chain function calls.
-Trustlessness is achieved by relying on the main ZK Chain bridge to send a compressed message to L1, which is then sent
-to and expanded on the destination ZK Chain.
-
-Technically they are a system of smart contracts that build on top of the enshrined L1<>L2 validating bridges, and can
-interpret messages sent from L2 to L2 by verifying Merkle proofs. They are built alongside the protocol, they can
-transfer the native asset of the ecosystem, and they can be used for asynchronous function calls between ZK Chains.
-
-![Hyperbridges](./img/hyperbridges.png)
-
-The trustless nature of hyperbridges allows the ecosystem to resemble a single VM. To illustrate imagine a new ZK Chain
-joining the ecosystem. We will want ether/Dai/etc. to be accessible on this ZK Chain. This can be done automatically.
-There will be a central erc20 deployer contract in the ecosystem, which will deploy the new ERC20 contract via the
-hyperbridge. After the contract is deployed it will be able to interact other Dai contracts in the ecosystem.
-
-### High Level design
-
-![Hyperbridging](./img/hyperbridging.png)
-
-### L1
-
-For the larger context see the [Shared Bridge](./shared_bridge.md) document, here we will focus on
-
-- HyperMailbox (part of Bridgehub). Contains the Hyperroot, root of Merkle tree of Hyperlogs. Hyperlogs are the L2->L1
-  SysLogs that record the sent hyperbridge messages from the L2s.
-
-### L2 Contracts
-
-- Outbox system contract. It collects the hyperbridge txs into the hyperlog of the ZK Chain.
-- Inbox system contract. This is where the hyperroot is imported and sent to L1 for settlement. Merkle proofs are
-  verified here, tx calls are started from here, nullifiers are stored here (add epochs later) diff --git a/docs/specs/zk_chains/interop.md b/docs/specs/zk_chains/interop.md new file mode 100644 index 000000000000..947742909b8d --- /dev/null +++ b/docs/specs/zk_chains/interop.md @@ -0,0 +1,49 @@
+# Interop
+
+## Introduction
+
+In the Shared Bridge document we described how the L1 smart contracts work to support multiple chains, and we emphasized
+that the core feature is interop. Interop happens via the same L1->L2 interface as described in the L1SharedBridge doc.
+There is (with the interop upgrade) a Bridgehub, AssetRouter, NativeTokenVault and Nullifier deployed on every L2, and
+they serve the same purpose as their L1 counterparts. Namely:
+
+- The Bridgehub is used to start the transaction.
+- The AssetRouter and NativeTokenVault are the bridge contracts that handle the tokens.
+- The Nullifier is used to prevent reexecution of xL2 txs.
+
+### Interop process
+
+![Interop](./img/hyperbridging.png)
+
+The interop process has 7 main steps, each with its substeps:
+
+1. Starting the transaction on the sending chain
+
+   - The user calls the Bridgehub contract. If they want to use a bridge they call the `requestL2TransactionTwoBridges`
+     function; if they want to make a direct call they call the `requestL2TransactionDirect` function.
+   - The Bridgehub collects the base token fees necessary for the interop tx to be processed on the destination chain,
+     and if using the TwoBridges method the calldata and the destination contract (for more details see the Shared
+     Bridge doc).
+   - The Bridgehub emits a `NewPriorityRequest` event; this is the same as the one in our Mailbox contract. This event
+     specifies the xL2 tx, which uses the same format as L1->L2 txs. This event can be picked up and used to receive
+     the tx.
+   - This new priority request is sent as an L2->L1 message; it is included in the chain's Merkle tree of emitted txs.
+
+2. The chain settles its proof on L1 or the Gateway, whichever is used as the settlement layer for the chain.
+3. On the Settlement Layer (SL), the MessageRoot is updated in the MessageRoot contract. The new data includes all the
+   L2->L1 messages that are emitted from the settling chain.
+4. The receiving chain picks up the updated MessageRoot from the Settlement Layer.
+5. Now the xL2 tx can be imported on the destination chain. Along with the tx, a Merkle proof needs to be sent to link
+   it to the MessageRoot.
+6. Receiving the tx on the destination chain
+
+   - On the destination chain the xL2 tx is verified. This means the Merkle proof is checked against the MessageRoot.
+     This shows that the xL2 tx was indeed sent.
+   - After this the tx can be executed. The tx hash is stored in the L2Nullifier contract, so that the tx cannot be
+     replayed.
+   - The specified contract is called, with the calldata, and the message sender =
+     `keccak256(originalMessageSender, originChainId) >> 160`. This is to prevent the collision of the msg.sender
+     addresses.
+
+7. The destination chain settles on the SL and the MessageRoot that it imported is checked. diff --git a/docs/specs/zk_chains/shared_bridge.md b/docs/specs/zk_chains/shared_bridge.md index c464a7a154bf..b43d3082b621 100644 --- a/docs/specs/zk_chains/shared_bridge.md +++ b/docs/specs/zk_chains/shared_bridge.md @@ -17,7 +17,7 @@ If you want to know more about ZK Chains, check this We want to create a system where:
 - ZK Chains should be launched permissionlessly within the ecosystem.
-- Hyperbridges should enable unified liquidity for assets across the ecosystem.
+- Interop should enable unified liquidity for assets across the ecosystem.
 - Multi-chain smart contracts need to be easy to develop, which means easy access to traditional bridges, and other supporting architecture.
@@ -58,20 +58,19 @@ be able to leverage them when available). #### Bridgehub
 - Acts as a hub for bridges, so that they have a single point of communication with all ZK Chain contracts. This allows
-  L1 assets to be locked in the same contract for all ZK Chains, including L3s and validiums. The `Bridgehub` also
-  implements the following:
+  L1 assets to be locked in the same contract for all ZK Chains. The `Bridgehub` also implements the following features:
   - `Registry` This is where ZK Chains can register, starting in a permissioned manner, but with the goal to be
-    permissionless in the future. This is where their `chainID` is determined. L3s will also register here. This
-    `Registry` is also where State Transition contracts should register. Each chain has to specify its desired ST when
-    registering (Initially, only one will be available).
+    permissionless in the future. This is where their `chainID` is determined. Chains on Gateway will also register here.
+    This `Registry` is also where Chain Type Manager contracts should register. Each chain has to specify its desired CTM
+    when registering (initially, only one will be available).
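    For illustration, here is a minimal sketch of this registration flow. The `IBridgehubRegistry` interface name and
    the example contract are assumed for the sketch; only the two function signatures, which are excerpted just below,
    come from the doc.

    ```
    pragma solidity ^0.8.0;

    // Sketch only: the interface name is assumed; the signatures mirror the `Registry` excerpt below.
    interface IBridgehubRegistry {
        function newChainTypeManager(address _chainTypeManager) external;
        function newChain(uint256 _chainId, address _chainTypeManager) external returns (uint256 chainId);
    }

    contract RegistrationExample {
        function register(IBridgehubRegistry registry, uint256 chainId, address ctm) external {
            // A CTM is registered once and can then back many chains.
            registry.newChainTypeManager(ctm);
            // Each chain registers with its chosen CTM; its chainID is fixed at this point.
            registry.newChain(chainId, ctm);
        }
    }
    ```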
``` function newChain( uint256 _chainId, - address _stateTransition + address _chainTypeManager ) external returns (uint256 chainId); - function newStateTransition(address _stateTransition) external; + function newChainTypeManager(address _chainTypeManager) external; ```
- `BridgehubMailbox` routes messages to the Diamond proxy’s Mailbox facet based on chainID
  - Same as the current zkEVM [Mailbox](https://github.com/matter-labs/era-contracts/blob/main/l1-contracts/contracts/zksync/facets/Mailbox.sol), just with chainId,
-  - Ether needs to be deposited and withdrawn from here.
  - This is where L2 transactions can be requested.
``` - function requestL2Transaction( - uint256 _chainId, - address _contractL2, - uint256 _l2Value, - bytes calldata _calldata, - uint256 _l2GasLimit, - uint256 _l2GasPerPubdataByteLimit, - bytes[] calldata _factoryDeps, - address _refundRecipient - ) public payable override returns (bytes32 canonicalTxHash) { - address proofChain = bridgeheadStorage.proofChain[_chainId]; - canonicalTxHash = IProofChain(proofChain).requestL2TransactionBridgehead( - _chainId, - msg.value, - msg.sender, - _contractL2, - _l2Value, - _calldata, - _l2GasLimit, - _l2GasPerPubdataByteLimit, - _factoryDeps, - _refundRecipient - ); - } + function requestL2TransactionTwoBridges( + L2TransactionRequestTwoBridgesOuter calldata _request + ) ```
-- `Hypermailbox`
-  - This will allow general message passing (L2<>L2, L2<>L3, etc). This is where the `Mailbox` sends the `Hyperlogs`. - `Hyperlogs` are commitments to these messages sent from a single ZK Chain. `Hyperlogs` are aggregated into a - `HyperRoot` in the `HyperMailbox`.
-  - This component has not been implemented yet
+ ``` + struct L2TransactionRequestTwoBridgesOuter { + uint256 chainId; + uint256 mintValue; + uint256 l2Value; + uint256 l2GasLimit; + uint256 l2GasPerPubdataByteLimit; + address refundRecipient; + address secondBridgeAddress; + uint256 secondBridgeValue; + bytes secondBridgeCalldata; + } + ```
-#### Main asset shared bridges
``` + struct L2TransactionRequestTwoBridgesInner { + bytes32 magicValue; + address l2Contract; + bytes l2Calldata; + bytes[] factoryDeps; + bytes32 txDataHash; +} +```
+
+- The `requestL2TransactionTwoBridges` function should be used most of the time when bridging to a chain (the exception
+  is when the user bridges directly to a contract on the L2, without using a bridge contract on L1). Its logic is
+  the following:
+
+  - The user wants to bridge to the chain with the provided `L2TransactionRequestTwoBridgesOuter.chainId`.
+  - Two bridges are called, the baseTokenBridge (i.e. the L1SharedBridge or L1AssetRouter after the Gateway upgrade) and
+    an arbitrary second bridge. The Bridgehub will provide the original caller address to both bridges, which can
+    request that the appropriate amount of tokens are transferred from the caller to the bridge. The caller has to set
+    the appropriate allowance for both bridges. (Often the bridges coincide, but they don't have to.)
+  - The `L2TransactionRequestTwoBridgesOuter.mintValue` is the amount of baseTokens that will be minted on L2. This is
+    the amount of tokens that the baseTokenBridge will request from the user. If the baseToken is Eth, it will be
+    forwarded to the baseTokenBridge.
+  - The `L2TransactionRequestTwoBridgesOuter.l2Value` is the amount of tokens that will be deposited on L2. The second
+    bridge and the Mailbox receive this as an input (although our second bridge does not use the value).
+  - The `L2TransactionRequestTwoBridgesOuter.l2GasLimit` is the maximum amount of gas that will be spent on L2 to
+    complete the transaction. The Mailbox receives this as an input.
+  - The `L2TransactionRequestTwoBridgesOuter.l2GasPerPubdataByteLimit` is the maximum amount of gas per pubdata byte
+    that will be spent on L2 to complete the transaction. The Mailbox receives this as an input.
+  - The `L2TransactionRequestTwoBridgesOuter.refundRecipient` is the address that will be refunded for the gas spent on
+    L2. The Mailbox receives this as an input.
+  - The `L2TransactionRequestTwoBridgesOuter.secondBridgeAddress` is the address of the second bridge that will be
+    called. This is the arbitrary address that is called from the Bridgehub.
+  - The `L2TransactionRequestTwoBridgesOuter.secondBridgeValue` is the amount of tokens that will be deposited on L2.
+    The second bridge receives this value as the baseToken (i.e. Eth on L1).
+  - The `L2TransactionRequestTwoBridgesOuter.secondBridgeCalldata` is the calldata that will be passed to the second
+    bridge. This is the arbitrary calldata that is passed from the Bridgehub to the second bridge.
+  - The secondBridge returns the `L2TransactionRequestTwoBridgesInner` struct to the Bridgehub. This is also passed to
+    the Mailbox as input. This is where the destination contract, calldata, factoryDeps are determined on the L2.
+
+  This setup allows the user to bridge the baseToken of the origin chain A to a chain B with some other baseToken, by
+  specifying A's token in the secondBridgeValue, which will be minted on the destination chain as an ERC20 token,
+  and specifying the amount of B's token in the mintValue, which will be minted as the baseToken and used to cover the
+  gas costs.
+
+#### Main asset shared bridges
 - Some assets have to be natively supported (ETH, WETH) and it also makes sense to support some generally accepted token standards (ERC20 tokens), as this makes it easy to bridge those tokens (and ensures a single version of them exists on @@ -147,25 +176,18 @@ be able to leverage them when available). ); ```
-This topic is now covered more thoroughly by the Custom native token discussion. -
-[Custom native token compatible with Hyperbridging](https://www.notion.so/Custom-native-token-compatible-with-Hyperbridging-54e190a1a76f44248cf84a38304a0641?pvs=21)
+#### Chain Type Manager
-#### State Transition -
-- `StateTransition` A state transition manages proof verification and DA for multiple chains. It also implements the
+- `ChainTypeManager` A chain type manager manages proof verification and DA for multiple chains. It also implements the following functionalities:
-  - `StateTransitionRegistry` The ST is shared for multiple chains, so initialization and upgrades have to be the same
-    for all chains. Registration is not permissionless but happens based on the registrations in the bridgehub’s
-    `Registry`. At registration a `DiamondProxy` is deployed and initialized with the appropriate `Facets` for each ZK
-    Chain.
+  - `ChainTypeRegistry` The CTM is shared for multiple chains, so initialization and upgrades have to be the same for all
+    chains. Registration is not permissionless but happens based on the registrations in the bridgehub’s `Registry`. At
+    registration a `DiamondProxy` is deployed and initialized with the appropriate `Facets` for each ZK Chain.
- `Facets` and `Verifier` are shared across chains that relies on the same ST: `Base`, `Executor` , `Getters`, `Admin` , `Mailbox.`The `Verifier` is the contract that actually verifies the proof, and is called by the `Executor`.
  - Upgrade Mechanism The system requires all chains to be up-to-date with the latest implementation, so whenever an update is needed, we have to “force” each chain to update, but due to decentralization, we have to give each chain a
-    time frame (more information in the
-    [Upgrade Mechanism](https://www.notion.so/ZK-Stack-shared-bridge-alpha-version-a37c4746f8b54fb899d67e474bfac3bb?pvs=21)
-    section). This is done in the update mechanism contract, this is where the bootloader and system contracts are
+    time frame. This is done in the update mechanism contract; this is where the bootloader and system contracts are
    published, and the `ProposedUpgrade` is stored. Then each chain can call this upgrade for themselves as needed. After the deadline is over, the not-updated chains are frozen, that is, cannot post new proofs. Frozen chains can unfreeze by updating their proof system. @@ -180,6 +202,7 @@ This topic is now covered more thoroughly by the Custom native token discussion.
 - A chain might implement its own specific consensus mechanism. This needs its own contracts. Only this contract will be able to submit proofs to the State Transition contract.
+- DA contracts.
 - Currently, the `ValidatorTimelock` is an example of such a contract. ### Components interactions @@ -199,22 +222,6 @@ features required to process proofs. The chain ID is set in the VM in a special
-#### WETH Contract
-
-Ether, the native gas token is part of the core system contracts, so deploying it is not necessary. But WETH is just a
-smart contract, it needs to be deployed and initialised. This happens from the L1 WETH bridge. This deploys on L2 the
-corresponding bridge and ERC20 contract. This is deployed from L1, but the L2 address is known at deployment time.
-
-![deployWeth.png](./img/deployWeth.png)
-
-#### Deposit WETH
-
-The user can deposit WETH into the ecosystem using the WETH bridge on L1. The destination chain ID has to be specified.
-The Bridgehub unwraps the WETH, and keeps the ETH, and send a message to the destination L2 to mint WETH to the
-specified address.
-
-![depositWeth.png](./img/depositWeth.png)
-
--- ### Common Standards and Upgrades diff --git a/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol index 5f4de59681fd..baa0d37b7530 100644 --- a/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol +++ b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol @@ -68,7 +68,7 @@ contract MockContractDeployer { Version1 }
-    address constant CODE_ORACLE_ADDR = address(0x8012);
+    IAccountCodeStorage constant ACCOUNT_CODE_STORAGE_CONTRACT = IAccountCodeStorage(address(0x8002));
     MockKnownCodeStorage constant KNOWN_CODE_STORAGE_CONTRACT = MockKnownCodeStorage(address(0x8004));
     /// The returned value is obviously incorrect in the general case, but works well enough when called by the bootloader. @@ -78,15 +78,166 @@ contract MockContractDeployer {
     /// Replaces real deployment with publishing a surrogate EVM "bytecode".
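    /// The surrogate is "deployed" at a deterministic address, `msg.sender + 1` (see `create` below), so tests can predict where the published code will live.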
/// @param _salt bytecode hash - /// @param _bytecodeHash ignored, since it's not possible to set arbitrarily /// @param _input bytecode to publish function create( bytes32 _salt, - bytes32 _bytecodeHash, + bytes32, // ignored, since it's not possible to set arbitrarily bytes calldata _input ) external payable returns (address) { KNOWN_CODE_STORAGE_CONTRACT.setEVMBytecodeHash(_salt); KNOWN_CODE_STORAGE_CONTRACT.publishEVMBytecode(_input); - return address(0); + address newAddress = address(uint160(msg.sender) + 1); + ACCOUNT_CODE_STORAGE_CONTRACT.storeAccountConstructedCodeHash(newAddress, _salt); + return newAddress; + } +} + +interface IAccountCodeStorage { + function getRawCodeHash(address _address) external view returns (bytes32); + function storeAccountConstructedCodeHash(address _address, bytes32 _hash) external; +} + +interface IRecursiveContract { + function recurse(uint _depth) external returns (uint); +} + +/// Native incrementing library. Not actually a library to simplify deployment. +contract IncrementingContract { + // Should not collide with other storage slots + uint constant INCREMENTED_SLOT = 0x123; + + function getIncrementedValue() public view returns (uint _value) { + assembly { + _value := sload(INCREMENTED_SLOT) + } + } + + function increment(address _thisAddress, uint _thisBalance) external { + require(msg.sender == tx.origin, "msg.sender not retained"); + require(address(this) == _thisAddress, "this address"); + require(address(this).balance == _thisBalance, "this balance"); + assembly { + sstore(INCREMENTED_SLOT, add(sload(INCREMENTED_SLOT), 1)) + } + } + + /// Tests delegation to a native or EVM contract at the specified target. + function testDelegateCall(address _target) external { + uint valueSnapshot = getIncrementedValue(); + (bool success, ) = _target.delegatecall(abi.encodeCall( + IncrementingContract.increment, + (address(this), address(this).balance) + )); + require(success, "delegatecall reverted"); + require(getIncrementedValue() == valueSnapshot + 1, "invalid value"); + } + + function testStaticCall(address _target, uint _expectedValue) external { + (bool success, bytes memory rawValue) = _target.staticcall(abi.encodeCall( + this.getIncrementedValue, + () + )); + require(success, "static call reverted"); + (uint value) = abi.decode(rawValue, (uint)); + require(value == _expectedValue, "value mismatch"); + + (success, ) = _target.staticcall(abi.encodeCall( + IncrementingContract.increment, + (address(this), address(this).balance) + )); + require(!success, "staticcall should've reverted"); + } +} + +uint constant EVM_EMULATOR_STIPEND = 1 << 30; + +/** + * Mock EVM emulator used in low-level tests. + */ +contract MockEvmEmulator is IRecursiveContract, IncrementingContract { + IAccountCodeStorage constant ACCOUNT_CODE_STORAGE_CONTRACT = IAccountCodeStorage(address(0x8002)); + + /// Set to `true` for testing logic sanity. + bool isUserSpace; + + modifier validEvmEntry() { + if (!isUserSpace) { + require(gasleft() >= EVM_EMULATOR_STIPEND, "no stipend"); + // Fetch bytecode for the executed contract. + bytes32 bytecodeHash = ACCOUNT_CODE_STORAGE_CONTRACT.getRawCodeHash(address(this)); + require(bytecodeHash != bytes32(0), "called contract not deployed"); + uint bytecodeVersion = uint(bytecodeHash) >> 248; + require(bytecodeVersion == 2, "non-EVM bytecode"); + + // Check that members of the current address are well-defined. 
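+            // Both the exposed `code.length` and `codehash` must agree with the bytecode hash stored for this address.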
+ require(address(this).code.length != 0, "invalid code"); + require(address(this).codehash == bytecodeHash, "bytecode hash mismatch"); + } + _; + } + + function testPayment(uint _expectedValue, uint _expectedBalance) public payable validEvmEntry { + require(msg.value == _expectedValue, "unexpected msg.value"); + require(address(this).balance == _expectedBalance, "unexpected balance"); + } + + IRecursiveContract recursionTarget; + + function recurse(uint _depth) public validEvmEntry returns (uint) { + require(gasleft() < 2 * EVM_EMULATOR_STIPEND, "stipend provided multiple times"); + + if (_depth <= 1) { + return 1; + } else { + IRecursiveContract target = (address(recursionTarget) == address(0)) ? this : recursionTarget; + // The real emulator limits amount of gas when performing far calls by EVM gas, so we emulate this behavior as well. + uint gasToSend = isUserSpace ? gasleft() : (gasleft() - EVM_EMULATOR_STIPEND); + return target.recurse{gas: gasToSend}(_depth - 1) * _depth; + } + } + + function testRecursion(uint _depth, uint _expectedValue) external validEvmEntry returns (uint) { + require(recurse(_depth) == _expectedValue, "incorrect recursion"); + } + + function testExternalRecursion(uint _depth, uint _expectedValue) external validEvmEntry returns (uint) { + recursionTarget = new NativeRecursiveContract(IRecursiveContract(this)); + uint returnedValue = recurse(_depth); + recursionTarget = this; // This won't work on revert, but for tests, it's good enough + require(returnedValue == _expectedValue, "incorrect recursion"); + } + + MockContractDeployer constant CONTRACT_DEPLOYER_CONTRACT = MockContractDeployer(address(0x8006)); + + /// Emulates EVM contract deployment and a subsequent call to it in a single transaction. + function testDeploymentAndCall(bytes32 _evmBytecodeHash, bytes calldata _evmBytecode) external validEvmEntry { + IRecursiveContract newContract = IRecursiveContract(CONTRACT_DEPLOYER_CONTRACT.create( + _evmBytecodeHash, + _evmBytecodeHash, + _evmBytecode + )); + require(uint160(address(newContract)) == uint160(address(this)) + 1, "unexpected address"); + require(address(newContract).code.length > 0, "contract code length"); + require(address(newContract).codehash != bytes32(0), "contract code hash"); + + uint gasToSend = gasleft() - EVM_EMULATOR_STIPEND; + require(newContract.recurse{gas: gasToSend}(5) == 120, "unexpected recursive result"); + } + + fallback() external validEvmEntry { + require(msg.data.length == 0, "unsupported call"); + } +} + +contract NativeRecursiveContract is IRecursiveContract { + IRecursiveContract target; + + constructor(IRecursiveContract _target) { + target = _target; + } + + function recurse(uint _depth) external returns (uint) { + require(gasleft() < EVM_EMULATOR_STIPEND, "stipend spilled to native contract"); + return (_depth <= 1) ? 
1 : target.recurse(_depth - 1) * _depth; } } diff --git a/etc/env/consensus_config.yaml b/etc/env/consensus_config.yaml index 304ea31fac9c..2564865eeb31 100644 --- a/etc/env/consensus_config.yaml +++ b/etc/env/consensus_config.yaml @@ -1,3 +1,4 @@ +port: 3054 server_addr: "127.0.0.1:3054" public_addr: "127.0.0.1:3054" max_payload_size: 2500000 diff --git a/etc/env/en_consensus_config.yaml b/etc/env/en_consensus_config.yaml index f759e72e891c..5c428866cb6c 100644 --- a/etc/env/en_consensus_config.yaml +++ b/etc/env/en_consensus_config.yaml @@ -1,3 +1,4 @@ +port: 3055 server_addr: '127.0.0.1:3055' public_addr: '127.0.0.1:3055' max_payload_size: 2500000 diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 96bf29201142..620d3ab1a023 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -375,3 +375,10 @@ da_dispatcher: external_proof_integration_api: http_port: 3073 + +consensus: + port: 3054 + server_addr: "127.0.0.1:3054" + public_addr: "127.0.0.1:3054" + max_payload_size: 2500000 + gossip_dynamic_inbound_limit: 100 diff --git a/etc/nix/tee_prover.nix b/etc/nix/tee_prover.nix index 0b424522dffb..55545d1bb8e4 100644 --- a/etc/nix/tee_prover.nix +++ b/etc/nix/tee_prover.nix @@ -1,12 +1,19 @@ -{ cargoArtifacts -, craneLib +{ craneLib , commonArgs }: -craneLib.buildPackage (commonArgs // { +let pname = "zksync_tee_prover"; + cargoExtraArgs = "--locked -p zksync_tee_prover"; +in +craneLib.buildPackage (commonArgs // { + inherit pname; version = (builtins.fromTOML (builtins.readFile ../../core/bin/zksync_tee_prover/Cargo.toml)).package.version; - cargoExtraArgs = "-p zksync_tee_prover --bin zksync_tee_prover"; - inherit cargoArtifacts; + inherit cargoExtraArgs; + + cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { + inherit pname; + inherit cargoExtraArgs; + }); postInstall = '' strip $out/bin/zksync_tee_prover diff --git a/etc/nix/zksync.nix b/etc/nix/zksync.nix index c5fffc48b09d..1ecac58b5d91 100644 --- a/etc/nix/zksync.nix +++ b/etc/nix/zksync.nix @@ -1,12 +1,14 @@ -{ cargoArtifacts -, craneLib +{ craneLib , commonArgs }: craneLib.buildPackage (commonArgs // { pname = "zksync"; version = (builtins.fromTOML (builtins.readFile ../../core/bin/zksync_tee_prover/Cargo.toml)).package.version; cargoExtraArgs = "--all"; - inherit cargoArtifacts; + + cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { + pname = "zksync-era-workspace"; + }); outputs = [ "out" diff --git a/flake.nix b/flake.nix index ef618816f9c9..8c08e880910d 100644 --- a/flake.nix +++ b/flake.nix @@ -47,7 +47,7 @@ packages = { # to ease potential cross-compilation, the overlay is used inherit (appliedOverlay.zksync-era) zksync tee_prover container-tee-prover-azure container-tee-prover-dcap; - default = appliedOverlay.zksync-era.zksync; + default = appliedOverlay.zksync-era.tee_prover; }; devShells.default = appliedOverlay.zksync-era.devShell; @@ -91,7 +91,7 @@ ./Cargo.toml ./core ./prover - ./zk_toolbox + ./zkstack_cli ./.github/release-please/manifest.json ]; }; @@ -107,10 +107,6 @@ strictDeps = true; inherit hardeningEnable; }; - - cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { - pname = "zksync-era-workspace"; - }); in { zksync-era = rec { @@ -120,12 +116,11 @@ }; zksync = pkgs.callPackage ./etc/nix/zksync.nix { - inherit cargoArtifacts; inherit craneLib; inherit commonArgs; }; + tee_prover = pkgs.callPackage ./etc/nix/tee_prover.nix { - inherit cargoArtifacts; inherit craneLib; inherit commonArgs; }; diff --git a/infrastructure/zk/src/fmt.ts 
b/infrastructure/zk/src/fmt.ts index e58cdbc8e547..b9f7f1b9d60b 100644 --- a/infrastructure/zk/src/fmt.ts +++ b/infrastructure/zk/src/fmt.ts @@ -48,7 +48,7 @@ export async function rustfmt(check: boolean = false) { const dirs = [ process.env.ZKSYNC_HOME as string, `${process.env.ZKSYNC_HOME}/prover`, - `${process.env.ZKSYNC_HOME}/zk_toolbox` + `${process.env.ZKSYNC_HOME}/zkstack_cli` ]; for (const dir of dirs) { diff --git a/infrastructure/zk/src/lint.ts b/infrastructure/zk/src/lint.ts index 7a24881c0f96..49ae4d0753ec 100644 --- a/infrastructure/zk/src/lint.ts +++ b/infrastructure/zk/src/lint.ts @@ -38,12 +38,12 @@ async function proverClippy() { await utils.spawn('cargo clippy --tests --locked -- -D warnings'); } -async function toolboxClippy() { - process.chdir(`${process.env.ZKSYNC_HOME}/zk_toolbox`); +async function zkstackClippy() { + process.chdir(`${process.env.ZKSYNC_HOME}/zkstack_cli`); await utils.spawn('cargo clippy --tests --locked -- -D warnings'); } -const ARGS = [...EXTENSIONS, 'rust', 'prover', 'contracts', 'toolbox'] as const; +const ARGS = [...EXTENSIONS, 'rust', 'prover', 'contracts', 'zkstack_cli'] as const; export const command = new Command('lint') .description('lint code') @@ -61,8 +61,8 @@ export const command = new Command('lint') case 'contracts': await lintContracts(cmd.check); break; - case 'toolbox': - await toolboxClippy(); + case 'zkstack_cli': + await zkstackClippy(); break; default: await lint(extension, cmd.check); @@ -72,7 +72,7 @@ export const command = new Command('lint') promises.push(lintContracts(cmd.check)); promises.push(clippy()); promises.push(proverClippy()); - promises.push(toolboxClippy()); + promises.push(zkstackClippy()); await Promise.all(promises); } }); diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 22ec5c534858..1d584a473d96 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -46,6 +46,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", + "getrandom", "once_cell", "version_check", "zerocopy", @@ -208,6 +209,18 @@ dependencies = [ "wait-timeout", ] +[[package]] +name = "async-broadcast" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cd0e2e25ea8e5f7e9df04578dc6cf5c83577fd09b1a46aaf5c85e1c33f2a7e" +dependencies = [ + "event-listener", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -275,9 +288,9 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "aws-lc-rs" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a47f2fb521b70c11ce7369a6c5fa4bd6af7e5d62ec06303875bafe7c6ba245" +checksum = "4ae74d9bd0a7530e8afd1770739ad34b36838829d6ad61818f9230f683f5ad77" dependencies = [ "aws-lc-sys", "mirai-annotations", @@ -287,9 +300,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.19.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2927c7af777b460b7ccd95f8b67acd7b4c04ec8896bf0c8e80ba30523cffc057" +checksum = "0f0e249228c6ad2d240c2dc94b714d711629d52bad946075d8e9b2f5391f0703" dependencies = [ "bindgen 0.69.4", "cc", @@ -355,6 +368,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "backoff" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" +dependencies = [ + "getrandom", + "instant", + "rand 0.8.5", +] + [[package]] name = "backtrace" version = "0.3.72" @@ -1331,8 +1355,18 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.13.4", + "darling_macro 0.13.4", +] + +[[package]] +name = "darling" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +dependencies = [ + "darling_core 0.20.10", + "darling_macro 0.20.10", ] [[package]] @@ -1349,17 +1383,51 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "darling_core" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2 1.0.85", + "quote 1.0.36", + "strsim 0.11.1", + "syn 2.0.66", +] + [[package]] name = "darling_macro" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core", + "darling_core 0.13.4", "quote 1.0.36", "syn 1.0.109", ] +[[package]] +name = "darling_macro" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +dependencies = [ + "darling_core 0.20.10", + "quote 1.0.36", + "syn 2.0.66", +] + +[[package]] +name = "debug-map-sorted" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75c7dfa83618734bf9fa07aadaa1166b634e9427bb9bc5a1c2332d04d73fb721" +dependencies = [ + "itertools 0.10.5", +] + [[package]] name = "debugid" version = "0.8.0" @@ -1503,6 +1571,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +[[package]] +name = "dyn-clone" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" + [[package]] name = "ecdsa" version = "0.14.8" @@ -1784,6 +1858,16 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener", + "pin-project-lite", +] + [[package]] name = "fastrand" version = "2.1.0" @@ -1862,6 +1946,15 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "fluent-uri" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17c704e9dbe1ddd863da1e6ff3567795087b1eb201ce80d8fa81162e1516500d" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "flume" version = "0.11.0" @@ -2300,6 +2393,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "handlebars" +version = "3.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4498fc115fa7d34de968184e473529abb40eeb6be8bc5f7faba3d08c316cb3e3" +dependencies = [ + "log", + "pest", + "pest_derive", + "quick-error 2.0.1", + "serde", + "serde_json", +] + [[package]] name 
= "hashbrown" version = "0.12.3" @@ -2328,6 +2435,30 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "headers" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322106e6bd0cba2d5ead589ddb8150a13d7c4217cf80d7c4f682ca994ccc6aa9" +dependencies = [ + "base64 0.21.7", + "bytes", + "headers-core", + "http 1.1.0", + "httpdate", + "mime", + "sha1", +] + +[[package]] +name = "headers-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" +dependencies = [ + "http 1.1.0", +] + [[package]] name = "heck" version = "0.3.3" @@ -2521,6 +2652,26 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-http-proxy" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d06dbdfbacf34d996c6fb540a71a684a7aae9056c71951163af8a8a4c07b9a4" +dependencies = [ + "bytes", + "futures-util", + "headers", + "http 1.1.0", + "hyper 1.3.1", + "hyper-rustls", + "hyper-util", + "pin-project-lite", + "rustls-native-certs", + "tokio", + "tokio-rustls", + "tower-service", +] + [[package]] name = "hyper-rustls" version = "0.27.2" @@ -2533,6 +2684,7 @@ dependencies = [ "hyper-util", "log", "rustls", + "rustls-native-certs", "rustls-pki-types", "tokio", "tokio-rustls", @@ -2710,6 +2862,15 @@ dependencies = [ "regex", ] +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + [[package]] name = "ipnet" version = "2.9.0" @@ -2822,6 +2983,44 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json-patch" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b1fb8864823fad91877e6caea0baca82e49e8db50f8e5c9f9a453e27d3330fc" +dependencies = [ + "jsonptr", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonpath-rust" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d8fe85bd70ff715f31ce8c739194b423d79811a19602115d611a3ec85d6200" +dependencies = [ + "lazy_static", + "once_cell", + "pest", + "pest_derive", + "regex", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonptr" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c6e529149475ca0b2820835d3dce8fcc41c6b943ca608d32f35b449255e4627" +dependencies = [ + "fluent-uri", + "serde", + "serde_json", +] + [[package]] name = "jsonrpsee" version = "0.23.2" @@ -3006,6 +3205,19 @@ dependencies = [ "signature 2.2.0", ] +[[package]] +name = "k8s-openapi" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8847402328d8301354c94d605481f25a6bdc1ed65471fd96af8eca71141b13" +dependencies = [ + "base64 0.22.1", + "chrono", + "serde", + "serde-value", + "serde_json", +] + [[package]] name = "keccak" version = "0.1.5" @@ -3015,6 +3227,116 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "kube" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa21063c854820a77c5d7f8deeb7ffa55246d8304e4bcd8cce2956752c6604f8" +dependencies = [ + "k8s-openapi", + "kube-client", + "kube-core", + "kube-derive", + "kube-runtime", +] + +[[package]] +name = "kube-client" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "31c2355f5c9d8a11900e71a6fe1e47abd5ec45bf971eb4b162ffe97b46db9bb7" +dependencies = [ + "base64 0.22.1", + "bytes", + "chrono", + "either", + "futures 0.3.30", + "home", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", + "hyper-http-proxy", + "hyper-rustls", + "hyper-timeout", + "hyper-util", + "jsonpath-rust", + "k8s-openapi", + "kube-core", + "pem", + "rustls", + "rustls-pemfile 2.1.2", + "secrecy", + "serde", + "serde_json", + "serde_yaml", + "thiserror", + "tokio", + "tokio-util", + "tower", + "tower-http", + "tracing", +] + +[[package]] +name = "kube-core" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3030bd91c9db544a50247e7d48d7db9cf633c172732dce13351854526b1e666" +dependencies = [ + "chrono", + "form_urlencoded", + "http 1.1.0", + "json-patch", + "k8s-openapi", + "schemars", + "serde", + "serde-value", + "serde_json", + "thiserror", +] + +[[package]] +name = "kube-derive" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa98be978eddd70a773aa8e86346075365bfb7eb48783410852dbf7cb57f0c27" +dependencies = [ + "darling 0.20.10", + "proc-macro2 1.0.85", + "quote 1.0.36", + "serde_json", + "syn 2.0.66", +] + +[[package]] +name = "kube-runtime" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5895cb8aa641ac922408f128b935652b34c2995f16ad7db0984f6caa50217914" +dependencies = [ + "ahash 0.8.11", + "async-broadcast", + "async-stream", + "async-trait", + "backoff", + "derivative", + "futures 0.3.30", + "hashbrown 0.14.5", + "json-patch", + "jsonptr", + "k8s-openapi", + "kube-client", + "parking_lot", + "pin-project", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -3812,6 +4134,51 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "pest" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a548d2beca6773b1c244554d36fcf8548a8a58e74156968211567250e48e49a" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c93a82e8d145725dcbaf44e5ea887c8a869efdcc28706df2d08c69e17077183" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + +[[package]] +name = "pest_meta" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a941429fea7e08bedec25e4f6785b6ffaacc6b755da98df5ef3e7dcf4a124c4f" +dependencies = [ + "once_cell", + "pest", + "sha2 0.10.8", +] + [[package]] name = "petgraph" version = "0.6.5" @@ -4276,6 +4643,12 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quick-error" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" + [[package]] name = "quick-protobuf" version = "0.8.1" @@ -4756,9 +5129,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" dependencies = [ "aws-lc-rs", "log", @@ -4837,9 +5210,9 @@ checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" dependencies = [ "aws-lc-rs", "ring", @@ -4860,7 +5233,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", - "quick-error", + "quick-error 1.2.3", "tempfile", "wait-timeout", ] @@ -4889,6 +5262,30 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "schemars" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09c024468a378b7e36765cd36702b7a90cc3cba11654f6685c8f233408e89e92" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" +dependencies = [ + "proc-macro2 1.0.85", + "quote 1.0.36", + "serde_derive_internals", + "syn 2.0.66", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -4953,6 +5350,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ + "serde", "zeroize", ] @@ -5136,13 +5534,25 @@ dependencies = [ "syn 2.0.66", ] +[[package]] +name = "serde_derive_internals" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "serde_json" -version = "1.0.117" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] @@ -5187,7 +5597,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling", + "darling 0.13.4", "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", @@ -6092,6 +6502,7 @@ dependencies = [ "futures-io", "futures-sink", "pin-project-lite", + "slab", "tokio", ] @@ -6195,6 +6606,25 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +dependencies = [ + "base64 0.21.7", + "bitflags 2.6.0", + "bytes", + "http 1.1.0", 
+ "http-body 1.0.0", + "http-body-util", + "mime", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-layer" version = "0.3.2" @@ -6319,6 +6749,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + [[package]] name = "uint" version = "0.9.5" @@ -6449,9 +6885,9 @@ dependencies = [ [[package]] name = "url" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", @@ -7380,9 +7816,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" +checksum = "035269d811b3770debca372141ab64cad067dce8e58cb39a48cb7617d30c626b" dependencies = [ "anyhow", "once_cell", @@ -7405,8 +7841,12 @@ dependencies = [ "rand 0.8.5", "secrecy", "serde", + "strum", + "strum_macros", + "time", "tracing", "url", + "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -7416,9 +7856,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" +checksum = "49e38d1b5ed28c66e785caff53ea4863375555d818aafa03290397192dd3e665" dependencies = [ "anyhow", "blst", @@ -7437,9 +7877,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" +checksum = "e49fbd4e69b276058f3dfc06cf6ada0e8caa6ed826e81289e4d596da95a0f17a" dependencies = [ "anyhow", "bit-vec", @@ -7459,9 +7899,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff43cfd03ea205c763e74362dc6ec5a4d74b6b1baef0fb134dde92a8880397f7" +checksum = "b2b2aab4ed18b13cd584f4edcc2546c8da82f89ac62e525063e12935ff28c9be" dependencies = [ "anyhow", "async-trait", @@ -7479,9 +7919,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" +checksum = "10bac8f471b182d4fa3d40cf158aac3624fe636a1ff0b4cf3fe26a0e20c68a42" dependencies = [ "anyhow", "rand 0.8.5", @@ -7566,10 +8006,13 @@ dependencies = [ "tracing", "vise", "zksync_concurrency", + "zksync_consensus_crypto", "zksync_consensus_roles", "zksync_consensus_storage", + "zksync_consensus_utils", "zksync_contracts", "zksync_db_connection", + "zksync_l1_contract_interface", "zksync_protobuf", "zksync_protobuf_build", "zksync_system_constants", @@ -7678,6 +8121,21 @@ dependencies = [ "zkevm_circuits 0.150.5", ] +[[package]] +name = 
"zksync_l1_contract_interface" +version = "0.1.0" +dependencies = [ + "anyhow", + "hex", + "once_cell", + "sha2 0.10.8", + "sha3 0.10.8", + "zksync_kzg", + "zksync_prover_interface", + "zksync_solidity_vk_codegen", + "zksync_types", +] + [[package]] name = "zksync_mini_merkle_tree" version = "0.1.0" @@ -7789,9 +8247,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" +checksum = "abd55c64f54cb10967a435422f66ff5880ae14a232b245517c7ce38da32e0cab" dependencies = [ "anyhow", "bit-vec", @@ -7810,9 +8268,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" +checksum = "4121952bcaf711005dd554612fc6e2de9b30cb58088508df87f1d38046ce8ac8" dependencies = [ "anyhow", "heck 0.5.0", @@ -7836,6 +8294,7 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", + "time", "tracing", "zksync_basic_types", "zksync_config", @@ -7844,6 +8303,44 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_prover_autoscaler" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "axum", + "chrono", + "clap 4.5.4", + "ctrlc", + "debug-map-sorted", + "futures 0.3.30", + "k8s-openapi", + "kube", + "once_cell", + "regex", + "reqwest 0.12.5", + "ring", + "rustls", + "serde", + "serde_json", + "structopt", + "strum", + "time", + "tokio", + "tracing", + "tracing-subscriber", + "url", + "vise", + "zksync_config", + "zksync_core_leftovers", + "zksync_protobuf_config", + "zksync_prover_job_monitor", + "zksync_types", + "zksync_utils", + "zksync_vlog", +] + [[package]] name = "zksync_prover_dal" version = "0.1.0" @@ -8013,6 +8510,23 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_solidity_vk_codegen" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b310ab8a21681270e73f177ddf7974cabb7a96f0624ab8b008fd6ee1f9b4f687" +dependencies = [ + "ethereum-types", + "franklin-crypto", + "handlebars", + "hex", + "paste", + "rescue_poseidon", + "serde", + "serde_derive", + "serde_json", +] + [[package]] name = "zksync_system_constants" version = "0.1.0" diff --git a/prover/Cargo.toml b/prover/Cargo.toml index e95bae3d4c16..742eee649de1 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -1,8 +1,5 @@ [workspace] -members = [ - "crates/bin/*", - "crates/lib/*", -] +members = ["crates/bin/*", "crates/lib/*"] resolver = "2" @@ -19,20 +16,23 @@ categories = ["cryptography"] [workspace.dependencies] # Common dependencies anyhow = "1.0" -axum = "0.7.5" async-trait = "0.1" +axum = "0.7.5" bincode = "1" chrono = "0.4.38" clap = "4.4.6" colored = "2.0" const-decoder = "0.3.0" ctrlc = "3.1" +debug-map-sorted = "0.1.1" dialoguer = "0.11" futures = "0.3" hex = "0.4" -itertools = "0.10.5" indicatif = "0.16" +itertools = "0.10.5" jemallocator = "0.5" +k8s-openapi = { version = "0.23.0", features = ["v1_30"] } +kube = { version = "0.95.0", features = ["runtime", "derive"] } local-ip-address = "0.5.0" log = "0.4.20" md5 = "0.7.0" @@ -42,6 +42,8 @@ queues = "1.1.0" rand = "0.8" regex = "1.10.4" reqwest = "0.12" +ring = "0.17.8" +rustls = { version = "0.23.12", features = ["ring"] } serde = "1.0" serde_derive = "1.0" serde_json = "1.0" @@ -50,11 +52,13 @@ sqlx = { version = "0.8.1", 
default-features = false } structopt = "0.3.26" strum = { version = "0.26" } tempfile = "3" +time = "0.3.36" tokio = "1" tokio-util = "0.7.11" toml_edit = "0.14.4" tracing = "0.1" tracing-subscriber = "0.3" +url = "2.5.2" vise = "0.2.0" # Proving dependencies @@ -84,6 +88,7 @@ zksync_eth_client = { path = "../core/lib/eth_client" } zksync_contracts = { path = "../core/lib/contracts" } zksync_core_leftovers = { path = "../core/lib/zksync_core_leftovers" } zksync_periodic_job = { path = "../core/lib/periodic_job" } +zksync_protobuf_config = { path = "../core/lib/protobuf_config" } # Prover workspace dependencies zksync_prover_dal = { path = "crates/lib/prover_dal" } @@ -91,6 +96,7 @@ zksync_prover_fri_types = { path = "crates/lib/prover_fri_types" } zksync_prover_fri_utils = { path = "crates/lib/prover_fri_utils" } zksync_prover_keystore = { path = "crates/lib/keystore" } zksync_vk_setup_data_generator_server_fri = { path = "crates/bin/vk_setup_data_generator_server_fri" } +zksync_prover_job_monitor = { path = "crates/bin/prover_job_monitor" } # for `perf` profiling [profile.perf] diff --git a/prover/crates/bin/prover_autoscaler/Cargo.toml b/prover/crates/bin/prover_autoscaler/Cargo.toml new file mode 100644 index 000000000000..9743b45593e7 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "zksync_prover_autoscaler" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +zksync_core_leftovers.workspace = true +zksync_vlog.workspace = true +zksync_utils.workspace = true +zksync_types.workspace = true +zksync_config = { workspace = true, features = ["observability_ext"] } +zksync_prover_job_monitor.workspace = true +zksync_protobuf_config.workspace = true + +debug-map-sorted.workspace = true +anyhow.workspace = true +async-trait.workspace = true +axum.workspace = true +chrono.workspace = true +clap = { workspace = true, features = ["derive"] } +ctrlc = { workspace = true, features = ["termination"] } +futures.workspace = true +k8s-openapi = { workspace = true, features = ["v1_30"] } +kube = { workspace = true, features = ["runtime", "derive"] } +once_cell.workspace = true +regex.workspace = true +reqwest = { workspace = true, features = ["json"] } +ring.workspace = true +rustls = { workspace = true, features = ["ring"] } +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +structopt.workspace = true +strum.workspace = true +time.workspace = true +tokio = { workspace = true, features = ["time", "macros"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } +tracing.workspace = true +url.workspace = true +vise.workspace = true diff --git a/prover/crates/bin/prover_autoscaler/src/agent.rs b/prover/crates/bin/prover_autoscaler/src/agent.rs new file mode 100644 index 000000000000..3269a43815c9 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/agent.rs @@ -0,0 +1,130 @@ +use std::net::SocketAddr; + +use anyhow::Context as _; +use axum::{ + extract::State, + response::IntoResponse, + routing::{get, post}, + Json, Router, +}; +use futures::future; +use reqwest::StatusCode; +use serde::{Deserialize, Serialize}; +use tokio::sync::watch; + +use crate::{ + cluster_types::Cluster, + k8s::{Scaler, Watcher}, +}; + +struct AppError(anyhow::Error); + +impl IntoResponse for AppError { + fn into_response(self) -> 
+axum::response::Response {
+        (
+            StatusCode::INTERNAL_SERVER_ERROR,
+            format!("Something went wrong: {}", self.0),
+        )
+            .into_response()
+    }
+}
+
+pub async fn run_server(
+    port: u16,
+    watcher: Watcher,
+    scaler: Scaler,
+    mut stop_receiver: watch::Receiver<bool>,
+) -> anyhow::Result<()> {
+    let bind_address = SocketAddr::from(([0, 0, 0, 0], port));
+    tracing::debug!("Starting Autoscaler agent on {bind_address}");
+    let app = create_agent_router(watcher, scaler);
+
+    let listener = tokio::net::TcpListener::bind(bind_address)
+        .await
+        .with_context(|| format!("Failed binding Autoscaler agent to {bind_address}"))?;
+    axum::serve(listener, app)
+        .with_graceful_shutdown(async move {
+            if stop_receiver.changed().await.is_err() {
+                tracing::warn!(
+                    "Stop signal sender for Autoscaler agent was dropped without sending a signal"
+                );
+            }
+            tracing::info!("Stop signal received, Autoscaler agent is shutting down");
+        })
+        .await
+        .context("Autoscaler agent failed")?;
+    tracing::info!("Autoscaler agent shut down");
+    Ok(())
+}
+
+fn create_agent_router(watcher: Watcher, scaler: Scaler) -> Router {
+    let app = App { watcher, scaler };
+    Router::new()
+        .route("/healthz", get(health))
+        .route("/cluster", get(get_cluster))
+        .route("/scale", post(scale))
+        .with_state(app)
+}
+
+// TODO: Use
+// https://github.com/matter-labs/zksync-era/blob/9821a20018c367ce246dba656daab5c2e7757973/core/node/api_server/src/healthcheck.rs#L53
+// instead.
+async fn health() -> &'static str {
+    "Ok\n"
+}
+
+#[derive(Clone)]
+struct App {
+    watcher: Watcher,
+    scaler: Scaler,
+}
+
+async fn get_cluster(State(app): State<App>) -> Result<Json<Cluster>, AppError> {
+    let cluster = app.watcher.cluster.lock().await.clone();
+    Ok(Json(cluster))
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ScaleDeploymentRequest {
+    pub namespace: String,
+    pub name: String,
+    pub size: i32,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ScaleRequest {
+    pub deployments: Vec<ScaleDeploymentRequest>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct ScaleResponse {
+    pub scale_result: Vec<String>,
+}
+
+/// To test or force scale in a particular cluster, use:
+/// $ curl -X POST -H "Content-Type: application/json" --data '{"deployments": [{"namespace": "prover-red", "name": "witness-vector-generator-spec-9-f", "size":0},{"namespace": "prover-red", "name": "witness-vector-generator-spec-9-c", "size":0}]}' <agent>:8081/scale
+async fn scale(
+    State(app): State<App>,
+    Json(payload): Json<ScaleRequest>,
+) -> Result<Json<ScaleResponse>, AppError> {
+    let handles: Vec<_> = payload
+        .deployments
+        .into_iter()
+        .map(|d| {
+            let s = app.scaler.clone();
+            tokio::spawn(async move {
+                match s.scale(&d.namespace, &d.name, d.size).await {
+                    Ok(()) => "".to_string(),
+                    Err(err) => err.to_string(),
+                }
+            })
+        })
+        .collect();
+
+    let scale_result = future::join_all(handles)
+        .await
+        .into_iter()
+        .map(Result::unwrap)
+        .collect();
+    Ok(Json(ScaleResponse { scale_result }))
+}
diff --git a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs
new file mode 100644
index 000000000000..b074e0774c97
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs
@@ -0,0 +1,66 @@
+use std::collections::{BTreeMap, HashMap};
+
+use chrono::{DateTime, Utc};
+use serde::{Deserialize, Serialize, Serializer};
+use strum::{Display, EnumString};
+
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
+pub struct Pod {
+    // pub name: String, // TODO: Consider if it's needed.
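+    // `owner` holds the pod's owner references flattened to "kind/name" pairs,
+    // `status` the k8s pod phase, and `changed` the time of the last observed
+    // phase change; all three are filled in by k8s/watcher.rs below.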
+    pub owner: String,
+    pub status: String,
+    pub changed: DateTime<Utc>,
+}
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
+pub struct Deployment {
+    // pub name: String, // TODO: Consider if it's needed.
+    pub running: i32,
+    pub desired: i32,
+}
+
+fn ordered_map<S>(
+    value: &HashMap<String, Deployment>,
+    serializer: S,
+) -> Result<S::Ok, S::Error>
+where
+    S: Serializer,
+{
+    let ordered: BTreeMap<_, _> = value.iter().collect();
+    ordered.serialize(serializer)
+}
+
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct Namespace {
+    #[serde(serialize_with = "ordered_map")]
+    pub deployments: HashMap<String, Deployment>,
+    pub pods: HashMap<String, Pod>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Cluster {
+    pub name: String,
+    pub namespaces: HashMap<String, Namespace>,
+}
+impl Default for Cluster {
+    fn default() -> Self {
+        Self {
+            name: "".to_string(),
+            namespaces: HashMap::new(),
+        }
+    }
+}
+
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct Clusters {
+    pub clusters: HashMap<String, Cluster>,
+}
+
+#[derive(Default, Debug, EnumString, Display, Hash, PartialEq, Eq, Clone, Copy)]
+pub enum PodStatus {
+    #[default]
+    Unknown,
+    Running,
+    Pending,
+    LongPending,
+    NeedToMove,
+}
diff --git a/prover/crates/bin/prover_autoscaler/src/global/mod.rs b/prover/crates/bin/prover_autoscaler/src/global/mod.rs
new file mode 100644
index 000000000000..5e4afb938437
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/global/mod.rs
@@ -0,0 +1,3 @@
+pub mod queuer;
+pub mod scaler;
+pub mod watcher;
diff --git a/prover/crates/bin/prover_autoscaler/src/global/queuer.rs b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs
new file mode 100644
index 000000000000..1ef5d96386b5
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs
@@ -0,0 +1,41 @@
+use std::collections::HashMap;
+
+use anyhow::{Context, Ok};
+use reqwest::Method;
+use zksync_prover_job_monitor::autoscaler_queue_reporter::VersionedQueueReport;
+use zksync_utils::http_with_retries::send_request_with_retries;
+
+#[derive(Debug)]
+pub struct Queue {
+    pub queue: HashMap<String, u64>,
+}
+
+#[derive(Default)]
+pub struct Queuer {
+    pub prover_job_monitor_url: String,
+}
+
+impl Queuer {
+    pub fn new(pjm_url: String) -> Self {
+        Self {
+            prover_job_monitor_url: pjm_url,
+        }
+    }
+
+    pub async fn get_queue(&self) -> anyhow::Result<Queue> {
+        let url = &self.prover_job_monitor_url;
+        let response = send_request_with_retries(url, 5, Method::GET, None, None).await;
+        let res = response
+            .map_err(|err| anyhow::anyhow!("Failed fetching queue from url: {url}: {err:?}"))?
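+            // The prover-job-monitor returns one report per protocol version;
+            // decode them all before flattening into the version -> queued map.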
+            .json::<Vec<VersionedQueueReport>>()
+            .await
+            .context("Failed to read response as json")?;
+
+        Ok(Queue {
+            queue: res
+                .iter()
+                .map(|x| (x.version.to_string(), x.report.prover_jobs.queued as u64))
+                .collect::<HashMap<_, _>>(),
+        })
+    }
+}
diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs
new file mode 100644
index 000000000000..9f37c4d11675
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs
@@ -0,0 +1,360 @@
+use std::{collections::HashMap, str::FromStr};
+
+use chrono::Utc;
+use debug_map_sorted::SortedOutputExt;
+use once_cell::sync::Lazy;
+use regex::Regex;
+use zksync_config::configs::prover_autoscaler::{Gpu, ProverAutoscalerScalerConfig};
+
+use super::{queuer, watcher};
+use crate::{
+    cluster_types::{Cluster, Clusters, Pod, PodStatus},
+    metrics::AUTOSCALER_METRICS,
+    task_wiring::Task,
+};
+
+const DEFAULT_SPEED: u32 = 500;
+
+#[derive(Default, Debug, PartialEq, Eq)]
+struct GPUPool {
+    name: String,
+    gpu: Gpu,
+    provers: HashMap<PodStatus, u32>, // TODO: consider using i64 everywhere to avoid type casts.
+    preemptions: u64,
+    max_pool_size: u32,
+}
+
+impl GPUPool {
+    fn sum_by_pod_status(&self, ps: PodStatus) -> u32 {
+        self.provers.get(&ps).cloned().unwrap_or(0)
+    }
+
+    fn to_key(&self) -> GPUPoolKey {
+        GPUPoolKey {
+            cluster: self.name.clone(),
+            gpu: self.gpu,
+        }
+    }
+}
+
+#[derive(Debug, Eq, Hash, PartialEq)]
+struct GPUPoolKey {
+    cluster: String,
+    gpu: Gpu,
+}
+
+static PROVER_DEPLOYMENT_RE: Lazy<Regex> =
+    Lazy::new(|| Regex::new(r"^prover-gpu-fri-spec-(\d{1,2})?(-(?<gpu>[ltvpa]\d+))?$").unwrap());
+static PROVER_POD_RE: Lazy<Regex> =
+    Lazy::new(|| Regex::new(r"^prover-gpu-fri-spec-(\d{1,2})?(-(?<gpu>[ltvpa]\d+))?").unwrap());
+
+pub struct Scaler {
+    /// Maps namespace to protocol version configuration.
+    namespaces: HashMap<String, String>,
+    watcher: watcher::Watcher,
+    queuer: queuer::Queuer,
+
+    /// Which cluster to use first.
+    cluster_priorities: HashMap<String, u32>,
+    prover_speed: HashMap<Gpu, u32>,
+    long_pending_duration: chrono::Duration,
+}
+
+struct ProverPodGpu<'a> {
+    name: &'a str,
+    pod: &'a Pod,
+    gpu: Gpu,
+}
+
+impl<'a> ProverPodGpu<'a> {
+    fn new(name: &'a str, pod: &'a Pod) -> Option<ProverPodGpu<'a>> {
+        PROVER_POD_RE.captures(name).map(|caps| Self {
+            name,
+            pod,
+            gpu: Gpu::from_str(caps.name("gpu").map_or("l4", |m| m.as_str())).unwrap_or_default(),
+        })
+    }
+}
+
+impl Scaler {
+    pub fn new(
+        watcher: watcher::Watcher,
+        queuer: queuer::Queuer,
+        config: ProverAutoscalerScalerConfig,
+    ) -> Self {
+        Self {
+            namespaces: config.protocol_versions,
+            watcher,
+            queuer,
+            cluster_priorities: config.cluster_priorities,
+            prover_speed: config.prover_speed,
+            long_pending_duration: chrono::Duration::seconds(
+                config.long_pending_duration.whole_seconds(),
+            ),
+        }
+    }
+
+    fn convert_to_gpu_pool(&self, namespace: &String, cluster: &Cluster) -> Vec<GPUPool> {
+        let mut gp_map = HashMap::new(); // <Gpu, GPUPool>
+        let Some(namespace_value) = &cluster.namespaces.get(namespace) else {
+            // No namespace in config, ignoring.
+            return vec![];
+        };
+
+        for caps in namespace_value
+            .deployments
+            .keys()
+            .filter_map(|dn| PROVER_DEPLOYMENT_RE.captures(dn))
+        {
+            // Processing only provers.
+            let gpu =
+                Gpu::from_str(caps.name("gpu").map_or("l4", |m| m.as_str())).unwrap_or_default();
+            let e = gp_map.entry(gpu).or_insert(GPUPool {
+                name: cluster.name.clone(),
+                gpu,
+                max_pool_size: 100, // TODO: get from the agent.
+                ..Default::default()
+            });
+
+            // Initialize pool only if we have ready deployments.
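+            // A zero Running entry marks this GPU pool as schedulable even
+            // before any pods exist, so `run` can assign new provers to it.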
+            e.provers.insert(PodStatus::Running, 0);
+        }
+
+        for ppg in namespace_value
+            .pods
+            .iter()
+            .filter_map(|(pn, pv)| ProverPodGpu::new(pn, pv))
+        {
+            let e = gp_map.entry(ppg.gpu).or_insert(GPUPool {
+                name: cluster.name.clone(),
+                gpu: ppg.gpu,
+                ..Default::default()
+            });
+            let mut status = PodStatus::from_str(&ppg.pod.status).unwrap_or_default();
+            if status == PodStatus::Pending
+                && ppg.pod.changed < Utc::now() - self.long_pending_duration
+            {
+                status = PodStatus::LongPending;
+            }
+            tracing::info!(
+                "pod {}: status: {}, real status: {}",
+                ppg.name,
+                status,
+                ppg.pod.status
+            );
+            e.provers.entry(status).and_modify(|n| *n += 1).or_insert(1);
+        }
+
+        tracing::info!("From pods {:?}", gp_map.sorted_debug());
+
+        gp_map.into_values().collect()
+    }
+
+    fn sorted_clusters(&self, namespace: &String, clusters: &Clusters) -> Vec<GPUPool> {
+        let mut gpu_pools: Vec<GPUPool> = clusters
+            .clusters
+            .values()
+            .flat_map(|c| self.convert_to_gpu_pool(namespace, c))
+            .collect();
+
+        gpu_pools.sort_by(|a, b| {
+            a.gpu
+                .cmp(&b.gpu) // Sort by GPU first.
+                .then(
+                    a.sum_by_pod_status(PodStatus::NeedToMove)
+                        .cmp(&b.sum_by_pod_status(PodStatus::NeedToMove)),
+                ) // Sort by need to evict.
+                .then(
+                    a.sum_by_pod_status(PodStatus::LongPending)
+                        .cmp(&b.sum_by_pod_status(PodStatus::LongPending)),
+                ) // Sort by long Pending pods.
+                .then(a.preemptions.cmp(&b.preemptions)) // Sort by preemptions in the cluster.
+                .then(
+                    self.cluster_priorities
+                        .get(&a.name)
+                        .unwrap_or(&1000)
+                        .cmp(self.cluster_priorities.get(&b.name).unwrap_or(&1000)),
+                ) // Sort by priority.
+                .then(b.max_pool_size.cmp(&a.max_pool_size)) // Reverse sort by cluster size.
+        });
+
+        gpu_pools
+    }
+
+    fn speed(&self, gpu: Gpu) -> u64 {
+        self.prover_speed
+            .get(&gpu)
+            .cloned()
+            .unwrap_or(DEFAULT_SPEED)
+            .into()
+    }
+
+    fn provers_to_speed(&self, gpu: Gpu, n: u32) -> u64 {
+        self.speed(gpu) * n as u64
+    }
+
+    fn normalize_queue(&self, gpu: Gpu, q: u64) -> u64 {
+        let speed = self.speed(gpu);
+        // Round the queue up to the next multiple of `speed`.
+        (q + speed - 1) / speed * speed
+    }
+
+    fn run(&self, namespace: &String, q: u64, clusters: &Clusters) -> HashMap<GPUPoolKey, u32> {
+        let sc = self.sorted_clusters(namespace, clusters);
+        tracing::debug!("Sorted clusters for namespace {}: {:?}", namespace, &sc);
+
+        let mut total: i64 = 0;
+        let mut provers: HashMap<GPUPoolKey, u32> = HashMap::new();
+        for c in &sc {
+            for (status, p) in &c.provers {
+                match status {
+                    PodStatus::Running | PodStatus::Pending => {
+                        total += self.provers_to_speed(c.gpu, *p) as i64;
+                        provers
+                            .entry(c.to_key())
+                            .and_modify(|x| *x += p)
+                            .or_insert(*p);
+                    }
+                    _ => (), // Ignore LongPending as not running here.
+                }
+            }
+        }
+
+        // Remove unneeded pods.
+        if (total as u64) > self.normalize_queue(Gpu::L4, q) {
+            for c in sc.iter().rev() {
+                let mut excess_queue = total as u64 - self.normalize_queue(c.gpu, q);
+                let mut excess_provers = (excess_queue / self.speed(c.gpu)) as u32;
+                let p = provers.entry(c.to_key()).or_default();
+                if *p < excess_provers {
+                    excess_provers = *p;
+                    excess_queue = *p as u64 * self.speed(c.gpu);
+                }
+                *p -= excess_provers;
+                total -= excess_queue as i64;
+                if total <= 0 {
+                    break;
+                };
+            }
+        }
+
+        // Reduce load in over capacity pools.
+        for c in &sc {
+            let p = provers.entry(c.to_key()).or_default();
+            if c.max_pool_size < *p {
+                let excess = *p - c.max_pool_size;
+                total -= excess as i64 * self.speed(c.gpu) as i64;
+                *p -= excess;
+            }
+        }
+
+        tracing::debug!("Queue covered with provers: {}", total);
+        // Add required provers.
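+        // Worked example (cf. `test_run` below): with the default speed of
+        // 500 jobs per prover and q = 1499, normalize_queue rounds the queue
+        // up to 1500, so 1500 / 500 = 3 provers get requested from the first
+        // (highest-priority) cluster that has capacity.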
+ if (total as u64) < q { + for c in &sc { + let mut required_queue = q - total as u64; + let mut required_provers = + (self.normalize_queue(c.gpu, required_queue) / self.speed(c.gpu)) as u32; + let p = provers.entry(c.to_key()).or_default(); + if *p + required_provers > c.max_pool_size { + required_provers = c.max_pool_size - *p; + required_queue = required_provers as u64 * self.speed(c.gpu); + } + *p += required_provers; + total += required_queue as i64; + } + } + + tracing::debug!("run result: provers {:?}, total: {}", &provers, total); + + provers + } +} + +#[async_trait::async_trait] +impl Task for Scaler { + async fn invoke(&self) -> anyhow::Result<()> { + let queue = self.queuer.get_queue().await.unwrap(); + + // TODO: Check that clusters data is ready. + let clusters = self.watcher.clusters.lock().await; + for (ns, ppv) in &self.namespaces { + let q = queue.queue.get(ppv).cloned().unwrap_or(0); + if q > 0 { + let provers = self.run(ns, q, &clusters); + for (k, num) in &provers { + AUTOSCALER_METRICS.provers[&(k.cluster.clone(), ns.clone(), k.gpu)] + .set(*num as u64); + } + // TODO: compare before and desired, send commands [cluster,namespace,deployment] -> provers + } + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use tokio::sync::Mutex; + + use super::*; + use crate::{ + cluster_types::{self, Deployment, Namespace, Pod}, + global::{queuer, watcher}, + }; + + #[test] + fn test_run() { + let watcher = watcher::Watcher { + cluster_agents: vec![], + clusters: Arc::new(Mutex::new(cluster_types::Clusters { + ..Default::default() + })), + }; + let queuer = queuer::Queuer { + prover_job_monitor_url: "".to_string(), + }; + let scaler = Scaler::new(watcher, queuer, ProverAutoscalerScalerConfig::default()); + let got = scaler.run( + &"prover".to_string(), + 1499, + &Clusters { + clusters: HashMap::from([( + "foo".to_string(), + Cluster { + name: "foo".to_string(), + namespaces: HashMap::from([( + "prover".to_string(), + Namespace { + deployments: HashMap::from([( + "prover-gpu-fri-spec-1".to_string(), + Deployment { + ..Default::default() + }, + )]), + pods: HashMap::from([( + "prover-gpu-fri-spec-1-c47644679-x9xqp".to_string(), + Pod { + status: "Running".to_string(), + ..Default::default() + }, + )]), + }, + )]), + }, + )]), + }, + ); + let want = HashMap::from([( + GPUPoolKey { + cluster: "foo".to_string(), + gpu: Gpu::L4, + }, + 3, + )]); + assert!(got == want); + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs new file mode 100644 index 000000000000..ef3ebd3b8193 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs @@ -0,0 +1,89 @@ +use std::{collections::HashMap, sync::Arc}; + +use anyhow::{Context, Ok}; +use futures::future; +use reqwest::Method; +use tokio::sync::Mutex; +use url::Url; +use zksync_utils::http_with_retries::send_request_with_retries; + +use crate::{ + cluster_types::{Cluster, Clusters}, + task_wiring::Task, +}; + +#[derive(Clone)] +pub struct Watcher { + /// List of base URLs of all agents. 
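+    /// Each agent serves `GET /cluster` (a state snapshot) and `POST /scale`
+    /// (scale commands), as defined in agent.rs.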
+    pub cluster_agents: Vec<Arc<Url>>,
+    pub clusters: Arc<Mutex<Clusters>>,
+}
+
+impl Watcher {
+    pub fn new(agent_urls: Vec<String>) -> Self {
+        Self {
+            cluster_agents: agent_urls
+                .into_iter()
+                .map(|u| {
+                    Arc::new(
+                        Url::parse(&u)
+                            .unwrap_or_else(|e| panic!("Unparsable Agent URL {}: {}", u, e)),
+                    )
+                })
+                .collect(),
+            clusters: Arc::new(Mutex::new(Clusters {
+                clusters: HashMap::new(),
+            })),
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl Task for Watcher {
+    async fn invoke(&self) -> anyhow::Result<()> {
+        let handles: Vec<_> = self
+            .cluster_agents
+            .clone()
+            .into_iter()
+            .map(|a| {
+                tracing::debug!("Getting cluster data from agent {}.", a);
+                tokio::spawn(async move {
+                    let url: String = a
+                        .clone()
+                        .join("/cluster")
+                        .context("Failed to join URL with /cluster")?
+                        .to_string();
+                    let response =
+                        send_request_with_retries(&url, 5, Method::GET, None, None).await;
+                    response
+                        .map_err(|err| {
+                            anyhow::anyhow!("Failed fetching cluster from url: {url}: {err:?}")
+                        })?
+                        .json::<Cluster>()
+                        .await
+                        .context("Failed to read response as json")
+                })
+            })
+            .collect();
+
+        future::try_join_all(
+            future::join_all(handles)
+                .await
+                .into_iter()
+                .map(|h| async move {
+                    let c = h.unwrap().unwrap();
+                    self.clusters
+                        .lock()
+                        .await
+                        .clusters
+                        .insert(c.name.clone(), c);
+                    Ok(())
+                })
+                .collect::<Vec<_>>(),
+        )
+        .await
+        .unwrap();
+
+        Ok(())
+    }
+}
diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/mod.rs b/prover/crates/bin/prover_autoscaler/src/k8s/mod.rs
new file mode 100644
index 000000000000..0804b9eaa404
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/k8s/mod.rs
@@ -0,0 +1,5 @@
+pub use scaler::Scaler;
+pub use watcher::Watcher;
+
+mod scaler;
+mod watcher;
diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs b/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs
new file mode 100644
index 000000000000..170b0b106507
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs
@@ -0,0 +1,27 @@
+use k8s_openapi::api;
+use kube::api::{Api, Patch, PatchParams};
+
+#[derive(Clone)]
+pub struct Scaler {
+    pub client: kube::Client,
+}
+
+impl Scaler {
+    pub async fn scale(&self, namespace: &str, name: &str, size: i32) -> anyhow::Result<()> {
+        let deployments: Api<api::apps::v1::Deployment> =
+            Api::namespaced(self.client.clone(), namespace);
+
+        let patch = serde_json::json!({
+            "apiVersion": "apps/v1",
+            "kind": "Deployment",
+            "spec": {
+                "replicas": size
+            }
+        });
+        let pp = PatchParams::default();
+        deployments.patch(name, &pp, &Patch::Merge(patch)).await?;
+        tracing::info!("Scaled deployment/{} to {} replica(s).", name, size);
+
+        Ok(())
+    }
+}
diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs
new file mode 100644
index 000000000000..8746d17663be
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs
@@ -0,0 +1,138 @@
+use std::{collections::HashMap, sync::Arc};
+
+use chrono::Utc;
+use futures::{stream, StreamExt, TryStreamExt};
+use k8s_openapi::api;
+use kube::{
+    api::{Api, ResourceExt},
+    runtime::{watcher, WatchStreamExt},
+};
+use tokio::sync::Mutex;
+
+use crate::{
+    cluster_types::{Cluster, Deployment, Namespace, Pod},
+    metrics::AUTOSCALER_METRICS,
+};
+
+#[derive(Clone)]
+pub struct Watcher {
+    pub client: kube::Client,
+    pub cluster: Arc<Mutex<Cluster>>,
+}
+
+impl Watcher {
+    pub fn new(client: kube::Client, cluster_name: String, namespaces: Vec<String>) -> Self {
+        let mut ns = HashMap::new();
+        namespaces.into_iter().for_each(|n| {
+            ns.insert(n, Namespace::default());
+        });
+
+        Self {
+            client,
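+            // This shared snapshot is what the agent's `/cluster` endpoint
+            // returns to the global scaler.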
+            cluster: Arc::new(Mutex::new(Cluster {
+                name: cluster_name,
+                namespaces: ns,
+            })),
+        }
+    }
+
+    pub async fn run(self) -> anyhow::Result<()> {
+        // TODO: add actual metrics
+        AUTOSCALER_METRICS.protocol_version.set(1);
+        AUTOSCALER_METRICS.calls.inc_by(1);
+
+        // TODO: watch for a list of namespaces, get:
+        //   - deployments (name, running, desired) [done]
+        //   - pods (name, parent deployment, statuses, when the last status change) [~done]
+        //   - events (number of scheduling failures in last N seconds, which deployments)
+        //   - events (preemptions, which deployment, when, how many)
+        //   - pool size from GCP (name, size, which GPU)
+        let mut watchers = vec![];
+        for namespace in self.cluster.lock().await.namespaces.keys() {
+            let deployments: Api<api::apps::v1::Deployment> =
+                Api::namespaced(self.client.clone(), namespace);
+            watchers.push(
+                watcher(deployments, watcher::Config::default())
+                    .default_backoff()
+                    .applied_objects()
+                    .map_ok(Watched::Deploy)
+                    .boxed(),
+            );
+
+            let pods: Api<api::core::v1::Pod> = Api::namespaced(self.client.clone(), namespace);
+            watchers.push(
+                watcher(pods, watcher::Config::default())
+                    .default_backoff()
+                    .applied_objects()
+                    .map_ok(Watched::Pod)
+                    .boxed(),
+            );
+        }
+        // select on applied events from all watchers
+        let mut combo_stream = stream::select_all(watchers);
+        // SelectAll Stream elements must have the same Item, so all packed in this:
+        #[allow(clippy::large_enum_variant)]
+        enum Watched {
+            Deploy(api::apps::v1::Deployment),
+            Pod(api::core::v1::Pod),
+        }
+        while let Some(o) = combo_stream.try_next().await? {
+            match o {
+                Watched::Deploy(d) => {
+                    let namespace = match d.namespace() {
+                        Some(n) => n.to_string(),
+                        None => continue,
+                    };
+                    let mut cluster = self.cluster.lock().await;
+                    let v = cluster.namespaces.get_mut(&namespace).unwrap();
+                    let dep = v
+                        .deployments
+                        .entry(d.name_any())
+                        .or_insert(Deployment::default());
+                    let nums = d.status.clone().unwrap_or_default();
+                    dep.running = nums.available_replicas.unwrap_or_default();
+                    dep.desired = nums.replicas.unwrap_or_default();
+
+                    tracing::info!(
+                        "Got deployment: {}, size: {}/{}, unavailable: {}",
+                        d.name_any(),
+                        nums.available_replicas.unwrap_or_default(),
+                        nums.replicas.unwrap_or_default(),
+                        nums.unavailable_replicas.unwrap_or_default(),
+                    )
+                }
+                Watched::Pod(p) => {
+                    let namespace = match p.namespace() {
+                        Some(n) => n.to_string(),
+                        None => continue,
+                    };
+                    let mut cluster = self.cluster.lock().await;
+                    let v = cluster.namespaces.get_mut(&namespace).unwrap();
+                    let pod = v.pods.entry(p.name_any()).or_insert(Pod::default());
+                    pod.owner = p
+                        .owner_references()
+                        .iter()
+                        .map(|x| format!("{}/{}", x.kind.clone(), x.name.clone()))
+                        .collect::<Vec<_>>()
+                        .join(":");
+                    // TODO: Collect replica sets to match deployments and pods.
+                    let phase = p
+                        .status
+                        .clone()
+                        .unwrap_or_default()
+                        .phase
+                        .unwrap_or_default();
+                    if phase != pod.status {
+                        // TODO: try to get an idea how to set correct value on restart.
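+                        // This timestamp drives LongPending detection in the
+                        // global scaler: pods Pending longer than
+                        // `long_pending_duration` are deprioritized when
+                        // choosing clusters.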
+ pod.changed = Utc::now(); + } + pod.status = phase; + + tracing::info!("Got pod: {}", p.name_any()) + } + } + } + + Ok(()) + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/lib.rs b/prover/crates/bin/prover_autoscaler/src/lib.rs new file mode 100644 index 000000000000..0b0d704c9078 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/lib.rs @@ -0,0 +1,6 @@ +pub mod agent; +pub(crate) mod cluster_types; +pub mod global; +pub mod k8s; +pub(crate) mod metrics; +pub mod task_wiring; diff --git a/prover/crates/bin/prover_autoscaler/src/main.rs b/prover/crates/bin/prover_autoscaler/src/main.rs new file mode 100644 index 000000000000..196bd6deb81e --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/main.rs @@ -0,0 +1,148 @@ +use std::time::Duration; + +use anyhow::Context; +use structopt::StructOpt; +use tokio::{ + sync::{oneshot, watch}, + task::JoinHandle, +}; +use zksync_core_leftovers::temp_config_store::read_yaml_repr; +use zksync_protobuf_config::proto::prover_autoscaler; +use zksync_prover_autoscaler::{ + agent, + global::{self}, + k8s::{Scaler, Watcher}, + task_wiring::TaskRunner, +}; +use zksync_utils::wait_for_tasks::ManagedTasks; +use zksync_vlog::prometheus::PrometheusExporterConfig; + +/// Represents the sequential number of the Prover Autoscaler type. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] +pub enum AutoscalerType { + Scaler, + Agent, +} + +impl std::str::FromStr for AutoscalerType { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "scaler" => Ok(AutoscalerType::Scaler), + "agent" => Ok(AutoscalerType::Agent), + other => Err(format!("{} is not a valid AutoscalerType", other)), + } + } +} + +#[derive(Debug, StructOpt)] +#[structopt(name = "Prover Autoscaler", about = "Run Prover Autoscaler components")] +struct Opt { + /// Prover Autoscaler can run Agent or Scaler type. + /// + /// Specify `agent` or `scaler` + #[structopt(short, long, default_value = "agent")] + job: AutoscalerType, + /// Name of the cluster Agent is watching. + #[structopt(long)] + cluster_name: Option, + /// Path to the configuration file. + #[structopt(long)] + config_path: std::path::PathBuf, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let opt = Opt::from_args(); + let general_config = + read_yaml_repr::(&opt.config_path) + .context("general config")?; + let observability_config = general_config + .observability + .context("observability config")?; + let _observability_guard = observability_config.install()?; + // That's unfortunate that there are at least 3 different Duration in rust and we use all 3 in this repo. + // TODO: Consider updating zksync_protobuf to support std::time::Duration. 
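+    // (`time::Duration::unsigned_abs` converts the config value into the
+    // `std::time::Duration` that `ManagedTasks::complete` expects below.)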
+ let graceful_shutdown_timeout = general_config.graceful_shutdown_timeout.unsigned_abs(); + + let (stop_signal_sender, stop_signal_receiver) = oneshot::channel(); + let mut stop_signal_sender = Some(stop_signal_sender); + ctrlc::set_handler(move || { + if let Some(sender) = stop_signal_sender.take() { + sender.send(()).ok(); + } + }) + .context("Error setting Ctrl+C handler")?; + + let (stop_sender, stop_receiver) = watch::channel(false); + + let _ = rustls::crypto::ring::default_provider().install_default(); + let client = kube::Client::try_default().await?; + + tracing::info!("Starting ProverAutoscaler"); + + let mut tasks = vec![]; + + match opt.job { + AutoscalerType::Agent => { + let agent_config = general_config.agent_config.context("agent_config")?; + let exporter_config = PrometheusExporterConfig::pull(agent_config.prometheus_port); + tasks.push(tokio::spawn(exporter_config.run(stop_receiver.clone()))); + + // TODO: maybe get cluster name from curl -H "Metadata-Flavor: Google" + // http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name + let watcher = Watcher::new( + client.clone(), + opt.cluster_name + .context("cluster_name is required for Agent")?, + agent_config.namespaces, + ); + let scaler = Scaler { client }; + tasks.push(tokio::spawn(watcher.clone().run())); + tasks.push(tokio::spawn(agent::run_server( + agent_config.http_port, + watcher, + scaler, + stop_receiver.clone(), + ))) + } + AutoscalerType::Scaler => { + let scaler_config = general_config.scaler_config.context("scaler_config")?; + let interval = scaler_config.scaler_run_interval.unsigned_abs(); + let exporter_config = PrometheusExporterConfig::pull(scaler_config.prometheus_port); + tasks.push(tokio::spawn(exporter_config.run(stop_receiver.clone()))); + let watcher = global::watcher::Watcher::new(scaler_config.agents.clone()); + let queuer = global::queuer::Queuer::new(scaler_config.prover_job_monitor_url.clone()); + let scaler = global::scaler::Scaler::new(watcher.clone(), queuer, scaler_config); + tasks.extend(get_tasks(watcher, scaler, interval, stop_receiver)?); + } + } + + let mut tasks = ManagedTasks::new(tasks); + + tokio::select! 
{ + _ = tasks.wait_single() => {}, + _ = stop_signal_receiver => { + tracing::info!("Stop signal received, shutting down"); + } + } + stop_sender.send(true).ok(); + tasks.complete(graceful_shutdown_timeout).await; + + Ok(()) +} + +fn get_tasks( + watcher: global::watcher::Watcher, + scaler: global::scaler::Scaler, + interval: Duration, + stop_receiver: watch::Receiver, +) -> anyhow::Result>>> { + let mut task_runner = TaskRunner::default(); + + task_runner.add("Watcher", interval, watcher); + task_runner.add("Scaler", interval, scaler); + + Ok(task_runner.spawn(stop_receiver)) +} diff --git a/prover/crates/bin/prover_autoscaler/src/metrics.rs b/prover/crates/bin/prover_autoscaler/src/metrics.rs new file mode 100644 index 000000000000..09cbaa6ba00f --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/metrics.rs @@ -0,0 +1,14 @@ +use vise::{Counter, Gauge, LabeledFamily, Metrics}; +use zksync_config::configs::prover_autoscaler::Gpu; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "autoscaler")] +pub(crate) struct AutoscalerMetrics { + pub protocol_version: Gauge, + pub calls: Counter, + #[metrics(labels = ["target_cluster", "target_namespace", "gpu"])] + pub provers: LabeledFamily<(String, String, Gpu), Gauge, 3>, +} + +#[vise::register] +pub(crate) static AUTOSCALER_METRICS: vise::Global = vise::Global::new(); diff --git a/prover/crates/bin/prover_autoscaler/src/task_wiring.rs b/prover/crates/bin/prover_autoscaler/src/task_wiring.rs new file mode 100644 index 000000000000..9b60145ad9ea --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/task_wiring.rs @@ -0,0 +1,72 @@ +use std::time::Duration; + +use anyhow::Context; +use tracing::Instrument; + +/// Task trait to be run in ProverJobMonitor. +#[async_trait::async_trait] +pub trait Task { + async fn invoke(&self) -> anyhow::Result<()>; +} + +/// Wrapper for Task with a periodic interface. Holds information about the task and provides DB connectivity. +struct PeriodicTask { + job: Box, + name: String, + interval: Duration, +} + +impl PeriodicTask { + async fn run( + &self, + mut stop_receiver: tokio::sync::watch::Receiver, + ) -> anyhow::Result<()> { + tracing::info!( + "Started Task {} with run interval: {:?}", + self.name, + self.interval + ); + + let mut interval = tokio::time::interval(self.interval); + + while !*stop_receiver.borrow_and_update() { + interval.tick().await; + self.job + .invoke() + .instrument(tracing::info_span!("run", service_name = %self.name)) + .await + .context("failed to invoke task")?; + } + tracing::info!("Stop signal received; Task {} is shut down", self.name); + Ok(()) + } +} + +/// Wrapper on a vector of task. Makes adding/spawning tasks and sharing resources ergonomic. +#[derive(Default)] +pub struct TaskRunner { + tasks: Vec, +} + +impl TaskRunner { + pub fn add(&mut self, name: &str, interval: Duration, job: T) { + self.tasks.push(PeriodicTask { + name: name.into(), + interval, + job: Box::new(job), + }); + } + + pub fn spawn( + self, + stop_receiver: tokio::sync::watch::Receiver, + ) -> Vec>> { + self.tasks + .into_iter() + .map(|task| { + let receiver = stop_receiver.clone(); + tokio::spawn(async move { task.run(receiver).await }) + }) + .collect() + } +} diff --git a/prover/docs/05_proving_batch.md b/prover/docs/05_proving_batch.md index e09a44cb0ff7..c35de975bf71 100644 --- a/prover/docs/05_proving_batch.md +++ b/prover/docs/05_proving_batch.md @@ -14,17 +14,25 @@ GPU, which requires an NVIDIA A100 80GB GPU. 
### Prerequisites -First of all, you need to install CUDA drivers, all other things will be dealt with by `zk_inception` and `prover_cli` -tools. For that, check the following [guide](./02_setup.md)(you can skip bellman-cuda step). +First of all, you need to install CUDA drivers, all other things will be dealt with by `zkstack` and `prover_cli` tools. +For that, check the following [guide](./02_setup.md)(you can skip bellman-cuda step). Install the prerequisites, which you can find [here](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/setup-dev.md). Note, that if you are not using Google VM instance, you also need to install [gcloud](https://cloud.google.com/sdk/docs/install#deb). -Now, you can use `zk_inception` and `prover_cli` tools for setting up the env and running prover subsystem. +Now, you can use `zkstack` and `prover_cli` tools for setting up the env and running prover subsystem. -```shell -cargo +nightly-2024-08-01 install --git https://github.com/matter-labs/zksync-era/ --locked zk_inception zk_supervisor prover_cli --force +First, install `zkstackup` with: + +```bash +curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/install | bash +``` + +Then install the most recent version of `zkstack` with: + +```bash +zkstackup ``` ## Initializing system @@ -33,14 +41,14 @@ After you have installed the tool, you can create ecosystem(you need to run only running: ```shell -zk_inception ecosystem create --l1-network=localhost --prover-mode=gpu --wallet-creation=localhost --l1-batch-commit-data-generator-mode=rollup --start-containers=true +zkstack ecosystem create --l1-network=localhost --prover-mode=gpu --wallet-creation=localhost --l1-batch-commit-data-generator-mode=rollup --start-containers=true ``` The command will create the ecosystem and all the necessary components for the prover subsystem. You can leave default values for all the prompts you will see Now, you need to initialize the prover subsystem by running: ```shell -zk_inception prover init --shall-save-to-public-bucket=false --setup-database=true --use-default=true --dont-drop=false +zkstack prover init --shall-save-to-public-bucket=false --setup-database=true --use-default=true --dont-drop=false ``` For prompts you can leave default values as well. @@ -87,13 +95,23 @@ After you have the data, you need to prepare the system to run the batch. So, da the protocol version it should use. 
You can do that with running ```shell -zk_supervisor prover-version +zkstack dev prover info ``` Example output: ```shell -Current protocol version found in zksync-era: 0.24.2, snark_wrapper: "0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2" +=============================== + +Current prover setup information: + +Protocol version: 0.24.2 + +Snark wrapper: 0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2 + +Database URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_era + +=============================== ``` This command will provide you with the information about the semantic protocol version(you need to know only minor and @@ -118,7 +136,7 @@ prover_cli insert-batch --number= --version=, -} - -impl Wallet { - pub fn random(rng: &mut impl Rng) -> Self { - let private_key = H256::random_using(rng); - let local_wallet = LocalWallet::from_bytes(private_key.as_bytes()).unwrap(); - - Self { - address: Address::from_slice(local_wallet.address().as_bytes()), - private_key: Some(private_key), - } - } - - pub fn new_with_key(private_key: H256) -> Self { - let local_wallet = LocalWallet::from_bytes(private_key.as_bytes()).unwrap(); - Self { - address: Address::from_slice(local_wallet.address().as_bytes()), - private_key: Some(private_key), - } - } - - pub fn from_mnemonic(mnemonic: &str, base_path: &str, index: u32) -> anyhow::Result { - let wallet = MnemonicBuilder::::default() - .phrase(mnemonic) - .derivation_path(&format!("{}/{}", base_path, index))? - .build()?; - let private_key = H256::from_slice(&wallet.signer().to_bytes()); - Ok(Self::new_with_key(private_key)) - } - - pub fn empty() -> Self { - Self { - address: Address::zero(), - private_key: Some(H256::zero()), - } - } -} - -#[test] -fn test_load_localhost_wallets() { - let wallet = Wallet::from_mnemonic( - "stuff slice staff easily soup parent arm payment cotton trade scatter struggle", - "m/44'/60'/0'/0", - 1, - ) - .unwrap(); - assert_eq!( - wallet.address, - Address::from_slice( - ðers::utils::hex::decode("0xa61464658AfeAf65CccaaFD3a512b69A83B77618").unwrap() - ) - ); -} diff --git a/zk_toolbox/crates/zk_inception/build.rs b/zk_toolbox/crates/zk_inception/build.rs deleted file mode 100644 index 43c8d7a5aac9..000000000000 --- a/zk_toolbox/crates/zk_inception/build.rs +++ /dev/null @@ -1,11 +0,0 @@ -use std::path::PathBuf; - -use ethers::contract::Abigen; - -fn main() -> eyre::Result<()> { - let outdir = PathBuf::from(std::env::var("OUT_DIR")?).canonicalize()?; - Abigen::new("ConsensusRegistry", "abi/ConsensusRegistry.json")? - .generate()? 
- .write_to_file(outdir.join("consensus_registry_abi.rs"))?; - Ok(()) -} diff --git a/zk_toolbox/crates/zk_supervisor/Cargo.toml b/zk_toolbox/crates/zk_supervisor/Cargo.toml deleted file mode 100644 index 158abe4e2ec6..000000000000 --- a/zk_toolbox/crates/zk_supervisor/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "zk_supervisor" -version = "0.1.0" -edition.workspace = true -homepage.workspace = true -license.workspace = true -authors.workspace = true -exclude.workspace = true -repository.workspace = true -description.workspace = true -keywords.workspace = true - -[dependencies] -anyhow.workspace = true -clap.workspace = true -common.workspace = true -config.workspace = true -chrono.workspace = true -ethers.workspace = true -human-panic.workspace = true -strum.workspace = true -tokio.workspace = true -url.workspace = true -xshell.workspace = true -serde.workspace = true -serde_json.workspace = true -clap-markdown.workspace = true -futures.workspace = true -types.workspace = true -serde_yaml.workspace = true -zksync_basic_types.workspace = true -sqruff-lib = { git = "https://github.com/quarylabs/sqruff", version = "0.18.2" } diff --git a/zk_toolbox/crates/zk_supervisor/README.md b/zk_toolbox/crates/zk_supervisor/README.md deleted file mode 100644 index 865bd2f0d579..000000000000 --- a/zk_toolbox/crates/zk_supervisor/README.md +++ /dev/null @@ -1,386 +0,0 @@ -# Command-Line Help for `zk_supervisor` - -This document contains the help content for the `zk_supervisor` command-line program. - -**Command Overview:** - -- [`zk_supervisor`↴](#zk_supervisor) -- [`zk_supervisor database`↴](#zk_supervisor-database) -- [`zk_supervisor database check-sqlx-data`↴](#zk_supervisor-database-check-sqlx-data) -- [`zk_supervisor database drop`↴](#zk_supervisor-database-drop) -- [`zk_supervisor database migrate`↴](#zk_supervisor-database-migrate) -- [`zk_supervisor database new-migration`↴](#zk_supervisor-database-new-migration) -- [`zk_supervisor database prepare`↴](#zk_supervisor-database-prepare) -- [`zk_supervisor database reset`↴](#zk_supervisor-database-reset) -- [`zk_supervisor database setup`↴](#zk_supervisor-database-setup) -- [`zk_supervisor test`↴](#zk_supervisor-test) -- [`zk_supervisor test integration`↴](#zk_supervisor-test-integration) -- [`zk_supervisor test revert`↴](#zk_supervisor-test-revert) -- [`zk_supervisor test recovery`↴](#zk_supervisor-test-recovery) -- [`zk_supervisor test upgrade`↴](#zk_supervisor-test-upgrade) -- [`zk_supervisor test rust`↴](#zk_supervisor-test-rust) -- [`zk_supervisor test l1-contracts`↴](#zk_supervisor-test-l1-contracts) -- [`zk_supervisor test prover`↴](#zk_supervisor-test-prover) -- [`zk_supervisor clean`↴](#zk_supervisor-clean) -- [`zk_supervisor clean all`↴](#zk_supervisor-clean-all) -- [`zk_supervisor clean containers`↴](#zk_supervisor-clean-containers) -- [`zk_supervisor clean contracts-cache`↴](#zk_supervisor-clean-contracts-cache) -- [`zk_supervisor snapshot`↴](#zk_supervisor-snapshot) -- [`zk_supervisor snapshot create`↴](#zk_supervisor-snapshot-create) -- [`zk_supervisor lint`↴](#zk_supervisor-lint) -- [`zk_supervisor fmt`↴](#zk_supervisor-fmt) -- [`zk_supervisor fmt rustfmt`↴](#zk_supervisor-fmt-rustfmt) -- [`zk_supervisor fmt contract`↴](#zk_supervisor-fmt-contract) -- [`zk_supervisor fmt prettier`↴](#zk_supervisor-fmt-prettier) -- [`zk_supervisor prover info`↴](#zk_supervisor-prover-info) -- [`zk_supervisor prover insert-version`↴](#zk_supervisor-prover-insert-version) -- [`zk_supervisor prover 
insert-batch`↴](#zk_supervisor-prover-insert-batch) - -## `zk_supervisor` - -ZK Toolbox is a set of tools for working with zk stack. - -**Usage:** `zk_supervisor [OPTIONS] ` - -###### **Subcommands:** - -- `database` — Database related commands -- `test` — Run tests -- `clean` — Clean artifacts -- `snapshot` — Snapshots creator -- `lint` — Lint code -- `fmt` — Format code -- `prover-version` — Protocol version used by provers - -###### **Options:** - -- `-v`, `--verbose` — Verbose mode -- `--chain ` — Chain to use -- `--ignore-prerequisites` — Ignores prerequisites checks - -## `zk_supervisor database` - -Database related commands - -**Usage:** `zk_supervisor database ` - -###### **Subcommands:** - -- `check-sqlx-data` — Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked. -- `drop` — Drop databases. If no databases are selected, all databases will be dropped. -- `migrate` — Migrate databases. If no databases are selected, all databases will be migrated. -- `new-migration` — Create new migration -- `prepare` — Prepare sqlx-data.json. If no databases are selected, all databases will be prepared. -- `reset` — Reset databases. If no databases are selected, all databases will be reset. -- `setup` — Setup databases. If no databases are selected, all databases will be setup. - -## `zk_supervisor database check-sqlx-data` - -Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked. - -**Usage:** `zk_supervisor database check-sqlx-data [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database drop` - -Drop databases. If no databases are selected, all databases will be dropped. - -**Usage:** `zk_supervisor database drop [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database migrate` - -Migrate databases. If no databases are selected, all databases will be migrated. - -**Usage:** `zk_supervisor database migrate [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database new-migration` - -Create new migration - -**Usage:** `zk_supervisor database new-migration [OPTIONS]` - -###### **Options:** - -- `--database ` — Database to create new migration for - - Possible values: `prover`, `core` - -- `--name ` — Migration name - -## `zk_supervisor database prepare` - -Prepare sqlx-data.json. If no databases are selected, all databases will be prepared. - -**Usage:** `zk_supervisor database prepare [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database reset` - -Reset databases. If no databases are selected, all databases will be reset. - -**Usage:** `zk_supervisor database reset [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database setup` - -Setup databases. If no databases are selected, all databases will be setup. 
- -**Usage:** `zk_supervisor database setup [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover ` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core ` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor test` - -Run tests - -**Usage:** `zk_supervisor test ` - -###### **Subcommands:** - -- `integration` — Run integration tests -- `revert` — Run revert tests -- `recovery` — Run recovery tests -- `upgrade` — Run upgrade tests -- `rust` — Run unit-tests, accepts optional cargo test flags -- `l1-contracts` — Run L1 contracts tests -- `prover` — Run prover tests - -## `zk_supervisor test integration` - -Run integration tests - -**Usage:** `zk_supervisor test integration [OPTIONS]` - -###### **Options:** - -- `-e`, `--external-node` — Run tests for external node - -## `zk_supervisor test revert` - -Run revert tests - -**Usage:** `zk_supervisor test revert [OPTIONS]` - -###### **Options:** - -- `--enable-consensus` — Enable consensus -- `-e`, `--external-node` — Run tests for external node - -## `zk_supervisor test recovery` - -Run recovery tests - -**Usage:** `zk_supervisor test recovery [OPTIONS]` - -###### **Options:** - -- `-s`, `--snapshot` — Run recovery from a snapshot instead of genesis - -## `zk_supervisor test upgrade` - -Run upgrade tests - -**Usage:** `zk_supervisor test upgrade` - -## `zk_supervisor test rust` - -Run unit-tests, accepts optional cargo test flags - -**Usage:** `zk_supervisor test rust [OPTIONS]` - -###### **Options:** - -- `--options ` — Cargo test flags - -## `zk_supervisor test l1-contracts` - -Run L1 contracts tests - -**Usage:** `zk_supervisor test l1-contracts` - -## `zk_supervisor test prover` - -Run prover tests - -**Usage:** `zk_supervisor test prover` - -## `zk_supervisor clean` - -Clean artifacts - -**Usage:** `zk_supervisor clean ` - -###### **Subcommands:** - -- `all` — Remove containers and contracts cache -- `containers` — Remove containers and docker volumes -- `contracts-cache` — Remove contracts caches - -## `zk_supervisor clean all` - -Remove containers and contracts cache - -**Usage:** `zk_supervisor clean all` - -## `zk_supervisor clean containers` - -Remove containers and docker volumes - -**Usage:** `zk_supervisor clean containers` - -## `zk_supervisor clean contracts-cache` - -Remove contracts caches - -**Usage:** `zk_supervisor clean contracts-cache` - -## `zk_supervisor snapshot` - -Snapshots creator - -**Usage:** `zk_supervisor snapshot ` - -###### **Subcommands:** - -- `create` — - -## `zk_supervisor snapshot create` - -**Usage:** `zk_supervisor snapshot create` - -## `zk_supervisor lint` - -Lint code - -**Usage:** `zk_supervisor lint [OPTIONS]` - -###### **Options:** - -- `-c`, `--check` -- `-e`, `--extensions ` - - Possible values: `md`, `sol`, `js`, `ts`, `rs` - -## `zk_supervisor fmt` - -Format code - -**Usage:** `zk_supervisor fmt [OPTIONS] [COMMAND]` - -###### **Subcommands:** - -- `rustfmt` — -- `contract` — -- `prettier` — - -###### **Options:** - -- `-c`, `--check` - -## `zk_supervisor fmt rustfmt` - -**Usage:** `zk_supervisor fmt rustfmt` - -## `zk_supervisor fmt contract` - -**Usage:** `zk_supervisor fmt contract` - -## `zk_supervisor fmt prettier` - -**Usage:** `zk_supervisor fmt prettier [OPTIONS]` - -###### **Options:** - -- `-e`, `--extensions ` - - Possible values: `md`, `sol`, `js`, `ts`, `rs` - -## `zk_supervisor prover info` - -Prints prover protocol version, snark wrapper and prover database URL - -**Usage:** `zk_supervisor prover info` - -## `zk_supervisor prover 
insert-version` - -Inserts protocol version into prover database. - -**Usage:** `zk_supervisor prover insert-version [OPTIONS]` - -###### **Options:** - -- `--version ` — Protocol version in semantic format(`x.y.z`). Major version should be 0. -- `--snark-wrapper ` — Snark wrapper hash. -- `--default` - use default values for protocol version and snark wrapper hash (the ones found in zksync-era). - -## `zk_supervisor prover insert-batch` - -Inserts batch into prover database. - -**Usage:** `zk_supervisor prover insert-batch` - -###### **Options:** - -- `--number ` — Number of the batch to insert. -- `--version ` — Protocol version in semantic format(`x.y.z`). Major version should be 0. -- `--default` - use default value for protocol version (the one found in zksync-era). - -


- - This document was generated automatically by -clap-markdown. diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs deleted file mode 100644 index 242affd8a71b..000000000000 --- a/zk_toolbox/crates/zk_supervisor/src/main.rs +++ /dev/null @@ -1,151 +0,0 @@ -use clap::{Parser, Subcommand}; -use commands::{ - config_writer::ConfigWriterArgs, contracts::ContractsArgs, database::DatabaseCommands, - lint::LintArgs, prover::ProverCommands, send_transactions::args::SendTransactionsArgs, - snapshot::SnapshotCommands, test::TestCommands, -}; -use common::{ - check_general_prerequisites, - config::{global_config, init_global_config, GlobalConfig}, - error::log_error, - init_prompt_theme, logger, - version::version_message, -}; -use config::EcosystemConfig; -use messages::{ - msg_global_chain_does_not_exist, MSG_CONFIG_WRITER_ABOUT, MSG_CONTRACTS_ABOUT, - MSG_PROVER_VERSION_ABOUT, MSG_SEND_TXNS_ABOUT, MSG_SUBCOMMAND_CLEAN, - MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT, - MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, -}; -use xshell::Shell; - -use crate::commands::{clean::CleanCommands, fmt::FmtArgs}; - -mod commands; -mod consts; -mod dals; -mod defaults; -mod messages; - -#[derive(Parser, Debug)] -#[command( - version = version_message(env!("CARGO_PKG_VERSION")), - about -)] -struct Supervisor { - #[command(subcommand)] - command: SupervisorSubcommands, - #[clap(flatten)] - global: SupervisorGlobalArgs, -} - -#[derive(Subcommand, Debug)] -enum SupervisorSubcommands { - #[command(subcommand, about = MSG_SUBCOMMAND_DATABASE_ABOUT, alias = "db")] - Database(DatabaseCommands), - #[command(subcommand, about = MSG_SUBCOMMAND_TESTS_ABOUT, alias = "t")] - Test(TestCommands), - #[command(subcommand, about = MSG_SUBCOMMAND_CLEAN)] - Clean(CleanCommands), - #[command(subcommand, about = MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT)] - Snapshot(SnapshotCommands), - #[command(about = MSG_SUBCOMMAND_LINT_ABOUT, alias = "l")] - Lint(LintArgs), - #[command(about = MSG_SUBCOMMAND_FMT_ABOUT)] - Fmt(FmtArgs), - #[command(hide = true)] - Markdown, - #[command(subcommand, about = MSG_PROVER_VERSION_ABOUT)] - Prover(ProverCommands), - #[command(about = MSG_CONTRACTS_ABOUT)] - Contracts(ContractsArgs), - #[command(about = MSG_CONFIG_WRITER_ABOUT, alias = "o")] - ConfigWriter(ConfigWriterArgs), - #[command(about = MSG_SEND_TXNS_ABOUT)] - SendTransactions(SendTransactionsArgs), -} - -#[derive(Parser, Debug)] -#[clap(next_help_heading = "Global options")] -struct SupervisorGlobalArgs { - /// Verbose mode - #[clap(short, long, global = true)] - verbose: bool, - /// Chain to use - #[clap(long, global = true)] - chain: Option, - /// Ignores prerequisites checks - #[clap(long, global = true)] - ignore_prerequisites: bool, -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - human_panic::setup_panic!(); - - // We must parse arguments before printing the intro, because some autogenerated - // Clap commands (like `--version` would look odd otherwise). 
- let args = Supervisor::parse(); - - init_prompt_theme(); - - logger::new_empty_line(); - logger::intro(); - - let shell = Shell::new().unwrap(); - init_global_config_inner(&shell, &args.global)?; - - if !global_config().ignore_prerequisites { - check_general_prerequisites(&shell); - } - - match run_subcommand(args, &shell).await { - Ok(_) => {} - Err(error) => { - log_error(error); - std::process::exit(1); - } - } - - Ok(()) -} - -async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> { - match args.command { - SupervisorSubcommands::Database(command) => commands::database::run(shell, command).await?, - SupervisorSubcommands::Test(command) => commands::test::run(shell, command).await?, - SupervisorSubcommands::Clean(command) => commands::clean::run(shell, command)?, - SupervisorSubcommands::Snapshot(command) => commands::snapshot::run(shell, command).await?, - SupervisorSubcommands::Markdown => { - clap_markdown::print_help_markdown::(); - } - SupervisorSubcommands::Lint(args) => commands::lint::run(shell, args)?, - SupervisorSubcommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?, - SupervisorSubcommands::Prover(command) => commands::prover::run(shell, command).await?, - SupervisorSubcommands::Contracts(args) => commands::contracts::run(shell, args)?, - SupervisorSubcommands::ConfigWriter(args) => commands::config_writer::run(shell, args)?, - SupervisorSubcommands::SendTransactions(args) => { - commands::send_transactions::run(shell, args).await? - } - } - Ok(()) -} - -fn init_global_config_inner(shell: &Shell, args: &SupervisorGlobalArgs) -> anyhow::Result<()> { - if let Some(name) = &args.chain { - if let Ok(config) = EcosystemConfig::from_file(shell) { - let chains = config.list_of_chains(); - if !chains.contains(name) { - anyhow::bail!(msg_global_chain_does_not_exist(name, &chains.join(", "))); - } - } - } - - init_global_config(GlobalConfig { - verbose: args.verbose, - chain_name: args.chain.clone(), - ignore_prerequisites: args.ignore_prerequisites, - }); - Ok(()) -} diff --git a/zk_toolbox/zkup/README.md b/zk_toolbox/zkup/README.md deleted file mode 100644 index d6e3e634688c..000000000000 --- a/zk_toolbox/zkup/README.md +++ /dev/null @@ -1,76 +0,0 @@ -# zkup - zk_toolbox Installer - -`zkup` is a script designed to simplify the installation of -[zk_toolbox](https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox). It allows you to install the tool from a -local directory or directly from a GitHub repository. - -## Getting Started - -To install `zkup`, run the following command: - -```bash -curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zk_toolbox/zkup/install | bash -``` - -After installing `zkup`, you can use it to install `zk_toolbox` with: - -```bash -zkup -``` - -## Usage - -The `zkup` script provides various options for installing `zk_toolbox`: - -### Options - -- `-p, --path ` - Specify a local path to install `zk_toolbox` from. This option is ignored if `--repo` is provided. - -- `-r, --repo ` - GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". - -- `-b, --branch ` - Git branch to use when installing from a repository. Ignored if `--commit` or `--version` is provided. - -- `-c, --commit ` - Git commit hash to use when installing from a repository. Ignored if `--branch` or `--version` is provided. - -- `-v, --version ` - Git tag to use when installing from a repository. Ignored if `--branch` or `--commit` is provided. 
- -- `--inception` - Installs `zk_inception` from the repository. By default, `zkup` installs `zk_inception` and `zk_supervisor`. - -- `--supervisor` - Installs `zk_supervisor` from the repository. - -### Local Installation - -If you provide a local path using the `-p` or `--path` option, `zkup` will install `zk_toolbox` from that directory. -Note that repository-specific arguments (`--repo`, `--branch`, `--commit`, `--version`) will be ignored in this case to -preserve git state. - -### Repository Installation - -By default, `zkup` installs `zk_toolbox` from the "matter-labs/zksync-era" GitHub repository. You can specify a -different repository, branch, commit, or version using the respective options. If multiple arguments are provided, -`zkup` will prioritize them as follows: - -- `--version` -- `--commit` -- `--branch` - -### Examples - -**Install from a GitHub repository with a specific version:** - -```bash -zkup --repo matter-labs/zksync-era --version 0.1.1 -``` - -**Install from a local path, only installing `zk_inception`:** - -```bash -zkup --path /path/to/local/zk_toolbox --inception -``` diff --git a/zk_toolbox/zkup/install b/zk_toolbox/zkup/install deleted file mode 100755 index 4e24b03dec46..000000000000 --- a/zk_toolbox/zkup/install +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -BASE_DIR=${XDG_CONFIG_HOME:-$HOME} -ZKT_DIR=${ZKT_DIR:-"$BASE_DIR/.zkt"} -ZKT_BIN_DIR="$ZKT_DIR/bin" - -BIN_URL="https://raw.githubusercontent.com/matter-labs/zksync-era/main/zk_toolbox/zkup/zkup" -BIN_PATH="$ZKT_BIN_DIR/zkup" - -mkdir -p "$ZKT_BIN_DIR" -curl -sSfL "$BIN_URL" -o "$BIN_PATH" -chmod +x "$BIN_PATH" - -if [[ ":$PATH:" == *":${ZKT_BIN_DIR}:"* ]]; then - echo "zkup: found ${ZKT_BIN_DIR} in PATH" - exit 0 -fi - -case $SHELL in -*/zsh) - PROFILE="${ZDOTDIR-"$HOME"}/.zshenv" - ;; -*/bash) - PROFILE="$HOME/.bashrc" - ;; -*/fish) - PROFILE="$HOME/.config/fish/config.fish" - ;; -*/ash) - PROFILE="$HOME/.profile" - ;; -*) - echo "zkup: could not detect shell, manually add ${ZKT_BIN_DIR} to your PATH." - exit 1 - ;; -esac - -if [[ ! -f "$PROFILE" ]]; then - echo "zkup: Profile file $PROFILE does not exist, creating it." - touch "$PROFILE" -fi - -if [[ "$SHELL" == *"/fish"* ]]; then - echo -e "\n# Added by zkup\nfish_add_path -a $ZKT_BIN_DIR" >>"$PROFILE" - echo "zkup: Added $ZKT_BIN_DIR to PATH in $PROFILE using fish_add_path." -else - echo -e "\n# Added by zkup\nexport PATH=\"\$PATH:$ZKT_BIN_DIR\"" >>"$PROFILE" - echo "zkup: Added $ZKT_BIN_DIR to PATH in $PROFILE." -fi - -echo -echo "Added zkup to PATH." -echo "Run 'source $PROFILE' or start a new terminal session to use zkup." -echo "Then run 'zkup' to install ZK Toolbox." 
diff --git a/zk_toolbox/zkup/zkup b/zk_toolbox/zkup/zkup deleted file mode 100755 index e6ca17487385..000000000000 --- a/zk_toolbox/zkup/zkup +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -BASE_DIR=${XDG_CONFIG_HOME:-$HOME} -ZKT_DIR=${ZKT_DIR:-"$BASE_DIR/.zkt"} -ZKT_BIN_DIR="$ZKT_DIR/bin" - -ZKUP_INSTALL_SUPERVISOR=0 -ZKUP_INSTALL_INCEPTION=0 -ZKUP_ALIAS=0 - -BINS=() - -main() { - parse_args "$@" - - zktoolbox_banner - - check_prerequisites - mkdir -p "$ZKT_BIN_DIR" - - set_bins - - if [ -n "$ZKUP_PATH" ]; then - install_local - else - install_from_repo - fi - - zktoolbox_banner - - for bin in "${BINS[@]}"; do - success "Installed $bin to $ZKT_BIN_DIR/$bin" - done - - if [ $ZKUP_ALIAS -eq 1 ]; then - create_alias - fi -} - -PREREQUISITES=(cargo git) - -check_prerequisites() { - say "Checking prerequisites" - - failed_prerequisites=() - for prerequisite in "${PREREQUISITES[@]}"; do - if ! check_prerequisite "$prerequisite"; then - failed_prerequisites+=("$prerequisite") - fi - done - if [ ${#failed_prerequisites[@]} -gt 0 ]; then - err "The following prerequisites are missing: ${failed_prerequisites[*]}" - exit 1 - fi -} - -check_prerequisite() { - command -v "$1" &>/dev/null -} - -parse_args() { - while [[ $# -gt 0 ]]; do - case $1 in - --) - shift - break - ;; - - -p | --path) - shift - ZKUP_PATH=$1 - ;; - -r | --repo) - shift - ZKUP_REPO=$1 - ;; - -b | --branch) - shift - ZKUP_BRANCH=$1 - ;; - -c | --commit) - shift - ZKUP_COMMIT=$1 - ;; - -v | --version) - shift - ZKUP_VERSION=$1 - ;; - --inception) ZKUP_INSTALL_INCEPTION=1 ;; - --supervisor) ZKUP_INSTALL_SUPERVISOR=1 ;; - -a | --alias) ZKUP_ALIAS=1 ;; - -h | --help) - usage - exit 0 - ;; - *) - err "Unknown argument: $1" - usage - exit 1 - ;; - esac - shift - done -} - -usage() { - cat < Specify a local path to install zk_toolbox from. Ignored if --repo is provided. - -r, --repo GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". - -b, --branch Git branch to use when installing from a repository. Ignored if --commit or --version is provided. - -c, --commit Git commit hash to use when installing from a repository. Ignored if --branch or --version is provided. - -v, --version Git tag to use when installing from a repository. Ignored if --branch or --commit is provided. - -a, --alias Create aliases zki and zks for zk_inception and zk_supervisor binaries. - --inception Installs the zk_inception binary. Default is to install both zk_inception and zk_supervisor binaries. - --supervisor Installs the zk_supervisor binary. Default is to install both zk_inception and zk_supervisor binaries. - -h, --help Show this help message and exit. - -Examples: - $(basename "$0") --repo matter-labs/zksync-era --version 0.1.1 - $(basename "$0") --path /path/to/local/zk_toolbox --inception -EOF -} - -set_bins() { - if [ $ZKUP_INSTALL_INCEPTION -eq 1 ]; then - BINS+=(zk_inception) - fi - - if [ $ZKUP_INSTALL_SUPERVISOR -eq 1 ]; then - BINS+=(zk_supervisor) - fi - - # Installs both binaries if not option is provided - if [ ${#BINS[@]} -eq 0 ]; then - BINS=(zk_inception zk_supervisor) - fi -} - -install_local() { - if [ ! 
-d "$ZKUP_PATH/zk_toolbox" ]; then - err "Path $ZKUP_PATH does not contain zk_toolbox" - exit 1 - fi - - if [ -n "$ZKUP_BRANCH" ] || [ -n "$ZKUP_COMMIT" ] || [ -n "$ZKUP_VERSION" ] || [ -n "$ZKUP_REPO" ]; then - warn "Ignoring --repo, --branch, --commit and --version arguments when installing from local path" - fi - - say "Installing zk_toolbox from $ZKUP_PATH" - ensure cd "$ZKUP_PATH"/zk_toolbox - - for bin in "${BINS[@]}"; do - say "Installing $bin" - ensure cargo install --root $ZKT_DIR --path ./crates/$bin --force - done -} - -install_from_repo() { - if [ -n "$ZKUP_PATH" ]; then - warn "Ignoring --path argument when installing from repository" - fi - - ZKUP_REPO=${ZKUP_REPO:-"matter-labs/zksync-era"} - - say "Installing zk_toolbox from $ZKUP_REPO" - - if [ -n "$ZKUP_VERSION" ]; then - if [ -n "$ZKUP_COMMIT" ] || [ -n "$ZKUP_BRANCH" ]; then - warn "Ignoring --commit and --branch arguments when installing by version" - fi - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --tag "zk_toolbox-v$ZKUP_VERSION" --locked "${BINS[@]}" --force - elif [ -n "$ZKUP_COMMIT" ]; then - if [ -n "$ZKUP_BRANCH" ]; then - warn "Ignoring --branch argument when installing by commit" - fi - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --rev "$ZKUP_COMMIT" --locked "${BINS[@]}" --force - elif [ -n "$ZKUP_BRANCH" ]; then - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --branch "$ZKUP_BRANCH" --locked "${BINS[@]}" --force - else - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --locked "${BINS[@]}" --force - fi -} - -create_alias() { - if [[ "${BINS[@]}" =~ "zk_inception" ]]; then - say "Creating alias 'zki' for zk_inception" - ensure ln -sf "$ZKT_BIN_DIR/zk_inception" "$ZKT_BIN_DIR/zki" - fi - - if [[ "${BINS[@]}" =~ "zk_supervisor" ]]; then - say "Creating alias 'zks' for zk_supervisor" - ensure ln -sf "$ZKT_BIN_DIR/zk_supervisor" "$ZKT_BIN_DIR/zks" - fi -} - -ensure() { - if ! 
"$@"; then - err "command failed: $*" - exit 1 - fi -} - -say() { - local action="${1%% *}" - local rest="${1#"$action" }" - - echo -e "\033[1;32m$action\033[0m $rest" -} - -success() { - echo -e "\033[1;32m$1\033[0m" -} - -warn() { - echo -e "\033[1;33mWARNING: $1\033[0m" -} - -err() { - echo -e "\033[1;31mERROR: $1\033[0m" >&2 -} - -zktoolbox_banner() { - printf ' - -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - -███████╗██╗ ██╗ ████████╗ ██████╗ ██████╗ ██╗ ██████╗ ██████╗ ██╗ ██╗ -╚══███╔╝██║ ██╔╝ ╚══██╔══╝██╔═══██╗██╔═══██╗██║ ██╔══██╗██╔═══██╗╚██╗██╔╝ - ███╔╝ █████╔╝ ██║ ██║ ██║██║ ██║██║ ██████╔╝██║ ██║ ╚███╔╝ - ███╔╝ ██╔═██╗ ██║ ██║ ██║██║ ██║██║ ██╔══██╗██║ ██║ ██╔██╗ -███████╗██║ ██╗ ██║ ╚██████╔╝╚██████╔╝███████╗██████╔╝╚██████╔╝██╔╝ ██╗ -╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚══════╝╚═════╝ ╚═════╝ ╚═╝ ╚═╝ - - - A Comprehensive Toolkit for Creating and Managing ZK Stack Chains - -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - -Repo : https://github.com/matter-labs/zksync-era/ -Docs : https://docs.zksync.io/ -Contribute : https://github.com/matter-labs/zksync-era/pulls - -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - -' -} - -main "$@" diff --git a/zk_toolbox/CHANGELOG.md b/zkstack_cli/CHANGELOG.md similarity index 100% rename from zk_toolbox/CHANGELOG.md rename to zkstack_cli/CHANGELOG.md diff --git a/zk_toolbox/Cargo.lock b/zkstack_cli/Cargo.lock similarity index 96% rename from zk_toolbox/Cargo.lock rename to zkstack_cli/Cargo.lock index 297ef404698f..8750de36c753 100644 --- a/zk_toolbox/Cargo.lock +++ b/zkstack_cli/Cargo.lock @@ -14,9 +14,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] @@ -161,9 +161,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -172,9 +172,9 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", @@ -531,9 +531,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.23" +version = "1.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bbb537bb4a30b90362caddba8f360c0a56bc13d3a5570028e7197204cb54a17" +checksum = "2e80e3b6a3ab07840e1cae9b0666a63970dc28e8ed5ffbcdacbfc760c281bfc1" dependencies = [ "jobserver", "libc", @@ -579,9 +579,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -598,9 +598,9 @@ 
dependencies = [ [[package]] name = "clap_builder" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstream", "anstyle", @@ -1079,6 +1079,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", + "serde", ] [[package]] @@ -1882,9 +1883,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1897,9 +1898,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1907,15 +1908,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -1935,9 +1936,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-locks" @@ -1951,9 +1952,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", @@ -1962,15 +1963,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -1984,9 +1985,9 @@ dependencies = [ 
[[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -2035,9 +2036,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "git_version_macro" @@ -2098,7 +2099,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -2117,7 +2118,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -2140,6 +2141,12 @@ dependencies = [ "allocator-api2", ] +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" + [[package]] name = "hashers" version = "1.0.1" @@ -2272,9 +2279,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -2367,7 +2374,7 @@ dependencies = [ "http 1.1.0", "hyper 1.4.1", "hyper-util", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -2530,12 +2537,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", ] [[package]] @@ -2571,9 +2578,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is_terminal_polyfill" @@ -2634,9 +2641,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "0cb94a0ffd3f3ee755c20f7d8752f45cac88605a4dcf808abcff72873296ec7b" dependencies = [ "wasm-bindgen", ] @@ -3156,21 +3163,18 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.36.4" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.20.1" +version = "1.20.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1" -dependencies = [ - "portable-atomic", -] +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "open-fastrlp" @@ -3277,7 +3281,7 @@ dependencies = [ "bytes", "http 1.1.0", "opentelemetry", - "reqwest 0.12.7", + "reqwest 0.12.8", ] [[package]] @@ -3294,7 +3298,7 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost 0.13.3", - "reqwest 0.12.7", + "reqwest 0.12.8", "thiserror", "tokio", "tonic", @@ -3502,7 +3506,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.5.0", + "indexmap 2.6.0", ] [[package]] @@ -3570,18 +3574,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", @@ -3719,9 +3723,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" dependencies = [ "unicode-ident", ] @@ -4077,9 +4081,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.7" +version = "0.12.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" +checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" dependencies = [ "base64 0.22.1", "bytes", @@ -4103,7 +4107,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile 2.1.3", + "rustls-pemfile 2.2.0", "serde", "serde_json", "serde_urlencoded", @@ -4275,9 +4279,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.13" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ "once_cell", "rustls-pki-types", @@ -4297,11 +4301,10 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] @@ -4388,9 +4391,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" +checksum = 
"01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ "windows-sys 0.59.0", ] @@ -4722,7 +4725,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -5003,7 +5006,7 @@ dependencies = [ "hashbrown 0.14.5", "hashlink", "hex", - "indexmap 2.5.0", + "indexmap 2.6.0", "log", "memchr", "once_cell", @@ -5165,8 +5168,9 @@ dependencies = [ [[package]] name = "sqruff-lib" -version = "0.18.2" -source = "git+https://github.com/quarylabs/sqruff#1ccf18a620b93438c0c6b4f9fc88f402f45a1b29" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "676775189e83a98fc603d59fc6d760a66895d511502a538081dac993fde1a09a" dependencies = [ "ahash", "anstyle", @@ -5177,7 +5181,7 @@ dependencies = [ "enum_dispatch", "fancy-regex", "getrandom", - "indexmap 2.5.0", + "indexmap 2.6.0", "itertools 0.13.0", "lazy-regex", "nohash-hasher", @@ -5199,13 +5203,14 @@ dependencies = [ [[package]] name = "sqruff-lib-core" -version = "0.18.2" -source = "git+https://github.com/quarylabs/sqruff#1ccf18a620b93438c0c6b4f9fc88f402f45a1b29" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48ec5ba65376ae9ba3e3dda153668dcb6452a7212ee7b4c9d48e053eb4f0f3fa" dependencies = [ "ahash", "enum_dispatch", "fancy-regex", - "indexmap 2.5.0", + "indexmap 2.6.0", "itertools 0.13.0", "nohash-hasher", "pretty_assertions", @@ -5219,8 +5224,9 @@ dependencies = [ [[package]] name = "sqruff-lib-dialects" -version = "0.18.2" -source = "git+https://github.com/quarylabs/sqruff#1ccf18a620b93438c0c6b4f9fc88f402f45a1b29" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00fa1cd168dad593f8f6996d805acc1fd52c6d0ad0f6f5847a9cc22a6198cfc2" dependencies = [ "ahash", "itertools 0.13.0", @@ -5436,12 +5442,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" dependencies = [ "rustix", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -5624,7 +5630,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.13", + "rustls 0.23.14", "rustls-pki-types", "tokio", ] @@ -5695,7 +5701,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "toml_datetime", "winnow 0.5.40", ] @@ -5706,7 +5712,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", @@ -5975,9 +5981,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = 
"5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" @@ -6002,9 +6008,9 @@ dependencies = [ [[package]] name = "unicode-properties" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" [[package]] name = "unicode-width" @@ -6188,9 +6194,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "ef073ced962d62984fb38a36e5fdc1a2b23c9e0e1fa0689bb97afa4202ef6887" dependencies = [ "cfg-if", "once_cell", @@ -6199,9 +6205,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "c4bfab14ef75323f4eb75fa52ee0a3fb59611977fd3240da19b2cf36ff85030e" dependencies = [ "bumpalo", "log", @@ -6214,9 +6220,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.43" +version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +checksum = "65471f79c1022ffa5291d33520cbbb53b7687b01c2f8e83b57d102eed7ed479d" dependencies = [ "cfg-if", "js-sys", @@ -6226,9 +6232,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "a7bec9830f60924d9ceb3ef99d55c155be8afa76954edffbb5936ff4509474e7" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6236,9 +6242,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "4c74f6e152a76a2ad448e223b0fc0b6b5747649c3d769cc6bf45737bf97d0ed6" dependencies = [ "proc-macro2", "quote", @@ -6249,15 +6255,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "a42f6c679374623f295a8623adfe63d9284091245c3504bde47c17a3ce2777d9" [[package]] name = "web-sys" -version = "0.3.70" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +checksum = "44188d185b5bdcae1052d08bcbcf9091a5524038d4572cc4f4f2bb9d5554ddd9" dependencies = [ "js-sys", "wasm-bindgen", @@ -6681,10 +6687,26 @@ dependencies = [ ] [[package]] -name = "zk_inception" +name = "zkevm_opcode_defs" +version = "0.132.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0769f7b27d8fb06e715da3290c575cac5d04d10a557faef180e847afce50ac4" +dependencies = [ + "bitflags 2.6.0", + "blake2", + "ethereum-types", + "k256 0.11.6", + "lazy_static", + "sha2_ce", + "sha3_ce", +] + 
+[[package]] +name = "zkstack" version = "0.1.0" dependencies = [ "anyhow", + "chrono", "clap", "clap-markdown", "cliclack", @@ -6692,13 +6714,18 @@ dependencies = [ "config", "ethers", "eyre", + "futures", "human-panic", "lazy_static", + "prost 0.12.6", + "rand", + "reqwest 0.12.8", "secrecy", "serde", "serde_json", "serde_yaml", "slugify-rs", + "sqruff-lib", "strum", "thiserror", "tokio", @@ -6710,46 +6737,10 @@ dependencies = [ "zksync_config", "zksync_consensus_crypto", "zksync_consensus_roles", -] - -[[package]] -name = "zk_supervisor" -version = "0.1.0" -dependencies = [ - "anyhow", - "chrono", - "clap", - "clap-markdown", - "common", - "config", - "ethers", - "futures", - "human-panic", - "serde", - "serde_json", - "serde_yaml", - "sqruff-lib", - "strum", - "tokio", - "types", - "url", - "xshell", - "zksync_basic_types", -] - -[[package]] -name = "zkevm_opcode_defs" -version = "0.132.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0769f7b27d8fb06e715da3290c575cac5d04d10a557faef180e847afce50ac4" -dependencies = [ - "bitflags 2.6.0", - "blake2", - "ethereum-types", - "k256 0.11.6", - "lazy_static", - "sha2_ce", - "sha3_ce", + "zksync_consensus_utils", + "zksync_protobuf", + "zksync_protobuf_build", + "zksync_protobuf_config", ] [[package]] @@ -6773,9 +6764,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" +checksum = "035269d811b3770debca372141ab64cad067dce8e58cb39a48cb7617d30c626b" dependencies = [ "anyhow", "once_cell", @@ -6798,7 +6789,11 @@ dependencies = [ "rand", "secrecy", "serde", + "strum", + "strum_macros", + "time", "url", + "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -6807,9 +6802,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" +checksum = "49e38d1b5ed28c66e785caff53ea4863375555d818aafa03290397192dd3e665" dependencies = [ "anyhow", "blst", @@ -6828,9 +6823,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" +checksum = "e49fbd4e69b276058f3dfc06cf6ada0e8caa6ed826e81289e4d596da95a0f17a" dependencies = [ "anyhow", "bit-vec", @@ -6850,9 +6845,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" +checksum = "10bac8f471b182d4fa3d40cf158aac3624fe636a1ff0b4cf3fe26a0e20c68a42" dependencies = [ "anyhow", "rand", @@ -6901,9 +6896,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" +checksum = "abd55c64f54cb10967a435422f66ff5880ae14a232b245517c7ce38da32e0cab" dependencies = [ "anyhow", "bit-vec", @@ -6922,9 +6917,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.3.0" +version = "0.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" +checksum = "4121952bcaf711005dd554612fc6e2de9b30cb58088508df87f1d38046ce8ac8" dependencies = [ "anyhow", "heck", @@ -6948,6 +6943,7 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", + "time", "tracing", "zksync_basic_types", "zksync_config", @@ -7009,7 +7005,7 @@ dependencies = [ "hex", "num", "once_cell", - "reqwest 0.12.7", + "reqwest 0.12.8", "serde", "serde_json", "thiserror", diff --git a/zk_toolbox/Cargo.toml b/zkstack_cli/Cargo.toml similarity index 81% rename from zk_toolbox/Cargo.toml rename to zkstack_cli/Cargo.toml index e6aec659bd60..a805cf85d518 100644 --- a/zk_toolbox/Cargo.toml +++ b/zkstack_cli/Cargo.toml @@ -3,8 +3,7 @@ members = [ "crates/common", "crates/config", "crates/types", - "crates/zk_inception", - "crates/zk_supervisor", + "crates/zkstack", "crates/git_version_macro", ] resolver = "2" @@ -16,8 +15,8 @@ homepage = "https://zksync.io/" license = "MIT OR Apache-2.0" authors = ["The Matter Labs Team "] exclude = ["./github"] -repository = "https://github.com/matter-labs/zk_toolbox/" -description = "ZK Toolbox is a set of tools for working with zk stack." +repository = "https://github.com/matter-labs/zksync-era/tree/main/zkstack_cli/" +description = "ZK Stack CLI is a set of tools for working with zk stack." keywords = ["zk", "cryptography", "blockchain", "ZKStack", "ZKsync"] @@ -32,9 +31,11 @@ git_version_macro = { path = "crates/git_version_macro" } zksync_config = { path = "../core/lib/config" } zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } -zksync_consensus_roles = "=0.3.0" -zksync_consensus_crypto = "=0.3.0" -zksync_protobuf = "=0.3.0" +zksync_consensus_roles = "=0.5.0" +zksync_consensus_crypto = "=0.5.0" +zksync_consensus_utils = "=0.5.0" +zksync_protobuf = "=0.5.0" +zksync_protobuf_build = "=0.5.0" # External dependencies anyhow = "1.0.82" @@ -49,6 +50,7 @@ futures = "0.3.30" human-panic = "2.0" lazy_static = "1.4.0" once_cell = "1.19.0" +prost = "0.12.1" rand = "0.8.5" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/zk_toolbox/README.md b/zkstack_cli/README.md similarity index 85% rename from zk_toolbox/README.md rename to zkstack_cli/README.md index 6197a79eec99..f1c92cc3d2e3 100644 --- a/zk_toolbox/README.md +++ b/zkstack_cli/README.md @@ -1,11 +1,7 @@ -# zk_toolbox +# ZK Stack CLI -Toolkit for creating and managing ZK Stack chains. - -## ZK Inception - -`ZK Inception` facilitates the creation and management of ZK Stacks. Commands are interactive but can also accept -arguments via the command line. +Toolkit for creating and managing ZK Stack chains. `ZK Stack CLI` facilitates the creation and management of ZK Stacks. +Commands are interactive but can also accept arguments via the command line. ### Dependencies @@ -14,19 +10,25 @@ dependencies on your machine. Ignore the Environment section for now. 
### Installation -Install `zk_inception` from Git: +You can use `zkstackup` to install and manage `zkstack`: + +```bash +curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/install | bash +``` + +Then install the most recent version with: ```bash -cargo install --git https://github.com/matter-labs/zksync-era/ --locked zk_inception zk_supervisor --force +zkstackup ``` Or manually build from a local copy of the [ZKsync](https://github.com/matter-labs/zksync-era/) repository: ```bash -./bin/zkt +zkstackup --local ``` -This command installs `zk_inception` and `zk_supervisor` from the current repository. +This command installs `zkstack` from the current repository. ### Foundry Integration @@ -51,13 +53,13 @@ BridgeHub, shared bridges, and state transition managers. To create a ZK Stack project, start by creating an ecosystem: ```bash -zk_inception ecosystem create +zkstack ecosystem create ``` If you choose not to start database & L1 containers after creating the ecosystem, you can later run: ```bash -zk_inception containers +zkstack containers ``` Execute subsequent commands from within the created ecosystem folder: @@ -71,14 +73,14 @@ cd path/to/ecosystem/name If the ecosystem has never been deployed before, initialize it: ```bash -zk_inception ecosystem init +zkstack ecosystem init ``` This initializes the first ZK chain, which becomes the default. Override with `--chain <chain_name>` if needed. For default params, use: ```bash -zk_inception ecosystem init --dev +zkstack ecosystem init --dev ``` If the process gets stuck, resume it with `--resume`. This flag keeps track of already sent transactions and sends new @@ -98,7 +100,7 @@ To verify contracts, use the `--verify` flag. To change the default ZK chain: ```bash -zk_inception ecosystem change-default-chain +zkstack ecosystem change-default-chain ``` IMPORTANT: Currently, you cannot use an existing ecosystem to register a new chain. This feature will be added in the @@ -109,19 +111,19 @@ future. To set up [era-observability](https://github.com/matter-labs/era-observability): ```bash -zk_inception ecosystem setup-observability +zkstack ecosystem setup-observability ``` Or run: ```bash -zk_inception ecosystem init --observability +zkstack ecosystem init --observability ``` To start observability containers: ```bash -zk_inception containers --observability +zkstack containers --observability ``` ### ZK Chain @@ -131,7 +133,7 @@ zk_inception containers --observability The first ZK chain is generated upon ecosystem creation. Create additional chains and switch between them: ```bash -zk_inception chain create +zkstack chain create ``` #### Init @@ -139,7 +141,7 @@ zk_inception chain create Deploy contracts and initialize the ZK chain: ```bash -zk_inception chain init +zkstack chain init ``` This registers the chain in the BridgeHub and deploys all necessary contracts. Manual initialization steps: @@ -154,7 +156,7 @@ by a third party). To run the chain: ```bash -zk_inception server +zkstack server ``` You can specify the component you want to run using the `--components` flag @@ -180,13 +182,13 @@ information. Initialize the prover: ```bash -zk_inception prover init +zkstack prover init ``` Run the prover: ```bash -zk_inception prover run +zkstack prover run ``` Specify the prover component with `--component <component>`. Components: @@ -202,13 +204,13 @@ For `witness-generator`, specify the round with `--round <round>`.
Rounds: Download required binaries (`solc`, `zksolc`, `vyper`, `zkvyper`): ```bash -zk_inception contract-verifier init +zkstack contract-verifier init ``` Run the contract verifier: ```bash -zk_inception contract-verifier run +zkstack contract-verifier run ``` ### External Node @@ -220,7 +222,7 @@ Commands for running an external node: Prepare configs: ```bash -zk_inception en configs +zkstack en configs ``` This ensures no port conflicts with the main node. @@ -230,7 +232,7 @@ This ensures no port conflicts with the main node. Prepare the databases: ```bash -zk_inception en init +zkstack en init ``` #### Run @@ -238,7 +240,7 @@ zk_inception en init Run the external node: ```bash -zk_inception en run +zkstack en run ``` ### Portal @@ -247,7 +249,7 @@ Once you have at least one chain initialized, you can run the [portal](https://github.com/matter-labs/dapp-portal) web-app to bridge tokens between L1 and L2 and more: ```bash -zk_inception portal +zkstack portal ``` This command will start the dockerized portal app using configuration from the `apps/portal.config.json` file inside your @@ -263,7 +265,7 @@ contracts and more. First, each chain should be initialized: ```bash -zk_inception explorer init +zkstack explorer init ``` This command creates a database to store explorer data and generates a docker compose file with explorer services @@ -272,7 +274,7 @@ This command creates a database to store explorer data and generates a docker compo Next, for each chain you want to have an explorer, you need to start its backend services: ```bash -zk_inception explorer backend --chain <chain_name> +zkstack explorer backend --chain <chain_name> ``` This command uses the previously created docker compose file to start the services (api, data fetcher, worker) required for @@ -281,7 +283,7 @@ the explorer. Finally, you can run the explorer app: ```bash -zk_inception explorer run +zkstack explorer run ``` This command will start the dockerized explorer app using configuration from the `apps/explorer.config.json` file inside @@ -293,22 +295,22 @@ your ecosystem directory. You can edit this file to configure the app if needed. To update your node: ```bash -zk_inception update +zkstack update ``` This command pulls the latest changes, syncs the general config for all chains, and raises a warning if L1 upgrades are needed. -## ZK Supervisor +## Dev -Tools for developing ZKsync. +The subcommand `zkstack dev` offers tools for developing ZKsync. ### Database Commands for database manipulation: ```bash -zk_supervisor db +zkstack dev db ``` Possible commands: @@ -326,7 +328,7 @@ Possible commands: Clean artifacts: ```bash -zk_supervisor clean +zkstack dev clean ``` Possible commands: @@ -340,7 +342,7 @@ Possible commands: Run ZKsync tests: ```bash -zk_supervisor test +zkstack dev test ``` Possible commands: @@ -358,7 +360,7 @@ Possible commands: Create a snapshot of the current chain: ```bash -zks snapshot create +zkstack dev snapshot create ``` ### Contracts Build contracts: ```bash -zks contracts +zkstack dev contracts ``` ### Format Format code: ```bash -zks fmt +zkstack dev fmt ``` By default, this command runs all formatters. To run a specific formatter, use the following subcommands: @@ -388,7 +390,7 @@ By default, this command runs all formatters. To run a specific formatter, use the Lint code: ```bash -zks lint +zkstack dev lint ``` By default, this command runs the linter on all files. To target specific file types, use the `--target` option.
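Taken together, the commands above form a short path from installation to a running chain. A minimal quickstart, assuming the interactive defaults are acceptable (the ecosystem directory name is a placeholder chosen during `create`):

```bash
# Install zkstackup, then the latest zkstack release.
curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/install | bash
zkstackup

# Scaffold an ecosystem and deploy it with default parameters.
zkstack ecosystem create
cd path/to/ecosystem/name   # placeholder: the folder created above
zkstack ecosystem init --dev

# Run the chain; format and lint before pushing changes.
zkstack server
zkstack dev fmt && zkstack dev lint
```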
diff --git a/zk_toolbox/crates/common/Cargo.toml b/zkstack_cli/crates/common/Cargo.toml similarity index 100% rename from zk_toolbox/crates/common/Cargo.toml rename to zkstack_cli/crates/common/Cargo.toml diff --git a/zk_toolbox/crates/common/src/cmd.rs b/zkstack_cli/crates/common/src/cmd.rs similarity index 100% rename from zk_toolbox/crates/common/src/cmd.rs rename to zkstack_cli/crates/common/src/cmd.rs diff --git a/zk_toolbox/crates/common/src/config.rs b/zkstack_cli/crates/common/src/config.rs similarity index 100% rename from zk_toolbox/crates/common/src/config.rs rename to zkstack_cli/crates/common/src/config.rs diff --git a/zk_toolbox/crates/common/src/db.rs b/zkstack_cli/crates/common/src/db.rs similarity index 100% rename from zk_toolbox/crates/common/src/db.rs rename to zkstack_cli/crates/common/src/db.rs diff --git a/zk_toolbox/crates/common/src/docker.rs b/zkstack_cli/crates/common/src/docker.rs similarity index 100% rename from zk_toolbox/crates/common/src/docker.rs rename to zkstack_cli/crates/common/src/docker.rs diff --git a/zk_toolbox/crates/common/src/ethereum.rs b/zkstack_cli/crates/common/src/ethereum.rs similarity index 96% rename from zk_toolbox/crates/common/src/ethereum.rs rename to zkstack_cli/crates/common/src/ethereum.rs index 33caaad9789e..2100746fecff 100644 --- a/zk_toolbox/crates/common/src/ethereum.rs +++ b/zkstack_cli/crates/common/src/ethereum.rs @@ -6,18 +6,17 @@ use ethers::{ middleware::MiddlewareBuilder, prelude::{Http, LocalWallet, Provider, Signer, SignerMiddleware}, providers::Middleware, - types::{Address, TransactionRequest, H256}, + types::{Address, TransactionRequest}, }; use types::TokenInfo; use crate::{logger, wallets::Wallet}; pub fn create_ethers_client( - private_key: H256, + mut wallet: LocalWallet, l1_rpc: String, chain_id: Option<u64>, ) -> anyhow::Result<SignerMiddleware<Provider<Http>, ethers::prelude::Wallet<SigningKey>>> { - let mut wallet = LocalWallet::from_bytes(private_key.as_bytes())?; if let Some(chain_id) = chain_id { wallet = wallet.with_chain_id(chain_id); } diff --git a/zk_toolbox/crates/common/src/external_node.rs b/zkstack_cli/crates/common/src/external_node.rs similarity index 100% rename from zk_toolbox/crates/common/src/external_node.rs rename to zkstack_cli/crates/common/src/external_node.rs diff --git a/zk_toolbox/crates/common/src/files.rs b/zkstack_cli/crates/common/src/files.rs similarity index 100% rename from zk_toolbox/crates/common/src/files.rs rename to zkstack_cli/crates/common/src/files.rs diff --git a/zk_toolbox/crates/common/src/forge.rs b/zkstack_cli/crates/common/src/forge.rs similarity index 97% rename from zk_toolbox/crates/common/src/forge.rs rename to zkstack_cli/crates/common/src/forge.rs index 7fd5399cc66b..bef285fb89b2 100644 --- a/zk_toolbox/crates/common/src/forge.rs +++ b/zkstack_cli/crates/common/src/forge.rs @@ -143,10 +143,12 @@ impl ForgeScript { } // Do not start the script if balance is not enough - pub fn private_key(&self) -> Option<H256> { + pub fn private_key(&self) -> Option<LocalWallet> { self.args.args.iter().find_map(|a| { if let ForgeScriptArg::PrivateKey { private_key } = a { - Some(H256::from_str(private_key).unwrap()) + let key = H256::from_str(private_key).unwrap(); + let key = LocalWallet::from_bytes(key.as_bytes()).unwrap(); + Some(key) } else { None } @@ -164,11 +166,7 @@ impl ForgeScript { } pub fn address(&self) -> Option<Address>
{ - self.private_key().and_then(|a| { - LocalWallet::from_bytes(a.as_bytes()) - .ok() - .map(|a| Address::from_slice(a.address().as_bytes())) - }) + self.private_key().map(|k| k.address()) } pub async fn get_the_balance(&self) -> anyhow::Result<Option<U256>> { @@ -280,7 +278,7 @@ pub struct ForgeScriptArgs { pub resume: bool, /// List of additional arguments that can be passed through the CLI. /// - /// e.g.: `zk_inception init -a --private-key=<PRIVATE_KEY>` + /// e.g.: `zkstack init -a --private-key=<PRIVATE_KEY>` #[clap(long, short)] #[arg(trailing_var_arg = true, allow_hyphen_values = true, hide = false)] additional_args: Vec<String>, diff --git a/zk_toolbox/crates/common/src/git.rs b/zkstack_cli/crates/common/src/git.rs similarity index 100% rename from zk_toolbox/crates/common/src/git.rs rename to zkstack_cli/crates/common/src/git.rs diff --git a/zk_toolbox/crates/common/src/lib.rs b/zkstack_cli/crates/common/src/lib.rs similarity index 90% rename from zk_toolbox/crates/common/src/lib.rs rename to zkstack_cli/crates/common/src/lib.rs index c23ef9202261..b0fbdab0d1b0 100644 --- a/zk_toolbox/crates/common/src/lib.rs +++ b/zkstack_cli/crates/common/src/lib.rs @@ -18,7 +18,7 @@ pub mod yaml; pub use prerequisites::{ check_general_prerequisites, check_prerequisites, GCLOUD_PREREQUISITE, GPU_PREREQUISITES, - PROVER_CLI_PREREQUISITE, WGET_PREREQUISITE, + PROVER_CLI_PREREQUISITE, }; pub use prompt::{init_prompt_theme, Prompt, PromptConfirm, PromptSelect}; pub use term::{error, logger, spinner}; diff --git a/zk_toolbox/crates/common/src/prerequisites.rs b/zkstack_cli/crates/common/src/prerequisites.rs similarity index 95% rename from zk_toolbox/crates/common/src/prerequisites.rs rename to zkstack_cli/crates/common/src/prerequisites.rs index 665096d8486e..7845249a1ed3 100644 --- a/zk_toolbox/crates/common/src/prerequisites.rs +++ b/zkstack_cli/crates/common/src/prerequisites.rs @@ -45,11 +45,6 @@ pub const GPU_PREREQUISITES: [Prerequisite; 3] = [ }, // CUDA GPU driver ]; -pub const WGET_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { - name: "wget", - download_link: "https://www.gnu.org/software/wget/", -}]; - pub const GCLOUD_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { - name: "gcloud", download_link: "https://cloud.google.com/sdk/docs/install", diff --git a/zk_toolbox/crates/common/src/prompt/confirm.rs b/zkstack_cli/crates/common/src/prompt/confirm.rs similarity index 100% rename from zk_toolbox/crates/common/src/prompt/confirm.rs rename to zkstack_cli/crates/common/src/prompt/confirm.rs diff --git a/zk_toolbox/crates/common/src/prompt/input.rs b/zkstack_cli/crates/common/src/prompt/input.rs similarity index 100% rename from zk_toolbox/crates/common/src/prompt/input.rs rename to zkstack_cli/crates/common/src/prompt/input.rs diff --git a/zk_toolbox/crates/common/src/prompt/mod.rs b/zkstack_cli/crates/common/src/prompt/mod.rs similarity index 100% rename from zk_toolbox/crates/common/src/prompt/mod.rs rename to zkstack_cli/crates/common/src/prompt/mod.rs diff --git a/zk_toolbox/crates/common/src/prompt/select.rs b/zkstack_cli/crates/common/src/prompt/select.rs similarity index 100% rename from zk_toolbox/crates/common/src/prompt/select.rs rename to zkstack_cli/crates/common/src/prompt/select.rs diff --git a/zk_toolbox/crates/common/src/server.rs b/zkstack_cli/crates/common/src/server.rs similarity index 100% rename from zk_toolbox/crates/common/src/server.rs rename to zkstack_cli/crates/common/src/server.rs diff --git a/zk_toolbox/crates/common/src/term/error.rs b/zkstack_cli/crates/common/src/term/error.rs similarity index 100%
rename from zk_toolbox/crates/common/src/term/error.rs rename to zkstack_cli/crates/common/src/term/error.rs diff --git a/zk_toolbox/crates/common/src/term/logger.rs b/zkstack_cli/crates/common/src/term/logger.rs similarity index 97% rename from zk_toolbox/crates/common/src/term/logger.rs rename to zkstack_cli/crates/common/src/term/logger.rs index 17e518d9ad92..035e81dc1355 100644 --- a/zk_toolbox/crates/common/src/term/logger.rs +++ b/zkstack_cli/crates/common/src/term/logger.rs @@ -14,7 +14,7 @@ fn term_write(msg: impl Display) { } pub fn intro() { - cliclak_intro(style(" ZKsync toolbox ").on_cyan().black()).unwrap(); + cliclak_intro(style(" ZK Stack CLI ").on_cyan().black()).unwrap(); } pub fn outro(msg: impl Display) { diff --git a/zk_toolbox/crates/common/src/term/mod.rs b/zkstack_cli/crates/common/src/term/mod.rs similarity index 100% rename from zk_toolbox/crates/common/src/term/mod.rs rename to zkstack_cli/crates/common/src/term/mod.rs diff --git a/zk_toolbox/crates/common/src/term/spinner.rs b/zkstack_cli/crates/common/src/term/spinner.rs similarity index 100% rename from zk_toolbox/crates/common/src/term/spinner.rs rename to zkstack_cli/crates/common/src/term/spinner.rs diff --git a/zk_toolbox/crates/common/src/version.rs b/zkstack_cli/crates/common/src/version.rs similarity index 100% rename from zk_toolbox/crates/common/src/version.rs rename to zkstack_cli/crates/common/src/version.rs diff --git a/zkstack_cli/crates/common/src/wallets.rs b/zkstack_cli/crates/common/src/wallets.rs new file mode 100644 index 000000000000..43a9864474cc --- /dev/null +++ b/zkstack_cli/crates/common/src/wallets.rs @@ -0,0 +1,102 @@ +use ethers::{ + core::rand::{CryptoRng, Rng}, + signers::{coins_bip39::English, LocalWallet, MnemonicBuilder, Signer}, + types::{Address, H256}, +}; +use serde::{Deserialize, Serialize}; +use types::parse_h256; + +#[derive(Serialize, Deserialize)] +struct WalletSerde { + pub address: Address, + pub private_key: Option<H256>, +} + +#[derive(Debug, Clone)] +pub struct Wallet { + pub address: Address, + pub private_key: Option<LocalWallet>, +} + +impl<'de> Deserialize<'de> for Wallet { + fn deserialize<D: serde::Deserializer<'de>>(d: D) -> Result<Self, D::Error> { + let x = WalletSerde::deserialize(d)?; + Ok(match x.private_key { + None => Self { + address: x.address, + private_key: None, + }, + Some(k) => { + let k = LocalWallet::from_bytes(k.as_bytes()).map_err(serde::de::Error::custom)?; + if k.address() != x.address { + return Err(serde::de::Error::custom(format!( + "address does not match private key: got address {:#x}, want {:#x}", + x.address, + k.address(), + ))); + } + Self::new(k) + } + }) + } +} + +impl Serialize for Wallet { + fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { + WalletSerde { + address: self.address, + private_key: self.private_key_h256(), + } + .serialize(s) + } +} + +impl Wallet { + pub fn private_key_h256(&self) -> Option<H256> { + self.private_key + .as_ref() + .map(|k| parse_h256(k.signer().to_bytes().as_slice()).unwrap()) + } + + pub fn random(rng: &mut (impl Rng + CryptoRng)) -> Self { + Self::new(LocalWallet::new(rng)) + } + + pub fn new(private_key: LocalWallet) -> Self { + Self { + address: private_key.address(), + private_key: Some(private_key), + } + } + + pub fn from_mnemonic(mnemonic: &str, base_path: &str, index: u32) -> anyhow::Result<Self> { + let wallet = MnemonicBuilder::<English>::default() + .phrase(mnemonic) + .derivation_path(&format!("{}/{}", base_path, index))?
+ .build()?; + Ok(Self::new(wallet)) + } + + pub fn empty() -> Self { + Self { + address: Address::zero(), + private_key: None, + } + } +} + +#[test] +fn test_load_localhost_wallets() { + let wallet = Wallet::from_mnemonic( + "stuff slice staff easily soup parent arm payment cotton trade scatter struggle", + "m/44'/60'/0'/0", + 1, + ) + .unwrap(); + assert_eq!( + wallet.address, + Address::from_slice( + &ethers::utils::hex::decode("0xa61464658AfeAf65CccaaFD3a512b69A83B77618").unwrap() + ) + ); +} diff --git a/zk_toolbox/crates/common/src/yaml.rs b/zkstack_cli/crates/common/src/yaml.rs similarity index 100% rename from zk_toolbox/crates/common/src/yaml.rs rename to zkstack_cli/crates/common/src/yaml.rs diff --git a/zk_toolbox/crates/config/Cargo.toml b/zkstack_cli/crates/config/Cargo.toml similarity index 100% rename from zk_toolbox/crates/config/Cargo.toml rename to zkstack_cli/crates/config/Cargo.toml diff --git a/zk_toolbox/crates/config/src/apps.rs b/zkstack_cli/crates/config/src/apps.rs similarity index 96% rename from zk_toolbox/crates/config/src/apps.rs rename to zkstack_cli/crates/config/src/apps.rs index 697b35b0851b..3bd611bdc32a 100644 --- a/zk_toolbox/crates/config/src/apps.rs +++ b/zkstack_cli/crates/config/src/apps.rs @@ -5,7 +5,7 @@ use xshell::Shell; use crate::{ consts::{APPS_CONFIG_FILE, DEFAULT_EXPLORER_PORT, DEFAULT_PORTAL_PORT, LOCAL_CONFIGS_PATH}, - traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkStackConfig}, }; /// Ecosystem level configuration for the apps (portal and explorer). @@ -20,7 +20,7 @@ pub struct AppEcosystemConfig { pub http_port: u16, } -impl ZkToolboxConfig for AppsEcosystemConfig {} +impl ZkStackConfig for AppsEcosystemConfig {} impl FileConfigWithDefaultName for AppsEcosystemConfig { const FILE_NAME: &'static str = APPS_CONFIG_FILE; } diff --git a/zk_toolbox/crates/config/src/chain.rs b/zkstack_cli/crates/config/src/chain.rs similarity index 96% rename from zk_toolbox/crates/config/src/chain.rs rename to zkstack_cli/crates/config/src/chain.rs index affc8ccc770c..6c82d6ef3c37 100644 --- a/zk_toolbox/crates/config/src/chain.rs +++ b/zkstack_cli/crates/config/src/chain.rs @@ -16,7 +16,7 @@ use crate::{ create_localhost_wallets, traits::{ FileConfigWithDefaultName, ReadConfig, ReadConfigWithBasePath, SaveConfig, - SaveConfigWithBasePath, ZkToolboxConfig, + SaveConfigWithBasePath, ZkStackConfig, }, ContractsConfig, GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig, }; @@ -87,8 +87,8 @@ impl ChainConfig { pub fn get_wallets_config(&self) -> anyhow::Result<WalletsConfig> { let path = self.configs.join(WALLETS_FILE); - if let Ok(wallets) = WalletsConfig::read(self.get_shell(), &path) { - return Ok(wallets); + if self.get_shell().path_exists(&path) { + return WalletsConfig::read(self.get_shell(), &path); } if self.wallet_creation == WalletCreation::Localhost { let wallets = create_localhost_wallets(self.get_shell(), &self.link_to_code, self.id)?; @@ -165,4 +165,4 @@ impl FileConfigWithDefaultName for ChainConfigInternal { const FILE_NAME: &'static str = CONFIG_NAME; } -impl ZkToolboxConfig for ChainConfigInternal {} +impl ZkStackConfig for ChainConfigInternal {} diff --git a/zk_toolbox/crates/config/src/consensus_config.rs b/zkstack_cli/crates/config/src/consensus_config.rs similarity index 100% rename from zk_toolbox/crates/config/src/consensus_config.rs rename to zkstack_cli/crates/config/src/consensus_config.rs diff --git a/zk_toolbox/crates/config/src/consensus_secrets.rs
b/zkstack_cli/crates/config/src/consensus_secrets.rs similarity index 67% rename from zk_toolbox/crates/config/src/consensus_secrets.rs rename to zkstack_cli/crates/config/src/consensus_secrets.rs index 0e5c4592d2fc..da551a452799 100644 --- a/zk_toolbox/crates/config/src/consensus_secrets.rs +++ b/zkstack_cli/crates/config/src/consensus_secrets.rs @@ -2,13 +2,13 @@ use std::path::Path; use xshell::Shell; use zksync_config::configs::consensus::ConsensusSecrets; -use zksync_protobuf_config::decode_yaml_repr; +use zksync_protobuf_config::read_yaml_repr; use crate::traits::ReadConfig; impl ReadConfig for ConsensusSecrets { fn read(shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<Self> { let path = shell.current_dir().join(path); - decode_yaml_repr::<zksync_protobuf_config::proto::secrets::ConsensusSecrets>(&path, false) + read_yaml_repr::<zksync_protobuf_config::proto::secrets::ConsensusSecrets>(&path, false) } } diff --git a/zk_toolbox/crates/config/src/consts.rs b/zkstack_cli/crates/config/src/consts.rs similarity index 97% rename from zk_toolbox/crates/config/src/consts.rs rename to zkstack_cli/crates/config/src/consts.rs index 80b204cc6191..f462ce33b8f8 100644 --- a/zk_toolbox/crates/config/src/consts.rs +++ b/zkstack_cli/crates/config/src/consts.rs @@ -62,8 +62,6 @@ pub const DEFAULT_EXPLORER_WORKER_PORT: u16 = 3001; pub const DEFAULT_EXPLORER_API_PORT: u16 = 3002; /// Default port for the explorer data fetcher service pub const DEFAULT_EXPLORER_DATA_FETCHER_PORT: u16 = 3040; -/// Default port for consensus service -pub const DEFAULT_CONSENSUS_PORT: u16 = 3054; pub const EXPLORER_API_DOCKER_IMAGE: &str = "matterlabs/block-explorer-api"; pub const EXPLORER_DATA_FETCHER_DOCKER_IMAGE: &str = "matterlabs/block-explorer-data-fetcher"; diff --git a/zk_toolbox/crates/config/src/contracts.rs b/zkstack_cli/crates/config/src/contracts.rs similarity index 97% rename from zk_toolbox/crates/config/src/contracts.rs rename to zkstack_cli/crates/config/src/contracts.rs index 8296aa188527..e6676989e68c 100644 --- a/zk_toolbox/crates/config/src/contracts.rs +++ b/zkstack_cli/crates/config/src/contracts.rs @@ -11,7 +11,7 @@ use crate::{ }, register_chain::output::RegisterChainOutput, }, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, }; #[derive(Debug, Deserialize, Serialize, Clone, Default)] @@ -114,7 +114,7 @@ impl FileConfigWithDefaultName for ContractsConfig { const FILE_NAME: &'static str = CONTRACTS_FILE; } -impl ZkToolboxConfig for ContractsConfig {} +impl ZkStackConfig for ContractsConfig {} #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] pub struct EcosystemContracts { @@ -125,7 +125,7 @@ pub diamond_cut_data: String, } -impl ZkToolboxConfig for EcosystemContracts {} +impl ZkStackConfig for EcosystemContracts {} #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct BridgesContracts { diff --git a/zk_toolbox/crates/config/src/docker_compose.rs b/zkstack_cli/crates/config/src/docker_compose.rs similarity index 94% rename from zk_toolbox/crates/config/src/docker_compose.rs rename to zkstack_cli/crates/config/src/docker_compose.rs index 05c6e73eaea5..2208c5a8654e 100644 --- a/zk_toolbox/crates/config/src/docker_compose.rs +++ b/zkstack_cli/crates/config/src/docker_compose.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Default, Serialize, Deserialize, Clone)] pub struct DockerComposeConfig { @@ -34,7 +34,7 @@ pub struct DockerComposeService { pub other:
serde_json::Value, } -impl ZkToolboxConfig for DockerComposeConfig {} +impl ZkStackConfig for DockerComposeConfig {} impl DockerComposeConfig { pub fn add_service(&mut self, name: &str, service: DockerComposeService) { diff --git a/zk_toolbox/crates/config/src/ecosystem.rs b/zkstack_cli/crates/config/src/ecosystem.rs similarity index 95% rename from zk_toolbox/crates/config/src/ecosystem.rs rename to zkstack_cli/crates/config/src/ecosystem.rs index 7e7c5d4dae52..79cb1c4ea27d 100644 --- a/zk_toolbox/crates/config/src/ecosystem.rs +++ b/zkstack_cli/crates/config/src/ecosystem.rs @@ -21,7 +21,7 @@ use crate::{ input::{Erc20DeploymentConfig, InitialDeploymentConfig}, output::{ERC20Tokens, Erc20Token}, }, - traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkStackConfig}, ChainConfig, ChainConfigInternal, ContractsConfig, WalletsConfig, }; @@ -94,9 +94,9 @@ impl FileConfigWithDefaultName for EcosystemConfig { const FILE_NAME: &'static str = CONFIG_NAME; } -impl ZkToolboxConfig for EcosystemConfigInternal {} +impl ZkStackConfig for EcosystemConfigInternal {} -impl ZkToolboxConfig for EcosystemConfig {} +impl ZkStackConfig for EcosystemConfig {} impl EcosystemConfig { fn get_shell(&self) -> &Shell { @@ -146,20 +146,20 @@ impl EcosystemConfig { .unwrap_or(self.default_chain.as_ref()) } - pub fn load_chain(&self, name: Option<String>) -> Option<ChainConfig> { + pub fn load_chain(&self, name: Option<String>) -> anyhow::Result<ChainConfig> { let name = name.unwrap_or(self.default_chain.clone()); self.load_chain_inner(&name) } - pub fn load_current_chain(&self) -> Option<ChainConfig> { + pub fn load_current_chain(&self) -> anyhow::Result<ChainConfig> { self.load_chain_inner(self.current_chain()) } - fn load_chain_inner(&self, name: &str) -> Option<ChainConfig> { + fn load_chain_inner(&self, name: &str) -> anyhow::Result<ChainConfig> { let path = self.chains.join(name).join(CONFIG_NAME); - let config = ChainConfigInternal::read(self.get_shell(), path.clone()).ok()?; + let config = ChainConfigInternal::read(self.get_shell(), path.clone())?; - Some(ChainConfig { + Ok(ChainConfig { id: config.id, name: config.name, chain_id: config.chain_id, @@ -196,8 +196,8 @@ impl EcosystemConfig { pub fn get_wallets(&self) -> anyhow::Result<WalletsConfig> { let path = self.config.join(WALLETS_FILE); - if let Ok(wallets) = WalletsConfig::read(self.get_shell(), &path) { - return Ok(wallets); + if self.get_shell().path_exists(&path) { + return WalletsConfig::read(self.get_shell(), &path); } if self.wallet_creation == WalletCreation::Localhost { // Use 0 id for ecosystem wallets diff --git a/zk_toolbox/crates/config/src/explorer.rs b/zkstack_cli/crates/config/src/explorer.rs similarity index 98% rename from zk_toolbox/crates/config/src/explorer.rs rename to zkstack_cli/crates/config/src/explorer.rs index ee7a59e5105c..7ce9b986a1eb 100644 --- a/zk_toolbox/crates/config/src/explorer.rs +++ b/zkstack_cli/crates/config/src/explorer.rs @@ -8,7 +8,7 @@ use crate::{ EXPLORER_CONFIG_FILE, EXPLORER_JS_CONFIG_FILE, LOCAL_APPS_PATH, LOCAL_CONFIGS_PATH, LOCAL_GENERATED_PATH, }, - traits::{ReadConfig, SaveConfig, ZkToolboxConfig}, + traits::{ReadConfig, SaveConfig, ZkStackConfig}, }; /// Explorer JSON configuration file. This file contains configuration for the explorer app.
@@ -144,4 +144,4 @@ impl Default for ExplorerConfig { } } -impl ZkToolboxConfig for ExplorerConfig {} +impl ZkStackConfig for ExplorerConfig {} diff --git a/zk_toolbox/crates/config/src/explorer_compose.rs b/zkstack_cli/crates/config/src/explorer_compose.rs similarity index 98% rename from zk_toolbox/crates/config/src/explorer_compose.rs rename to zkstack_cli/crates/config/src/explorer_compose.rs index ca9abc1e3e23..13dd665d2e3d 100644 --- a/zk_toolbox/crates/config/src/explorer_compose.rs +++ b/zkstack_cli/crates/config/src/explorer_compose.rs @@ -16,7 +16,7 @@ use crate::{ EXPLORER_WORKER_DOCKER_IMAGE, LOCAL_CHAINS_PATH, LOCAL_CONFIGS_PATH, }, docker_compose::{DockerComposeConfig, DockerComposeService}, - traits::ZkToolboxConfig, + traits::ZkStackConfig, EXPLORER_BATCHES_PROCESSING_POLLING_INTERVAL, }; @@ -72,7 +72,7 @@ pub struct ExplorerBackendComposeConfig { pub docker_compose: DockerComposeConfig, } -impl ZkToolboxConfig for ExplorerBackendComposeConfig {} +impl ZkStackConfig for ExplorerBackendComposeConfig {} impl ExplorerBackendComposeConfig { const API_NAME: &'static str = "api"; diff --git a/zk_toolbox/crates/config/src/external_node.rs b/zkstack_cli/crates/config/src/external_node.rs similarity index 82% rename from zk_toolbox/crates/config/src/external_node.rs rename to zkstack_cli/crates/config/src/external_node.rs index a07ff5dc1400..7d884d3e2346 100644 --- a/zk_toolbox/crates/config/src/external_node.rs +++ b/zkstack_cli/crates/config/src/external_node.rs @@ -2,7 +2,7 @@ use std::path::Path; use xshell::Shell; pub use zksync_config::configs::en_config::ENConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::EN_CONFIG_FILE, @@ -23,6 +23,6 @@ impl SaveConfig for ENConfig { impl ReadConfig for ENConfig { fn read(shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<Self> { let path = shell.current_dir().join(path); - decode_yaml_repr::<zksync_protobuf_config::proto::en::ExternalNode>(&path, false) + read_yaml_repr::<zksync_protobuf_config::proto::en::ExternalNode>(&path, false) } } diff --git a/zk_toolbox/crates/config/src/file_config.rs b/zkstack_cli/crates/config/src/file_config.rs similarity index 100% rename from zk_toolbox/crates/config/src/file_config.rs rename to zkstack_cli/crates/config/src/file_config.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/accept_ownership/mod.rs b/zkstack_cli/crates/config/src/forge_interface/accept_ownership/mod.rs similarity index 71% rename from zk_toolbox/crates/config/src/forge_interface/accept_ownership/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/accept_ownership/mod.rs index 636cffc49f89..4f73483b393a 100644 --- a/zk_toolbox/crates/config/src/forge_interface/accept_ownership/mod.rs +++ b/zkstack_cli/crates/config/src/forge_interface/accept_ownership/mod.rs @@ -1,9 +1,9 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; -impl ZkToolboxConfig for AcceptOwnershipInput {} +impl ZkStackConfig for AcceptOwnershipInput {} #[derive(Debug, Deserialize, Serialize, Clone)] pub struct AcceptOwnershipInput { diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs similarity index 97% rename from zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs index 41ce906f455b..d5611f805b17 100644 --- 
a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs @@ -10,7 +10,7 @@ use zksync_basic_types::L2ChainId; use crate::{ consts::INITIAL_DEPLOYMENT_FILE, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, ContractsConfig, GenesisConfig, WalletsConfig, }; @@ -61,7 +61,7 @@ impl FileConfigWithDefaultName for InitialDeploymentConfig { const FILE_NAME: &'static str = INITIAL_DEPLOYMENT_FILE; } -impl ZkToolboxConfig for InitialDeploymentConfig {} +impl ZkStackConfig for InitialDeploymentConfig {} #[derive(Debug, Deserialize, Serialize, Clone)] pub struct Erc20DeploymentConfig { @@ -72,7 +72,7 @@ impl FileConfigWithDefaultName for Erc20DeploymentConfig { const FILE_NAME: &'static str = INITIAL_DEPLOYMENT_FILE; } -impl ZkToolboxConfig for Erc20DeploymentConfig {} +impl ZkStackConfig for Erc20DeploymentConfig {} impl Default for Erc20DeploymentConfig { fn default() -> Self { @@ -115,7 +115,7 @@ pub struct DeployL1Config { pub tokens: TokensDeployL1Config, } -impl ZkToolboxConfig for DeployL1Config {} +impl ZkStackConfig for DeployL1Config {} impl DeployL1Config { pub fn new( @@ -212,7 +212,7 @@ pub struct DeployErc20Config { pub additional_addresses_for_minting: Vec<Address>
, } -impl ZkToolboxConfig for DeployErc20Config {} +impl ZkStackConfig for DeployErc20Config {} impl DeployErc20Config { pub fn new( diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/mod.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs similarity index 95% rename from zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs index 7f35cf0357c2..7a922cbdf3c0 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::{ consts::ERC20_CONFIGS_FILE, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, }; #[derive(Debug, Deserialize, Serialize, Clone)] @@ -21,7 +21,7 @@ pub struct DeployL1Output { pub deployed_addresses: DeployL1DeployedAddressesOutput, } -impl ZkToolboxConfig for DeployL1Output {} +impl ZkStackConfig for DeployL1Output {} #[derive(Debug, Deserialize, Serialize, Clone)] pub struct DeployL1ContractsConfigOutput { @@ -98,4 +98,4 @@ impl FileConfigWithDefaultName for ERC20Tokens { const FILE_NAME: &'static str = ERC20_CONFIGS_FILE; } -impl ZkToolboxConfig for ERC20Tokens {} +impl ZkStackConfig for ERC20Tokens {} diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs similarity index 92% rename from zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs index b20b58f99c58..3836dca9d24c 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs @@ -2,9 +2,9 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; use zksync_basic_types::L2ChainId; -use crate::{traits::ZkToolboxConfig, ChainConfig}; +use crate::{traits::ZkStackConfig, ChainConfig}; -impl ZkToolboxConfig for DeployL2ContractsInput {} +impl ZkStackConfig for DeployL2ContractsInput {} /// Fields corresponding to `contracts/l1-contracts/deploy-script-config-template/config-deploy-l2-config.toml` /// which are read by `contracts/l1-contracts/deploy-scripts/DeployL2Contracts.sol`. 
diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs similarity index 73% rename from zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs index ca5cac12c02d..29be89b91016 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs @@ -1,12 +1,12 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; -impl ZkToolboxConfig for InitializeBridgeOutput {} -impl ZkToolboxConfig for DefaultL2UpgradeOutput {} -impl ZkToolboxConfig for ConsensusRegistryOutput {} -impl ZkToolboxConfig for Multicall3Output {} +impl ZkStackConfig for InitializeBridgeOutput {} +impl ZkStackConfig for DefaultL2UpgradeOutput {} +impl ZkStackConfig for ConsensusRegistryOutput {} +impl ZkStackConfig for Multicall3Output {} #[derive(Debug, Clone, Serialize, Deserialize)] pub struct InitializeBridgeOutput { diff --git a/zk_toolbox/crates/config/src/forge_interface/mod.rs b/zkstack_cli/crates/config/src/forge_interface/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/paymaster/mod.rs b/zkstack_cli/crates/config/src/forge_interface/paymaster/mod.rs similarity index 83% rename from zk_toolbox/crates/config/src/forge_interface/paymaster/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/paymaster/mod.rs index 9631fe743180..2af7502e0b76 100644 --- a/zk_toolbox/crates/config/src/forge_interface/paymaster/mod.rs +++ b/zkstack_cli/crates/config/src/forge_interface/paymaster/mod.rs @@ -2,7 +2,7 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; use zksync_basic_types::L2ChainId; -use crate::{traits::ZkToolboxConfig, ChainConfig}; +use crate::{traits::ZkStackConfig, ChainConfig}; #[derive(Debug, Serialize, Deserialize, Clone)] pub struct DeployPaymasterInput { @@ -22,11 +22,11 @@ impl DeployPaymasterInput { } } -impl ZkToolboxConfig for DeployPaymasterInput {} +impl ZkStackConfig for DeployPaymasterInput {} #[derive(Debug, Serialize, Deserialize, Clone)] pub struct DeployPaymasterOutput { pub paymaster: Address, } -impl ZkToolboxConfig for DeployPaymasterOutput {} +impl ZkStackConfig for DeployPaymasterOutput {} diff --git a/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs b/zkstack_cli/crates/config/src/forge_interface/register_chain/input.rs similarity index 96% rename from zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs rename to zkstack_cli/crates/config/src/forge_interface/register_chain/input.rs index e2e60294e867..fb7c606a4569 100644 --- a/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/register_chain/input.rs @@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize}; use 
types::L1BatchCommitmentMode; use zksync_basic_types::L2ChainId; -use crate::{traits::ZkToolboxConfig, ChainConfig, ContractsConfig}; +use crate::{traits::ZkStackConfig, ChainConfig, ContractsConfig}; #[derive(Debug, Deserialize, Serialize, Clone)] struct Bridgehub { @@ -50,7 +50,7 @@ pub struct ChainL1Config { pub governance_min_delay: u64, } -impl ZkToolboxConfig for RegisterChainL1Config {} +impl ZkStackConfig for RegisterChainL1Config {} impl RegisterChainL1Config { pub fn new(chain_config: &ChainConfig, contracts: &ContractsConfig) -> anyhow::Result<Self> { diff --git a/zk_toolbox/crates/config/src/forge_interface/register_chain/mod.rs b/zkstack_cli/crates/config/src/forge_interface/register_chain/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/register_chain/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/register_chain/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs b/zkstack_cli/crates/config/src/forge_interface/register_chain/output.rs similarity index 75% rename from zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs rename to zkstack_cli/crates/config/src/forge_interface/register_chain/output.rs index f9521b16328f..a3e23f7bae42 100644 --- a/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/register_chain/output.rs @@ -1,7 +1,7 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Deserialize, Serialize, Clone)] pub struct RegisterChainOutput { @@ -10,4 +10,4 @@ pub struct RegisterChainOutput { pub chain_admin_addr: Address, } -impl ZkToolboxConfig for RegisterChainOutput {} +impl ZkStackConfig for RegisterChainOutput {} diff --git a/zk_toolbox/crates/config/src/forge_interface/script_params.rs b/zkstack_cli/crates/config/src/forge_interface/script_params.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/script_params.rs rename to zkstack_cli/crates/config/src/forge_interface/script_params.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs b/zkstack_cli/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs similarity index 86% rename from zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs index e8189c521fb3..201cf86b734b 100644 --- a/zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs +++ b/zkstack_cli/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use zksync_basic_types::{Address, L2ChainId, H256}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SetupLegacyBridgeInput { @@ -17,4 +17,4 @@ pub struct SetupLegacyBridgeInput { pub create2factory_addr: Address, } -impl ZkToolboxConfig for SetupLegacyBridgeInput {} +impl ZkStackConfig for SetupLegacyBridgeInput {} diff --git a/zk_toolbox/crates/config/src/general.rs b/zkstack_cli/crates/config/src/general.rs similarity index 96% rename from zk_toolbox/crates/config/src/general.rs rename to zkstack_cli/crates/config/src/general.rs index a8e7407edd02..0079105b66ca 100644 --- a/zk_toolbox/crates/config/src/general.rs +++ b/zkstack_cli/crates/config/src/general.rs @@ -6,7 +6,7 @@ use url::Url; use xshell::Shell; use 
zksync_config::configs::object_store::ObjectStoreMode; pub use zksync_config::configs::GeneralConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::GENERAL_FILE, @@ -137,7 +137,7 @@ impl SaveConfig for GeneralConfig { impl ReadConfig for GeneralConfig { fn read(shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<Self> { let path = shell.current_dir().join(path); - decode_yaml_repr::<zksync_protobuf_config::proto::general::GeneralConfig>(&path, false) + read_yaml_repr::<zksync_protobuf_config::proto::general::GeneralConfig>(&path, false) } } diff --git a/zk_toolbox/crates/config/src/genesis.rs b/zkstack_cli/crates/config/src/genesis.rs similarity index 88% rename from zk_toolbox/crates/config/src/genesis.rs rename to zkstack_cli/crates/config/src/genesis.rs index a6469893fed2..933252541f43 100644 --- a/zk_toolbox/crates/config/src/genesis.rs +++ b/zkstack_cli/crates/config/src/genesis.rs @@ -3,7 +3,7 @@ use std::path::Path; use xshell::Shell; use zksync_basic_types::L1ChainId; pub use zksync_config::GenesisConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::GENESIS_FILE, @@ -32,6 +32,6 @@ impl SaveConfig for GenesisConfig { impl ReadConfig for GenesisConfig { fn read(shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<Self> { let path = shell.current_dir().join(path); - decode_yaml_repr::<zksync_protobuf_config::proto::genesis::Genesis>(&path, false) + read_yaml_repr::<zksync_protobuf_config::proto::genesis::Genesis>(&path, false) } } diff --git a/zk_toolbox/crates/config/src/lib.rs b/zkstack_cli/crates/config/src/lib.rs similarity index 90% rename from zk_toolbox/crates/config/src/lib.rs rename to zkstack_cli/crates/config/src/lib.rs index 1a7c5bf1d7e2..b449aefe3a26 100644 --- a/zk_toolbox/crates/config/src/lib.rs +++ b/zkstack_cli/crates/config/src/lib.rs @@ -10,7 +10,7 @@ pub use manipulations::*; pub use secrets::*; pub use wallet_creation::*; pub use wallets::*; -pub use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +pub use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; mod apps; mod chain; diff --git a/zk_toolbox/crates/config/src/manipulations.rs b/zkstack_cli/crates/config/src/manipulations.rs similarity index 100% rename from zk_toolbox/crates/config/src/manipulations.rs rename to zkstack_cli/crates/config/src/manipulations.rs diff --git a/zk_toolbox/crates/config/src/portal.rs b/zkstack_cli/crates/config/src/portal.rs similarity index 98% rename from zk_toolbox/crates/config/src/portal.rs rename to zkstack_cli/crates/config/src/portal.rs index c787c6cc7026..2b6f0ffd5156 100644 --- a/zk_toolbox/crates/config/src/portal.rs +++ b/zkstack_cli/crates/config/src/portal.rs @@ -9,7 +9,7 @@ use crate::{ LOCAL_APPS_PATH, LOCAL_CONFIGS_PATH, LOCAL_GENERATED_PATH, PORTAL_CONFIG_FILE, PORTAL_JS_CONFIG_FILE, }, - traits::{ReadConfig, SaveConfig, ZkToolboxConfig}, + traits::{ReadConfig, SaveConfig, ZkStackConfig}, }; /// Portal JSON configuration file. This file contains configuration for the portal app. 
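Unlike the marker-trait configs, the zksync-core configs in `general.rs`, `genesis.rs`, `external_node.rs`, and `secrets.rs` have protobuf schemas, so their `ReadConfig` impls are written by hand around the renamed `read_yaml_repr` helper. The shared shape, sketched with placeholders (`FooConfig` and `proto::foo::FooConfig` are stand-ins, not real types):

```rust
use std::path::Path;

use xshell::Shell;
use zksync_protobuf_config::read_yaml_repr;

use crate::traits::ReadConfig;

impl ReadConfig for FooConfig {
    fn read(shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<Self> {
        // Resolve relative paths against the shell's current dir, as the
        // impls in this diff do. Judging by its use elsewhere in the CLI,
        // the second argument is a deny-unknown-fields flag, so `false`
        // tolerates extra YAML fields.
        let path = shell.current_dir().join(path);
        read_yaml_repr::<proto::foo::FooConfig>(&path, false)
    }
}
```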
@@ -172,4 +172,4 @@ impl Default for PortalConfig { } } -impl ZkToolboxConfig for PortalConfig {} +impl ZkStackConfig for PortalConfig {} diff --git a/zk_toolbox/crates/config/src/secrets.rs b/zkstack_cli/crates/config/src/secrets.rs similarity index 92% rename from zk_toolbox/crates/config/src/secrets.rs rename to zkstack_cli/crates/config/src/secrets.rs index 02ace5da88ef..cf0a9927c560 100644 --- a/zk_toolbox/crates/config/src/secrets.rs +++ b/zkstack_cli/crates/config/src/secrets.rs @@ -5,7 +5,7 @@ use common::db::DatabaseConfig; use xshell::Shell; use zksync_basic_types::url::SensitiveUrl; pub use zksync_config::configs::Secrets as SecretsConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::SECRETS_FILE, @@ -59,6 +59,6 @@ impl SaveConfig for SecretsConfig { impl ReadConfig for SecretsConfig { fn read(shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<Self> { let path = shell.current_dir().join(path); - decode_yaml_repr::<zksync_protobuf_config::proto::secrets::Secrets>(&path, false) + read_yaml_repr::<zksync_protobuf_config::proto::secrets::Secrets>(&path, false) } } diff --git a/zk_toolbox/crates/config/src/traits.rs b/zkstack_cli/crates/config/src/traits.rs similarity index 95% rename from zk_toolbox/crates/config/src/traits.rs rename to zkstack_cli/crates/config/src/traits.rs index bb0722762e31..a4a4ad22c613 100644 --- a/zk_toolbox/crates/config/src/traits.rs +++ b/zkstack_cli/crates/config/src/traits.rs @@ -8,8 +8,8 @@ use serde::{de::DeserializeOwned, Serialize}; use url::Url; use xshell::Shell; -// Configs that we use only inside zk toolbox, we don't have protobuf implementation for them. -pub trait ZkToolboxConfig {} +// Configs that we use only inside ZK Stack CLI, we don't have protobuf implementation for them. +pub trait ZkStackConfig {} pub trait FileConfigWithDefaultName { const FILE_NAME: &'static str; @@ -19,7 +19,7 @@ pub trait FileConfigWithDefaultName { } } -impl<T: Serialize + ZkToolboxConfig> SaveConfig for T { +impl<T: Serialize + ZkStackConfig> SaveConfig for T { fn save(&self, shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<()> { save_with_comment(shell, path, self, "") } @@ -49,7 +49,7 @@ pub trait ReadConfig: Sized { impl<T> ReadConfig for T where - T: DeserializeOwned + Clone + ZkToolboxConfig, + T: DeserializeOwned + Clone + ZkStackConfig, { fn read(shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<Self> { let error_context = || format!("Failed to parse config file {:?}.", path.as_ref()); diff --git a/zk_toolbox/crates/config/src/wallet_creation.rs b/zkstack_cli/crates/config/src/wallet_creation.rs similarity index 100% rename from zk_toolbox/crates/config/src/wallet_creation.rs rename to zkstack_cli/crates/config/src/wallet_creation.rs diff --git a/zk_toolbox/crates/config/src/wallets.rs b/zkstack_cli/crates/config/src/wallets.rs similarity index 75% rename from zk_toolbox/crates/config/src/wallets.rs rename to zkstack_cli/crates/config/src/wallets.rs index 9c87453954ec..735848f6e34d 100644 --- a/zk_toolbox/crates/config/src/wallets.rs +++ b/zkstack_cli/crates/config/src/wallets.rs @@ -1,11 +1,10 @@ use common::wallets::Wallet; -use ethers::types::H256; -use rand::Rng; +use rand::{CryptoRng, Rng}; use serde::{Deserialize, Serialize}; use crate::{ consts::WALLETS_FILE, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, }; #[derive(Debug, Clone, Serialize, Deserialize)] @@ -20,7 +19,7 @@ pub struct WalletsConfig { impl WalletsConfig { /// Generate random wallets - pub fn random(rng: &mut impl Rng) -> Self { + pub fn random(rng: &mut (impl CryptoRng + Rng)) -> Self 
{ Self { deployer: Some(Wallet::random(rng)), operator: Wallet::random(rng), @@ -42,13 +41,6 @@ impl WalletsConfig { token_multiplier_setter: Some(Wallet::empty()), } } - pub fn deployer_private_key(&self) -> Option<H256> { - self.deployer.as_ref().and_then(|wallet| wallet.private_key) - } - - pub fn governor_private_key(&self) -> Option<H256> { - self.governor.private_key - } } impl FileConfigWithDefaultName for WalletsConfig { @@ -63,6 +55,6 @@ pub(crate) struct EthMnemonicConfig { pub(crate) base_path: String, } -impl ZkToolboxConfig for EthMnemonicConfig {} +impl ZkStackConfig for EthMnemonicConfig {} -impl ZkToolboxConfig for WalletsConfig {} +impl ZkStackConfig for WalletsConfig {} diff --git a/zk_toolbox/crates/git_version_macro/Cargo.toml b/zkstack_cli/crates/git_version_macro/Cargo.toml similarity index 100% rename from zk_toolbox/crates/git_version_macro/Cargo.toml rename to zkstack_cli/crates/git_version_macro/Cargo.toml diff --git a/zk_toolbox/crates/git_version_macro/src/lib.rs b/zkstack_cli/crates/git_version_macro/src/lib.rs similarity index 100% rename from zk_toolbox/crates/git_version_macro/src/lib.rs rename to zkstack_cli/crates/git_version_macro/src/lib.rs diff --git a/zk_toolbox/crates/types/Cargo.toml b/zkstack_cli/crates/types/Cargo.toml similarity index 100% rename from zk_toolbox/crates/types/Cargo.toml rename to zkstack_cli/crates/types/Cargo.toml diff --git a/zk_toolbox/crates/types/src/base_token.rs b/zkstack_cli/crates/types/src/base_token.rs similarity index 100% rename from zk_toolbox/crates/types/src/base_token.rs rename to zkstack_cli/crates/types/src/base_token.rs diff --git a/zk_toolbox/crates/types/src/l1_network.rs b/zkstack_cli/crates/types/src/l1_network.rs similarity index 100% rename from zk_toolbox/crates/types/src/l1_network.rs rename to zkstack_cli/crates/types/src/l1_network.rs diff --git a/zk_toolbox/crates/types/src/lib.rs b/zkstack_cli/crates/types/src/lib.rs similarity index 71% rename from zk_toolbox/crates/types/src/lib.rs rename to zkstack_cli/crates/types/src/lib.rs index 8b6470571051..075e39345bcf 100644 --- a/zk_toolbox/crates/types/src/lib.rs +++ b/zkstack_cli/crates/types/src/lib.rs @@ -10,5 +10,5 @@ pub use prover_mode::*; pub use token_info::*; pub use wallet_creation::*; pub use zksync_basic_types::{ - commitment::L1BatchCommitmentMode, protocol_version::ProtocolSemanticVersion, + commitment::L1BatchCommitmentMode, parse_h256, protocol_version::ProtocolSemanticVersion, }; diff --git a/zk_toolbox/crates/types/src/prover_mode.rs b/zkstack_cli/crates/types/src/prover_mode.rs similarity index 100% rename from zk_toolbox/crates/types/src/prover_mode.rs rename to zkstack_cli/crates/types/src/prover_mode.rs diff --git a/zk_toolbox/crates/types/src/token_info.rs b/zkstack_cli/crates/types/src/token_info.rs similarity index 100% rename from zk_toolbox/crates/types/src/token_info.rs rename to zkstack_cli/crates/types/src/token_info.rs diff --git a/zk_toolbox/crates/types/src/wallet_creation.rs b/zkstack_cli/crates/types/src/wallet_creation.rs similarity index 100% rename from zk_toolbox/crates/types/src/wallet_creation.rs rename to zkstack_cli/crates/types/src/wallet_creation.rs diff --git a/zk_toolbox/crates/zk_inception/Cargo.toml b/zkstack_cli/crates/zkstack/Cargo.toml similarity index 75% rename from zk_toolbox/crates/zk_inception/Cargo.toml rename to zkstack_cli/crates/zkstack/Cargo.toml index 28b709c557b7..a9fcecaf79b4 100644 --- a/zk_toolbox/crates/zk_inception/Cargo.toml +++ b/zkstack_cli/crates/zkstack/Cargo.toml @@ -1,5 +1,5 @@ [package] 
-name = "zk_inception" +name = "zkstack" version = "0.1.0" edition.workspace = true homepage.workspace = true @@ -12,31 +12,43 @@ keywords.workspace = true [dependencies] anyhow.workspace = true +chrono.workspace = true clap.workspace = true +clap-markdown.workspace = true cliclack.workspace = true +common.workspace = true config.workspace = true +ethers.workspace = true +futures.workspace = true human-panic.workspace = true lazy_static.workspace = true -serde_yaml.workspace = true +secrecy.workspace = true serde.workspace = true serde_json.workspace = true -xshell.workspace = true -ethers.workspace = true -common.workspace = true -tokio.workspace = true -types.workspace = true +serde_yaml.workspace = true +slugify-rs.workspace = true strum.workspace = true +sqruff-lib = "0.19.0" +thiserror.workspace = true +tokio.workspace = true toml.workspace = true +types.workspace = true url.workspace = true -thiserror.workspace = true -zksync_config.workspace = true -slugify-rs.workspace = true +xshell.workspace = true zksync_basic_types.workspace = true -clap-markdown.workspace = true +zksync_config.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_crypto.workspace = true -secrecy.workspace = true +zksync_protobuf.workspace = true +zksync_protobuf_config.workspace = true +prost.workspace = true +reqwest = "0.12.8" + +[dev-dependencies] +rand.workspace = true +zksync_consensus_utils.workspace = true [build-dependencies] eyre.workspace = true ethers.workspace = true +zksync_protobuf_build.workspace = true diff --git a/zk_toolbox/crates/zk_inception/README.md b/zkstack_cli/crates/zkstack/README.md similarity index 90% rename from zk_toolbox/crates/zk_inception/README.md rename to zkstack_cli/crates/zkstack/README.md index 7923afe4e98f..6e529efc2009 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zkstack_cli/crates/zkstack/README.md @@ -17,8 +17,12 @@ This document contains the help content for the `zk_inception` command-line prog - [`zk_inception chain initialize-bridges`↴](#zk_inception-chain-initialize-bridges) - [`zk_inception chain deploy-l2-contracts`↴](#zk_inception-chain-deploy-l2-contracts) - [`zk_inception chain upgrader`↴](#zk_inception-chain-upgrader) +- [`zk_inception chain deploy-consensus-registry`↴](#zk_inception-chain-deploy-consensus-registry) +- [`zk_inception chain deploy-multicall3`↴](#zk_inception-chain-deploy-multicall3) - [`zk_inception chain deploy-paymaster`↴](#zk_inception-chain-deploy-paymaster) - [`zk_inception chain update-token-multiplier-setter`↴](#zk_inception-chain-update-token-multiplier-setter) +- [`zk_inception consensus set-attester-committee`↴](#zk_inception-consensus-set-attester-committee) +- [`zk_inception consensus get-attester-committee`↴](#zk_inception-consensus-get-attester-committee) - [`zk_inception prover`↴](#zk_inception-prover) - [`zk_inception prover init`↴](#zk_inception-prover-init) - [`zk_inception prover setup-keys`↴](#zk_inception-prover-setup-keys) @@ -38,7 +42,7 @@ This document contains the help content for the `zk_inception` command-line prog ## `zk_inception` -ZK Toolbox is a set of tools for working with zk stack. +ZK Stack CLI is a set of tools for working with zk stack. 
**Usage:** `zk_inception [OPTIONS] <COMMAND>` @@ -364,6 +368,18 @@ Deploy Default Upgrader e.g.: `zk_inception init -a --private-key=<PRIVATE_KEY>` +## `zk_inception chain deploy-consensus-registry` + +Deploy Consensus Registry smart contract + +**Usage:** `zk_inception chain deploy-consensus-registry` + +## `zk_inception chain deploy-multicall3` + +Deploy Multicall3 smart contract + +**Usage:** `zk_inception chain deploy-multicall3` + ## `zk_inception chain deploy-paymaster` Deploy paymaster smart contract @@ -414,6 +430,47 @@ Update Token Multiplier Setter address on L1 e.g.: `zk_inception init -a --private-key=<PRIVATE_KEY>` +## `zk_inception consensus` + +Consensus related commands + +**Usage:** `zk_inception consensus <COMMAND>` + +###### **Subcommands:** + +- `set-attester-committee` — Set attester committee +- `get-attester-committee` — Get attester committee + +## `zk_inception consensus set-attester-committee` + +Set attester committee in the consensus registry smart contract. Requires `consensus_registry` and `multicall3` +contracts to be deployed. + +**Usage:** `zk_inception consensus set-attester-committee [OPTIONS]` + +###### **Options:** + +- `--from-genesis` — Set attester committee to `consensus.genesis_spec.attesters` in general.yaml. Mutually exclusive + with `--from-file`. +- `--from-file <PATH>` — Set attester committee to committee specified in yaml file at `PATH`. + Mutually exclusive with `--from-genesis`. File format is specified in + `zk_inception/src/commands/consensus/proto/mod.proto`. Example: + + ```yaml + attesters: + - key: attester:public:secp256k1:0339d4b0cdd9896d3929631a4e5e9a5b4919f52592bec571d70bb0e50a3a824714 + weight: 1 + - key: attester:public:secp256k1:024897d8c10d7a57d108cfe2a724d7824c657f219ef5d9f7674810a6746c19fa7b + weight: 1 + ``` + +## `zk_inception consensus get-attester-committee` + +Requires `consensus_registry` and `multicall3` contracts to be deployed. Fetches attester committee from the consensus +registry contract and prints it. + +**Usage:** `zk_inception consensus get-attester-committee` + ## `zk_inception prover` Prover related commands diff --git a/zk_toolbox/crates/zk_inception/abi/ConsensusRegistry.json b/zkstack_cli/crates/zkstack/abi/ConsensusRegistry.json similarity index 100% rename from zk_toolbox/crates/zk_inception/abi/ConsensusRegistry.json rename to zkstack_cli/crates/zkstack/abi/ConsensusRegistry.json diff --git a/zkstack_cli/crates/zkstack/build.rs b/zkstack_cli/crates/zkstack/build.rs new file mode 100644 index 000000000000..92f34a542b7f --- /dev/null +++ b/zkstack_cli/crates/zkstack/build.rs @@ -0,0 +1,21 @@ +use std::path::PathBuf; + +use ethers::contract::Abigen; + +fn main() -> eyre::Result<()> { + let outdir = PathBuf::from(std::env::var("OUT_DIR")?).canonicalize()?; + Abigen::new("ConsensusRegistry", "abi/ConsensusRegistry.json")? + .generate()? 
+ .write_to_file(outdir.join("consensus_registry_abi.rs"))?; + + zksync_protobuf_build::Config { + input_root: "src/commands/consensus/proto".into(), + proto_root: "zksync/toolbox/consensus".into(), + dependencies: vec!["::zksync_protobuf_config::proto".parse().unwrap()], + protobuf_crate: "::zksync_protobuf".parse().unwrap(), + is_public: false, + } + .generate() + .unwrap(); + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/accept_ownership.rs b/zkstack_cli/crates/zkstack/src/accept_ownership.rs similarity index 92% rename from zk_toolbox/crates/zk_inception/src/accept_ownership.rs rename to zkstack_cli/crates/zkstack/src/accept_ownership.rs index d2bab9283740..474e76e599a8 100644 --- a/zk_toolbox/crates/zk_inception/src/accept_ownership.rs +++ b/zkstack_cli/crates/zkstack/src/accept_ownership.rs @@ -1,13 +1,10 @@ use common::{ forge::{Forge, ForgeScript, ForgeScriptArgs}, spinner::Spinner, + wallets::Wallet, }; use config::{forge_interface::script_params::ACCEPT_GOVERNANCE_SCRIPT_PARAMS, EcosystemConfig}; -use ethers::{ - abi::parse_abi, - contract::BaseContract, - types::{Address, H256}, -}; +use ethers::{abi::parse_abi, contract::BaseContract, types::Address}; use lazy_static::lazy_static; use xshell::Shell; @@ -31,7 +28,7 @@ pub async fn accept_admin( shell: &Shell, ecosystem_config: &EcosystemConfig, admin: Address, - governor: Option<H256>, + governor: &Wallet, target_address: Address, forge_args: &ForgeScriptArgs, l1_rpc_url: String, @@ -62,7 +59,7 @@ pub async fn accept_owner( shell: &Shell, ecosystem_config: &EcosystemConfig, governor_contract: Address, - governor: Option<H256>, + governor: &Wallet, target_address: Address, forge_args: &ForgeScriptArgs, l1_rpc_url: String, @@ -89,10 +86,10 @@ pub async fn accept_owner( async fn accept_ownership( shell: &Shell, - governor: Option<H256>, + governor: &Wallet, mut forge: ForgeScript, ) -> anyhow::Result<()> { - forge = fill_forge_private_key(forge, governor)?; + forge = fill_forge_private_key(forge, Some(governor))?; check_the_balance(&forge).await?; let spinner = Spinner::new(MSG_ACCEPTING_GOVERNANCE_SPINNER); forge.run(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/containers.rs b/zkstack_cli/crates/zkstack/src/commands/args/containers.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/args/containers.rs rename to zkstack_cli/crates/zkstack/src/commands/args/containers.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs b/zkstack_cli/crates/zkstack/src/commands/args/run_server.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs rename to zkstack_cli/crates/zkstack/src/commands/args/run_server.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/update.rs b/zkstack_cli/crates/zkstack/src/commands/args/update.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/args/update.rs rename to zkstack_cli/crates/zkstack/src/commands/args/update.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs b/zkstack_cli/crates/zkstack/src/commands/chain/accept_chain_ownership.rs similarity index 94% rename from 
zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/accept_chain_ownership.rs index 37d69fcf5bcc..cf3e2981b3c7 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/accept_chain_ownership.rs @@ -30,7 +30,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { shell, &ecosystem_config, contracts.l1.chain_admin_addr, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, contracts.l1.diamond_proxy_addr, &args, l1_rpc_url.clone(), diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/build_transactions.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/build_transactions.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/init/configs.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/init/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs b/zkstack_cli/crates/zkstack/src/commands/chain/common.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/common.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/common.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs b/zkstack_cli/crates/zkstack/src/commands/chain/create.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/create.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/create.rs diff --git 
a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs similarity index 89% rename from zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs index 26a1d0bb3251..5a4f1f86f350 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs @@ -35,6 +35,7 @@ pub enum Deploy2ContractsOption { Upgrader, InitiailizeBridges, ConsensusRegistry, + Multicall3, } pub async fn run( @@ -82,6 +83,16 @@ pub async fn run( ) .await?; } + Deploy2ContractsOption::Multicall3 => { + deploy_multicall3( + shell, + &chain_config, + &ecosystem_config, + &mut contracts, + args, + ) + .await?; + } Deploy2ContractsOption::InitiailizeBridges => { initialize_bridges( shell, @@ -184,6 +195,24 @@ pub async fn deploy_consensus_registry( .await } +pub async fn deploy_multicall3( + shell: &Shell, + chain_config: &ChainConfig, + ecosystem_config: &EcosystemConfig, + contracts_config: &mut ContractsConfig, + forge_args: ForgeScriptArgs, +) -> anyhow::Result<()> { + build_and_deploy( + shell, + chain_config, + ecosystem_config, + forge_args, + Some("runDeployMulticall3"), + |shell, out| contracts_config.set_multicall3(&Multicall3Output::read(shell, out)?), + ) + .await +} + pub async fn deploy_l2_contracts( shell: &Shell, chain_config: &ChainConfig, @@ -248,10 +277,7 @@ async fn call_forge( forge = forge.with_signature(signature); } - forge = fill_forge_private_key( - forge, - ecosystem_config.get_wallets()?.governor_private_key(), - )?; + forge = fill_forge_private_key(forge, Some(&ecosystem_config.get_wallets()?.governor))?; check_the_balance(&forge).await?; forge.run(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_paymaster.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/deploy_paymaster.rs index 0da56f0c962d..4a93fcc089f8 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_paymaster.rs @@ -56,10 +56,7 @@ pub async fn deploy_paymaster( if let Some(address) = sender { forge = forge.with_sender(address); } else { - forge = fill_forge_private_key( - forge, - chain_config.get_wallets_config()?.governor_private_key(), - )?; + forge = fill_forge_private_key(forge, Some(&chain_config.get_wallets_config()?.governor))?; } if broadcast { diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/database.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/genesis/database.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/genesis/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/server.rs b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/server.rs similarity index 100% rename from 
zk_toolbox/crates/zk_inception/src/commands/chain/genesis/server.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/genesis/server.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs similarity index 79% rename from zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs index d0897473b832..37ee2e076ab9 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs @@ -2,7 +2,7 @@ use anyhow::Context; use common::logger; use config::{ copy_configs, set_l1_rpc_url, traits::SaveConfigWithBasePath, update_from_chain_config, - ChainConfig, ContractsConfig, EcosystemConfig, DEFAULT_CONSENSUS_PORT, + ChainConfig, ContractsConfig, EcosystemConfig, }; use ethers::types::Address; use xshell::Shell; @@ -15,13 +15,12 @@ use crate::{ }, portal::update_portal_config, }, - defaults::PORT_RANGE_END, messages::{ - MSG_CHAIN_CONFIGS_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, + MSG_CHAIN_CONFIGS_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, MSG_CONSENSUS_CONFIG_MISSING_ERR, MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, }, utils::{ - consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets}, + consensus::{generate_consensus_keys, get_consensus_secrets, get_genesis_specs}, ports::EcosystemPortsScanner, }, }; @@ -57,22 +56,14 @@ pub async fn init_configs( )?; } - // Initialize general config let mut general_config = chain_config.get_general_config()?; - - // TODO: This is a temporary solution. We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` - let offset = ((chain_config.id - 1) * 100) as u16; - let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; - let consensus_port = - ecosystem_ports.allocate_port(consensus_port_range, "Consensus".to_string())?; + let mut consensus_config = general_config + .consensus_config + .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; let consensus_keys = generate_consensus_keys(); - let consensus_config = get_consensus_config( - chain_config, - consensus_port, - Some(consensus_keys.clone()), - None, - )?; + consensus_config.genesis_spec = Some(get_genesis_specs(chain_config, &consensus_keys)); + general_config.consensus_config = Some(consensus_config); general_config.save_with_base_path(shell, &chain_config.configs)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs similarity index 97% rename from zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs index ac80a5b98f72..d92c56d2eb10 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs @@ -107,7 +107,7 @@ pub async fn init( shell, ecosystem_config, contracts_config.l1.chain_admin_addr, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, contracts_config.l1.diamond_proxy_addr, &init_args.forge_args.clone(), init_args.l1_rpc_url.clone(), @@ -121,7 +121,7 @@ pub async fn init( set_token_multiplier_setter( shell, ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, contracts_config.l1.chain_admin_addr, chain_config .get_wallets_config() diff 
--git a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/mod.rs similarity index 93% rename from zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/mod.rs index 378309a07cb1..c9a47616486d 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/mod.rs @@ -53,6 +53,9 @@ pub enum ChainCommands { /// Deploy L2 consensus registry #[command(alias = "consensus")] DeployConsensusRegistry(ForgeScriptArgs), + /// Deploy L2 multicall3 + #[command(alias = "multicall3")] + DeployMulticall3(ForgeScriptArgs), /// Deploy Default Upgrader #[command(alias = "upgrader")] DeployUpgrader(ForgeScriptArgs), @@ -77,6 +80,9 @@ pub(crate) async fn run(shell: &Shell, args: ChainCommands) -> anyhow::Result<() ChainCommands::DeployConsensusRegistry(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::ConsensusRegistry).await } + ChainCommands::DeployMulticall3(args) => { + deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Multicall3).await + } ChainCommands::DeployUpgrader(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Upgrader).await } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs b/zkstack_cli/crates/zkstack/src/commands/chain/register_chain.rs similarity index 96% rename from zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/register_chain.rs index 9f2ff41f897e..65ee05a1ea5f 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/register_chain.rs @@ -81,7 +81,7 @@ pub async fn register_chain( if let Some(address) = sender { forge = forge.with_sender(address); } else { - forge = fill_forge_private_key(forge, config.get_wallets()?.governor_private_key())?; + forge = fill_forge_private_key(forge, Some(&config.get_wallets()?.governor))?; check_the_balance(&forge).await?; } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs b/zkstack_cli/crates/zkstack/src/commands/chain/set_token_multiplier_setter.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/set_token_multiplier_setter.rs index 475725cd14ef..4a6cd31b2c0a 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/set_token_multiplier_setter.rs @@ -3,12 +3,13 @@ use common::{ forge::{Forge, ForgeScript, ForgeScriptArgs}, logger, spinner::Spinner, + wallets::Wallet, }; use config::{forge_interface::script_params::ACCEPT_GOVERNANCE_SCRIPT_PARAMS, EcosystemConfig}; use ethers::{abi::parse_abi, contract::BaseContract, utils::hex}; use lazy_static::lazy_static; use xshell::Shell; -use zksync_basic_types::{Address, H256}; +use zksync_basic_types::Address; use crate::{ messages::{ @@ -52,7 +53,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { set_token_multiplier_setter( shell, &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, contracts_config.l1.chain_admin_addr, token_multiplier_setter_address, &args.clone(), @@ -72,7 +73,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> 
anyhow::Result<()> { pub async fn set_token_multiplier_setter( shell: &Shell, ecosystem_config: &EcosystemConfig, - governor: Option<H256>, + governor: &Wallet, chain_admin_address: Address, target_address: Address, forge_args: &ForgeScriptArgs, @@ -105,10 +106,10 @@ pub async fn set_token_multiplier_setter( async fn update_token_multiplier_setter( shell: &Shell, - governor: Option<H256>, + governor: &Wallet, mut forge: ForgeScript, ) -> anyhow::Result<()> { - forge = fill_forge_private_key(forge, governor)?; + forge = fill_forge_private_key(forge, Some(governor))?; check_the_balance(&forge).await?; forge.run(shell)?; Ok(()) diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs b/zkstack_cli/crates/zkstack/src/commands/chain/setup_legacy_bridge.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/setup_legacy_bridge.rs index 925014fe4e61..f61c640ffb6b 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/setup_legacy_bridge.rs @@ -59,10 +59,7 @@ pub async fn setup_legacy_bridge( ) .with_broadcast(); - forge = fill_forge_private_key( forge, ecosystem_config.get_wallets()?.governor_private_key(), )?; + forge = fill_forge_private_key(forge, Some(&ecosystem_config.get_wallets()?.governor))?; let spinner = Spinner::new(MSG_DEPLOYING_PAYMASTER); check_the_balance(&forge).await?; diff --git a/zkstack_cli/crates/zkstack/src/commands/consensus/conv.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/conv.rs new file mode 100644 index 000000000000..c9d878c8fd32 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/conv.rs @@ -0,0 +1,47 @@ +use anyhow::Context as _; +use zksync_config::configs::consensus as config; +use zksync_consensus_crypto::TextFmt as _; +use zksync_consensus_roles::attester; +use zksync_protobuf::{ProtoFmt, ProtoRepr}; + +use super::proto; +use crate::utils::consensus::parse_attester_committee; + +#[derive(Debug, Clone, PartialEq)] +pub(super) struct SetAttesterCommitteeFile { + pub attesters: attester::Committee, +} + +impl ProtoFmt for SetAttesterCommitteeFile { + type Proto = proto::SetAttesterCommitteeFile; + + fn read(r: &Self::Proto) -> anyhow::Result<Self> { + // zksync_config was not allowed to depend on consensus crates, + // therefore to parse the config we need to go through the intermediate + // representation of consensus types defined in zksync_config. 
+ let attesters: Vec<_> = r + .attesters + .iter() + .map(|x| x.read()) + .collect::<Result<_, _>>() + .context("attesters")?; + Ok(Self { + attesters: parse_attester_committee(&attesters)?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + attesters: self + .attesters + .iter() + .map(|a| { + ProtoRepr::build(&config::WeightedAttester { + key: config::AttesterPublicKey(a.key.encode()), + weight: a.weight, + }) + }) + .collect(), + } + } +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs similarity index 79% rename from zk_toolbox/crates/zk_inception/src/commands/consensus.rs rename to zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs index 7cf96ebe5ad5..1855a5943dc7 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/consensus.rs +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs @@ -1,10 +1,11 @@ -use std::{borrow::Borrow, collections::HashMap, sync::Arc}; +use std::{borrow::Borrow, collections::HashMap, path::PathBuf, sync::Arc}; /// Consensus registry contract operations. /// Includes code duplicated from `zksync_node_consensus::registry::abi`. use anyhow::Context as _; -use common::logger; +use common::{logger, wallets::Wallet}; use config::EcosystemConfig; +use conv::*; use ethers::{ abi::Detokenize, contract::{FunctionCall, Multicall}, @@ -19,6 +20,11 @@ use zksync_consensus_roles::{attester, validator}; use crate::{messages, utils::consensus::parse_attester_committee}; +mod conv; +mod proto; +#[cfg(test)] +mod tests; + #[allow(warnings)] mod abi { include!(concat!(env!("OUT_DIR"), "/consensus_registry_abi.rs")); @@ -65,11 +71,25 @@ fn encode_validator_pop(pop: &validator::ProofOfPossession) -> abi::Bls12381Sign } } +#[derive(clap::Args, Debug)] +#[group(required = true, multiple = false)] +pub struct SetAttesterCommitteeCommand { + /// Sets the attester committee in the consensus registry contract to + /// `consensus.genesis_spec.attesters` in general.yaml. + #[clap(long)] + from_genesis: bool, + /// Sets the attester committee in the consensus registry contract to + /// the committee in the yaml file. + /// File format is defined in `commands/consensus/proto/mod.proto`. + #[clap(long)] + from_file: Option<PathBuf>, +} + #[derive(clap::Subcommand, Debug)] pub enum Command { /// Sets the attester committee in the consensus registry contract to + /// `consensus.genesis_spec.attesters` in general.yaml. - SetAttesterCommittee, + SetAttesterCommittee(SetAttesterCommitteeCommand), /// Fetches the attester committee from the consensus registry contract. GetAttesterCommittee, } @@ -154,26 +174,26 @@ impl Setup { )?) } - fn governor(&self) -> anyhow::Result<Arc<impl Middleware>> { - let governor = self + fn governor(&self) -> anyhow::Result<Wallet> { + Ok(self .chain .get_wallets_config() .context("get_wallets_config()")? - .governor - .private_key - .context(messages::MSG_GOVERNOR_PRIVATE_KEY_NOT_SET)?; - let governor = LocalWallet::from_bytes(governor.as_bytes()) - .context("LocalWallet::from_bytes()")? - .with_chain_id(self.genesis.l2_chain_id.as_u64()); + .governor) + } + + fn signer(&self, wallet: LocalWallet) -> anyhow::Result<Arc<impl Middleware>> { + let wallet = wallet.with_chain_id(self.genesis.l2_chain_id.as_u64()); let provider = self.provider().context("provider()")?; - let signer = SignerMiddleware::new(provider, governor.clone()); + let signer = SignerMiddleware::new(provider, wallet.clone()); // Allows us to send next transaction without waiting for the previous to complete. 
- let signer = NonceManagerMiddleware::new(signer, governor.address()); + let signer = NonceManagerMiddleware::new(signer, wallet.address()); Ok(Arc::new(signer)) } fn new(shell: &Shell) -> anyhow::Result<Self> { - let ecosystem_config = EcosystemConfig::from_file(shell)?; + let ecosystem_config = + EcosystemConfig::from_file(shell).context("EcosystemConfig::from_file()")?; let chain = ecosystem_config .load_current_chain() .context(messages::MSG_CHAIN_NOT_INITIALIZED)?; @@ -227,9 +247,21 @@ impl Setup { attester::Committee::new(attesters.into_iter()).context("attester::Committee::new()") } - async fn set_attester_committee(&self) -> anyhow::Result<attester::Committee> { + fn read_attester_committee( + &self, + opts: &SetAttesterCommitteeCommand, + ) -> anyhow::Result<attester::Committee> { // Fetch the desired state. - let want = (|| { + if let Some(path) = &opts.from_file { + let yaml = std::fs::read_to_string(path).context("read_to_string()")?; + let file: SetAttesterCommitteeFile = zksync_protobuf::serde::Deserialize { + deny_unknown_fields: true, + } + .proto_fmt_from_yaml(&yaml) + .context("proto_fmt_from_yaml()")?; + return Ok(file.attesters); + } + let attesters = (|| { Some( &self .general @@ -241,15 +273,32 @@ impl Setup { ) })() .context(messages::MSG_CONSENSUS_GENESIS_SPEC_ATTESTERS_MISSING_IN_GENERAL_YAML)?; - let want = parse_attester_committee(want).context("parse_attester_committee()")?; + parse_attester_committee(attesters).context("parse_attester_committee()") + } + async fn set_attester_committee(&self, want: &attester::Committee) -> anyhow::Result<()> { let provider = self.provider().context("provider()")?; let block_id = self.last_block(&provider).await.context("last_block()")?; let governor = self.governor().context("governor()")?; + let signer = self.signer( + governor + .private_key + .clone() + .context(messages::MSG_GOVERNOR_PRIVATE_KEY_NOT_SET)?, + )?; let consensus_registry = self - .consensus_registry(governor.clone()) + .consensus_registry(signer.clone()) .context("consensus_registry()")?; - let mut multicall = self.multicall(governor.clone()).context("multicall()")?; + let mut multicall = self.multicall(signer).context("multicall()")?; + + let owner = consensus_registry.owner().call().await.context("owner()")?; + if owner != governor.address { + anyhow::bail!( + "governor ({:#x}) is different than the consensus registry owner ({:#x})", + governor.address, + owner + ); + } // Fetch contract state. 
let n: usize = consensus_registry @@ -337,7 +386,7 @@ impl Setup { ) .await?; txs.wait(&provider).await.context("wait()")?; - Ok(want) + Ok(()) } } @@ -345,8 +394,11 @@ impl Command { pub(crate) async fn run(self, shell: &Shell) -> anyhow::Result<()> { let setup = Setup::new(shell).context("Setup::new()")?; match self { - Self::SetAttesterCommittee => { - let want = setup.set_attester_committee().await?; + Self::SetAttesterCommittee(opts) => { + let want = setup + .read_attester_committee(&opts) + .context("read_attester_committee()")?; + setup.set_attester_committee(&want).await?; let got = setup.get_attester_committee().await?; anyhow::ensure!( got == want, diff --git a/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.proto b/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.proto new file mode 100644 index 000000000000..d8a7323f7144 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package zksync.toolbox.consensus; + +import "zksync/core/consensus.proto"; + +message SetAttesterCommitteeFile { + repeated core.consensus.WeightedAttester attesters = 1; +} diff --git a/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.rs new file mode 100644 index 000000000000..61a0a047f0a9 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.rs @@ -0,0 +1,6 @@ +#![allow(warnings)] + +include!(concat!( + env!("OUT_DIR"), + "/src/commands/consensus/proto/gen.rs" +)); diff --git a/zkstack_cli/crates/zkstack/src/commands/consensus/tests.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/tests.rs new file mode 100644 index 000000000000..c2f393ad2294 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/tests.rs @@ -0,0 +1,19 @@ +use rand::{distributions::Distribution, Rng}; +use zksync_consensus_utils::EncodeDist; +use zksync_protobuf::testonly::{test_encode_all_formats, FmtConv}; + +use super::SetAttesterCommitteeFile; + +impl Distribution<SetAttesterCommitteeFile> for EncodeDist { + fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> SetAttesterCommitteeFile { + SetAttesterCommitteeFile { + attesters: rng.gen(), + } + } +} + +#[test] +fn test_encoding() { + let rng = &mut rand::thread_rng(); + test_encode_all_formats::<FmtConv<SetAttesterCommitteeFile>>(rng); +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/containers.rs b/zkstack_cli/crates/zkstack/src/commands/containers.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/containers.rs rename to zkstack_cli/crates/zkstack/src/commands/containers.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/releases.rs similarity index 82% rename from
zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/releases.rs index 2b2b4cf97b1b..ab169220f299 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs +++ b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/releases.rs @@ -1,8 +1,8 @@ use std::str::FromStr; -use common::{cmd::Cmd, spinner::Spinner}; +use common::spinner::Spinner; use serde::Deserialize; -use xshell::{cmd, Shell}; +use xshell::Shell; use crate::messages::{MSG_INVALID_ARCH_ERR, MSG_NO_RELEASES_FOUND_ERR}; @@ -76,22 +76,19 @@ fn get_compatible_archs(asset_name: &str) -> anyhow::Result<Vec<Arch>> { fn get_releases(shell: &Shell, repo: &str, arch: Arch) -> anyhow::Result<Vec<Version>> { if repo == "ethereum/solc-bin" { - return get_solc_releases(shell, arch); + return get_solc_releases(arch); } - let mut cmd = cmd!( - shell, - "curl -f https://api.github.com/repos/{repo}/releases" - ); + let client = reqwest::blocking::Client::new(); + let mut request = client + .get(format!("https://api.github.com/repos/{repo}/releases")) + .header("User-Agent", "zkstack"); if let Ok(token) = shell.var("GITHUB_TOKEN") { - cmd = cmd.args(vec![ - "-H".to_string(), - format!("Authorization: Bearer {}", token), - ]); + request = request.header("Authorization", format!("Bearer {}", token)); } - let response = String::from_utf8(Cmd::new(cmd).run_with_output()?.stdout)?; + let response = request.send()?.text()?; let releases: Vec = serde_json::from_str(&response)?; let mut versions = vec![]; @@ -115,7 +112,7 @@ fn get_releases(shell: &Shell, repo: &str, arch: Arch) -> anyhow::Result<Vec<Version>> -fn get_solc_releases(shell: &Shell, arch: Arch) -> anyhow::Result<Vec<Version>> { +fn get_solc_releases(arch: Arch) -> anyhow::Result<Vec<Version>> { let (arch_str, compatible_archs) = match arch { Arch::LinuxAmd => ("linux-amd64", vec![Arch::LinuxAmd, Arch::LinuxArm]), Arch::LinuxArm => ("linux-amd64", vec![Arch::LinuxAmd, Arch::LinuxArm]), @@ -123,13 +120,15 @@ fn get_solc_releases(shell: &Shell, arch: Arch) -> anyhow::Result<Vec<Version>> Arch::MacosArm => ("macosx-amd64", vec![Arch::MacosAmd, Arch::MacosArm]), }; - let response: std::process::Output = Cmd::new(cmd!( - shell, - "curl https://raw.githubusercontent.com/ethereum/solc-bin/gh-pages/{arch_str}/list.json" - )) - .run_with_output()?; + let client = reqwest::blocking::Client::new(); + let response = client + .get(format!( + "https://raw.githubusercontent.com/ethereum/solc-bin/gh-pages/{arch_str}/list.json" + )) + .header("User-Agent", "zkstack") + .send()?
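+ // (Editorially added note on the reqwest::blocking API: send() executes the HTTP request, and text() below buffers the response body into a String.)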
+ .text()?; - let response = String::from_utf8(response.stdout)?; let solc_list: SolcList = serde_json::from_str(&response)?; let mut versions = vec![]; diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/init.rs similarity index 96% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/init.rs index f376a0d36eca..b173ad9bbb7f 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/init.rs @@ -89,7 +89,8 @@ fn download_binary( let spinner = Spinner::new(&msg_downloading_binary_spinner(name, version)); Cmd::new(cmd!(shell, "mkdir -p {path}")).run()?; - Cmd::new(cmd!(shell, "wget {url} -O {binary_path}")).run()?; + let response = reqwest::blocking::get(url)?.bytes()?; + shell.write_file(binary_path.clone(), &response)?; Cmd::new(cmd!(shell, "chmod +x {binary_path}")).run()?; spinner.finish(); diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/mod.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/run.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs index 803e962c0ff8..4cb419ce7a46 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs @@ -4,7 +4,7 @@ use common::{docker, logger}; use config::{EcosystemConfig, DOCKER_COMPOSE_FILE}; use xshell::Shell; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_CONTRACTS_CLEANING, MSG_CONTRACTS_CLEANING_FINISHED, MSG_DOCKER_COMPOSE_CLEANED, MSG_DOCKER_COMPOSE_DOWN, MSG_DOCKER_COMPOSE_REMOVE_VOLUMES, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/config_writer.rs similarity index 96% rename from zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/config_writer.rs index 04e019936e17..70238ed15f32 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/config_writer.rs @@ -4,7 +4,7 @@ use common::{logger, Prompt}; use config::{override_config, EcosystemConfig}; use xshell::Shell; -use crate::messages::{ +use crate::commands::dev::messages::{ msg_overriding_config, MSG_CHAIN_NOT_FOUND_ERR, MSG_OVERRIDE_CONFIG_PATH_HELP, MSG_OVERRIDE_SUCCESS, MSG_OVERRRIDE_CONFIG_PATH_PROMPT, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs similarity index 99% rename from 
zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs index bab4205cd66f..6f420e66ba03 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs @@ -5,7 +5,7 @@ use common::{cmd::Cmd, logger, spinner::Spinner}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_BUILDING_CONTRACTS, MSG_BUILDING_CONTRACTS_SUCCESS, MSG_BUILDING_L1_CONTRACTS_SPINNER, MSG_BUILDING_L2_CONTRACTS_SPINNER, MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER, MSG_BUILDING_TEST_CONTRACTS_SPINNER, MSG_BUILD_L1_CONTRACTS_HELP, MSG_BUILD_L2_CONTRACTS_HELP, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/mod.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/mod.rs index cf9dfc2834a8..f05e3ee1c0e0 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/mod.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::{ +use crate::commands::dev::{ dals::SelectedDals, messages::{ MSG_DATABASE_COMMON_CORE_HELP, MSG_DATABASE_COMMON_CORE_URL_HELP, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/new_migration.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/new_migration.rs index 64b7a507abea..b91b048be784 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/new_migration.rs @@ -2,7 +2,7 @@ use clap::{Parser, ValueEnum}; use common::{Prompt, PromptSelect}; use strum::{Display, EnumIter, IntoEnumIterator}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_DATABASE_NEW_MIGRATION_DATABASE_HELP, MSG_DATABASE_NEW_MIGRATION_DB_PROMPT, MSG_DATABASE_NEW_MIGRATION_NAME_HELP, MSG_DATABASE_NEW_MIGRATION_NAME_PROMPT, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/check_sqlx_data.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/check_sqlx_data.rs index 0c401595690e..990fca78641f 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/check_sqlx_data.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/drop.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/drop.rs index 
94bf325a2c6c..a5578d41f77a 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/drop.rs @@ -6,7 +6,7 @@ use common::{ use xshell::Shell; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_DROP_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/migrate.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/migrate.rs index 1d648965c244..fd22f769742e 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/migrate.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_MIGRATE_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/mod.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/mod.rs index 415b81879f1b..ed039fc65019 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/mod.rs @@ -2,7 +2,7 @@ use clap::Subcommand; use xshell::Shell; use self::args::{new_migration::DatabaseNewMigrationArgs, DatabaseCommonArgs}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_DATABASE_CHECK_SQLX_DATA_ABOUT, MSG_DATABASE_DROP_ABOUT, MSG_DATABASE_MIGRATE_ABOUT, MSG_DATABASE_NEW_MIGRATION_ABOUT, MSG_DATABASE_PREPARE_ABOUT, MSG_DATABASE_RESET_ABOUT, MSG_DATABASE_SETUP_ABOUT, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/new_migration.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/new_migration.rs index e21b7cde47ba..2d9fa1030538 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/new_migration.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::new_migration::{DatabaseNewMigrationArgs, SelectedDatabase}; -use crate::{ +use crate::commands::dev::{ dals::{get_core_dal, get_prover_dal, Dal}, messages::{msg_database_new_migration_loading, MSG_DATABASE_NEW_MIGRATION_SUCCESS}, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/prepare.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/prepare.rs index 82ec12f94129..288a68452fd5 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/prepare.rs @@ -5,7 +5,7 
@@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_PREPARE_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/reset.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/reset.rs index f0262cecb959..55d5ab1cbfcb 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/reset.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::Shell; use super::{args::DatabaseCommonArgs, drop::drop_database, setup::setup_database}; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_RESET_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/setup.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/setup.rs index 15b3ac5c1c72..74ade66ba481 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/setup.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_SETUP_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/fmt.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/fmt.rs index a6db4643c30a..ebaf27845e0a 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/fmt.rs @@ -6,7 +6,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::sql_fmt::format_sql; -use crate::{ +use crate::commands::dev::{ commands::lint_utils::{get_unignored_files, Target}, messages::{ msg_running_fmt_for_extension_spinner, msg_running_fmt_for_extensions_spinner, @@ -42,7 +42,7 @@ async fn prettier_contracts(shell: Shell, check: bool) -> anyhow::Result<()> { } async fn rustfmt(shell: Shell, check: bool, link_to_code: PathBuf) -> anyhow::Result<()> { - for dir in [".", "prover", "zk_toolbox"] { + for dir in [".", "prover", "zkstack_cli"] { let spinner = Spinner::new(&msg_running_rustfmt_for_dir_spinner(dir)); let _dir = shell.push_dir(link_to_code.join(dir)); let mut cmd = cmd!(shell, "cargo fmt -- --config imports_granularity=Crate --config group_imports=StdExternalCrate"); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs similarity index 95% rename from zk_toolbox/crates/zk_supervisor/src/commands/lint.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs index 45a7a46ebbe0..71f21a02e739 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs +++ 
b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs @@ -3,7 +3,7 @@ use common::{cmd::Cmd, logger, spinner::Spinner}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::{ +use crate::commands::dev::{ commands::lint_utils::{get_unignored_files, Target}, messages::{ msg_running_linter_for_extension_spinner, msg_running_linters_for_files, @@ -55,8 +55,8 @@ fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::R let link_to_code = &ecosystem.link_to_code; let lint_to_prover = &ecosystem.link_to_code.join("prover"); - let link_to_toolbox = &ecosystem.link_to_code.join("zk_toolbox"); - let paths = vec![link_to_code, lint_to_prover, link_to_toolbox]; + let link_to_zkstack = &ecosystem.link_to_code.join("zkstack_cli"); + let paths = vec![link_to_code, lint_to_prover, link_to_zkstack]; spinner.freeze(); for path in paths { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_batch.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_batch.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_version.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_version.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/mod.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/info.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/info.rs index 441edb2c4b2d..84873e931b3e 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/info.rs @@ -8,7 +8,7 @@ use common::logger; use config::{ChainConfig, EcosystemConfig}; use xshell::{cmd, Shell}; -use crate::messages::MSG_CHAIN_NOT_FOUND_ERR; +use crate::commands::dev::messages::MSG_CHAIN_NOT_FOUND_ERR; pub async fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_batch.rs similarity index 97% 
rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_batch.rs index 8c2cdd4d88dd..0e0c0ba33af4 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_batch.rs @@ -2,7 +2,7 @@ use common::{check_prerequisites, cmd::Cmd, logger, PROVER_CLI_PREREQUISITE}; use config::{get_link_to_prover, EcosystemConfig}; use xshell::{cmd, Shell}; -use crate::{ +use crate::commands::dev::{ commands::prover::{ args::insert_batch::{InsertBatchArgs, InsertBatchArgsFinal}, info, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_version.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_version.rs index 3dd9b7e0a1b1..f7bd175f577a 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_version.rs @@ -2,7 +2,7 @@ use common::{check_prerequisites, cmd::Cmd, logger, PROVER_CLI_PREREQUISITE}; use config::{get_link_to_prover, EcosystemConfig}; use xshell::{cmd, Shell}; -use crate::{ +use crate::commands::dev::{ commands::prover::{ args::insert_version::{InsertVersionArgs, InsertVersionArgsFinal}, info, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/mod.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/args/mod.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/args/mod.rs index e3d4f220ff28..03d9ec9b7360 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/args/mod.rs @@ -4,7 +4,7 @@ use clap::Parser; use common::Prompt; use url::Url; -use crate::{ +use crate::commands::dev::{ defaults::LOCAL_RPC_URL, messages::{ MSG_INVALID_L1_RPC_URL_ERR, MSG_PROMPT_L1_RPC_URL, MSG_PROMPT_SECRET_KEY, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/mod.rs similarity index 99% rename from zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/mod.rs index 79d8efc600e8..2f54579ade9e 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/mod.rs @@ -17,7 +17,7 @@ use tokio::time::sleep; use xshell::Shell; use zksync_basic_types::{H160, U256}; -use crate::{ +use crate::commands::dev::{ consts::DEFAULT_UNSIGNED_TRANSACTIONS_DIR, messages::{ msg_send_txns_outro, MSG_FAILED_TO_SEND_TXN_ERR, MSG_UNABLE_TO_OPEN_FILE_ERR, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs 
b/zkstack_cli/crates/zkstack/src/commands/dev/commands/snapshot.rs similarity index 91% rename from zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/snapshot.rs index 608c56233348..8e4c7183cb55 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/snapshot.rs @@ -4,7 +4,7 @@ use common::{cmd::Cmd, logger}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_RUNNING_SNAPSHOT_CREATOR}; +use crate::commands::dev::messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_RUNNING_SNAPSHOT_CREATOR}; #[derive(Subcommand, Debug)] pub enum SnapshotCommands { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/sql_fmt.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/sql_fmt.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/sql_fmt.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/sql_fmt.rs index ede2500e6ab6..0f7ce061ce18 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/sql_fmt.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/sql_fmt.rs @@ -6,7 +6,7 @@ use sqruff_lib::{api::simple::get_simple_config, core::linter::core::Linter}; use xshell::Shell; use super::lint_utils::{get_unignored_files, IgnoredData, Target}; -use crate::messages::{msg_file_is_not_formatted, MSG_RUNNING_SQL_FMT_SPINNER}; +use crate::commands::dev::messages::{msg_file_is_not_formatted, MSG_RUNNING_SQL_FMT_SPINNER}; fn format_query(query: &str) -> anyhow::Result<String> { let exclude_rules = vec!["LT12".to_string()]; // avoid adding newline before `$` character @@ -138,7 +138,7 @@ pub async fn format_sql(shell: Shell, check: bool) -> anyhow::Result<()> { let spinner = Spinner::new(MSG_RUNNING_SQL_FMT_SPINNER); let ignored_data = Some(IgnoredData { files: vec![], - dirs: vec!["zk_toolbox".to_string()], + dirs: vec!["zkstack_cli".to_string()], }); let rust_files = get_unignored_files(&shell, &Target::Rs, ignored_data)?; for file in rust_files { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/fees.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs similarity index 78% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/fees.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs index 1337566e5369..83d505aa5753 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/fees.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs @@ -1,7 +1,7 @@ use clap::Parser; use serde::{Deserialize, Serialize}; -use crate::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP}; +use crate::commands::dev::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP}; #[derive(Debug, Serialize, Deserialize, Parser)] pub struct FeesArgs { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/integration.rs similarity index 63% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/integration.rs index 435dddfc360c..625df0fc151e 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/integration.rs @@ -1,7 +1,9 @@ use clap::Parser; use serde::{Deserialize, Serialize}; -use crate::messages::{MSG_NO_DEPS_HELP,
MSG_TESTS_EXTERNAL_NODE_HELP, MSG_TEST_PATTERN_HELP}; +use crate::commands::dev::messages::{ + MSG_NO_DEPS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP, MSG_TEST_PATTERN_HELP, +}; #[derive(Debug, Serialize, Deserialize, Parser)] pub struct IntegrationArgs { @@ -9,6 +11,6 @@ pub struct IntegrationArgs { pub external_node: bool, #[clap(short, long, help = MSG_NO_DEPS_HELP)] pub no_deps: bool, - #[clap(short, long, help = MSG_TEST_PATTERN_HELP)] + #[clap(short, long, help = MSG_TEST_PATTERN_HELP, allow_hyphen_values(true))] pub test_pattern: Option<String>, } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/mod.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs similarity index 76% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs index 81cc58fbd9bd..cf4734fd82e7 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs @@ -1,7 +1,9 @@ use clap::Parser; use serde::{Deserialize, Serialize}; -use crate::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP, MSG_TESTS_RECOVERY_SNAPSHOT_HELP}; +use crate::commands::dev::messages::{ + MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP, MSG_TESTS_RECOVERY_SNAPSHOT_HELP, +}; #[derive(Debug, Serialize, Deserialize, Parser)] pub struct RecoveryArgs { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs similarity index 93% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs index 0154a4c0afd7..e4fb7fba2a97 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP, MSG_REVERT_TEST_ENABLE_CONSENSUS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/rust.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/rust.rs similarity index 70% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/rust.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/rust.rs index 2d94adc3f6a7..6ca277f6a2fc 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/rust.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/rust.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::messages::MSG_TEST_RUST_OPTIONS_HELP; +use crate::commands::dev::messages::MSG_TEST_RUST_OPTIONS_HELP; #[derive(Debug, Parser)] pub struct RustArgs { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/upgrade.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/upgrade.rs similarity index 72% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/upgrade.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/upgrade.rs index
dd96957e9d3b..7b631b91e9a5 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/upgrade.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/upgrade.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::messages::MSG_NO_DEPS_HELP; +use crate::commands::dev::messages::MSG_NO_DEPS_HELP; #[derive(Debug, Parser)] pub struct UpgradeArgs { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/build.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/build.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/build.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/build.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/db.rs similarity index 88% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/db.rs index d173bb951685..19f6307019b8 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/db.rs @@ -3,7 +3,7 @@ use std::path::Path; use common::{cmd::Cmd, db::wait_for_db, logger}; use xshell::{cmd, Shell}; -use crate::{commands::database, dals::Dal, messages::MSG_RESETTING_TEST_DATABASES}; +use crate::commands::dev::{commands::database, dals::Dal, messages::MSG_RESETTING_TEST_DATABASES}; pub async fn reset_test_databases( shell: &Shell, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/fees.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/fees.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/fees.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/fees.rs index e0b881a14dbb..e58a70e6b7cb 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/fees.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/fees.rs @@ -9,7 +9,7 @@ use super::{ args::fees::FeesArgs, utils::{build_contracts, install_and_build_dependencies, TS_INTEGRATION_PATH}, }; -use crate::{ +use crate::commands::dev::{ commands::test::utils::{TestWallets, TEST_WALLETS_PATH}, messages::{ MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs index 5107abf6a59b..3bc3093bf936 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs @@ -12,7 +12,7 @@ use super::{ TS_INTEGRATION_PATH, }, }; -use crate::messages::{ +use crate::commands::dev::messages::{ msg_integration_tests_run, MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_INTEGRATION_TESTS_RUN_SUCCESS, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/l1_contracts.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/l1_contracts.rs similarity index 86% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/l1_contracts.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/l1_contracts.rs index 0a1e1ec5203f..7d163daed671 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/l1_contracts.rs +++ 
b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/l1_contracts.rs @@ -2,7 +2,7 @@ use common::{cmd::Cmd, logger}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::messages::MSG_L1_CONTRACTS_TEST_SUCCESS; +use crate::commands::dev::messages::MSG_L1_CONTRACTS_TEST_SUCCESS; pub fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/loadtest.rs similarity index 95% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/loadtest.rs index ee307438ec97..72a8f97ff97d 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/loadtest.rs @@ -3,7 +3,7 @@ use common::{cmd::Cmd, config::global_config, logger}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::messages::MSG_CHAIN_NOT_FOUND_ERR; +use crate::commands::dev::messages::MSG_CHAIN_NOT_FOUND_ERR; pub fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/mod.rs similarity index 92% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/mod.rs index ae6b4518e6db..095e27652aa0 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/mod.rs @@ -5,7 +5,7 @@ use args::{ use clap::Subcommand; use xshell::Shell; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_BUILD_ABOUT, MSG_INTEGRATION_TESTS_ABOUT, MSG_L1_CONTRACTS_ABOUT, MSG_LOADTEST_ABOUT, MSG_PROVER_TEST_ABOUT, MSG_RECOVERY_TEST_ABOUT, MSG_REVERT_TEST_ABOUT, MSG_RUST_TEST_ABOUT, MSG_TEST_WALLETS_INFO, MSG_UPGRADE_TEST_ABOUT, @@ -30,7 +30,7 @@ mod wallet; pub enum TestCommands { #[clap(about = MSG_INTEGRATION_TESTS_ABOUT, alias = "i")] Integration(IntegrationArgs), - #[clap(about = "Run fees test", alias = "i")] + #[clap(about = "Run fees test", alias = "f")] Fees(FeesArgs), #[clap(about = MSG_REVERT_TEST_ABOUT, alias = "r")] Revert(RevertArgs), @@ -40,7 +40,7 @@ pub enum TestCommands { Upgrade(UpgradeArgs), #[clap(about = MSG_BUILD_ABOUT)] Build, - #[clap(about = MSG_RUST_TEST_ABOUT, alias = "unit")] + #[clap(about = MSG_RUST_TEST_ABOUT, alias = "unit", allow_hyphen_values(true))] Rust(RustArgs), #[clap(about = MSG_L1_CONTRACTS_ABOUT, alias = "l1")] L1Contracts, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/prover.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/prover.rs index f48b359a9357..200baf57215c 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/prover.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use url::Url; use xshell::{cmd, Shell}; -use crate::{ +use crate::commands::dev::{ commands::test::db::reset_test_databases, dals::{Dal, PROVER_DAL_PATH}, defaults::TEST_DATABASE_PROVER_URL, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs 
b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/recovery.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/recovery.rs index 6a3e337d41e9..ae889969fd2c 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/recovery.rs @@ -9,7 +9,7 @@ use super::{ args::recovery::RecoveryArgs, utils::{install_and_build_dependencies, TestWallets, TEST_WALLETS_PATH}, }; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_RECOVERY_TEST_RUN_INFO, MSG_RECOVERY_TEST_RUN_SUCCESS, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/revert.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/revert.rs index 8b00e9d7f4d1..dc95c88db205 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/revert.rs @@ -9,7 +9,7 @@ use super::{ args::revert::RevertArgs, utils::{install_and_build_dependencies, TestWallets, TEST_WALLETS_PATH}, }; -use crate::messages::{ +use crate::commands::dev::messages::{ msg_revert_tests_run, MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_REVERT_TEST_RUN_INFO, MSG_REVERT_TEST_RUN_SUCCESS, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/rust.rs similarity index 94% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/rust.rs index 7011e0f0f874..8c0c707f6a2e 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/rust.rs @@ -7,7 +7,7 @@ use url::Url; use xshell::{cmd, Shell}; use super::args::rust::RustArgs; -use crate::{ +use crate::commands::dev::{ commands::test::db::reset_test_databases, dals::{Dal, CORE_DAL_PATH, PROVER_DAL_PATH}, defaults::{TEST_DATABASE_PROVER_URL, TEST_DATABASE_SERVER_URL}, @@ -75,8 +75,8 @@ pub async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> { .env("TEST_PROVER_DATABASE_URL", test_prover_url); cmd.run()?; - // Run unit tests for zk_toolbox - let _dir_guard = shell.push_dir(link_to_code.join("zk_toolbox")); + // Run unit tests for ZK Stack CLI + let _dir_guard = shell.push_dir(link_to_code.join("zkstack_cli")); Cmd::new(cmd!(shell, "cargo nextest run --release")) .with_force_run() .run()?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/upgrade.rs similarity index 91% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/upgrade.rs index 9bd04b81ef34..707e0086ed15 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/upgrade.rs @@ -3,7 +3,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::{args::upgrade::UpgradeArgs, utils::install_and_build_dependencies}; -use crate::messages::{MSG_UPGRADE_TEST_RUN_INFO, MSG_UPGRADE_TEST_RUN_SUCCESS}; +use 
crate::commands::dev::messages::{MSG_UPGRADE_TEST_RUN_INFO, MSG_UPGRADE_TEST_RUN_SUCCESS}; const UPGRADE_TESTS_PATH: &str = "core/tests/upgrade-test"; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs similarity index 93% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs index 8656ff44d319..bcd524bd2cb0 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs @@ -10,7 +10,7 @@ use ethers::{ use serde::Deserialize; use xshell::{cmd, Shell}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS, MSG_INTEGRATION_TESTS_BUILDING_DEPENDENCIES, }; @@ -43,10 +43,11 @@ impl TestWallets { } pub fn get_test_pk(&self, chain_config: &ChainConfig) -> anyhow::Result<String> { - self.get_test_wallet(chain_config)? - .private_key - .ok_or(anyhow::Error::msg("Private key not found")) - .map(|pk| pk.encode_hex::<String>()) + Ok(self + .get_test_wallet(chain_config)? + .private_key_h256() + .context("Private key not found")? + .encode_hex()) } pub async fn init_test_wallet( diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/wallet.rs similarity index 96% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/wallet.rs index 62f32b50d559..6953014bf92b 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/wallet.rs @@ -6,7 +6,7 @@ use config::EcosystemConfig; use xshell::Shell; use super::utils::{TestWallets, TEST_WALLETS_PATH}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_TEST_WALLETS_INFO, MSG_WALLETS_TEST_SUCCESS, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/consts.rs b/zkstack_cli/crates/zkstack/src/commands/dev/consts.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/consts.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/consts.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/dals.rs b/zkstack_cli/crates/zkstack/src/commands/dev/dals.rs similarity index 95% rename from zk_toolbox/crates/zk_supervisor/src/dals.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/dals.rs index 962a848fe00c..9626edfed732 100644 --- a/zk_toolbox/crates/zk_supervisor/src/dals.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/dals.rs @@ -1,9 +1,9 @@ -use anyhow::{anyhow, Context}; +use anyhow::Context as _; use config::{EcosystemConfig, SecretsConfig}; use url::Url; use xshell::Shell; -use crate::{ +use super::{ commands::database::args::DalUrls, messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_DATABASE_MUST_BE_PRESENTED}, }; @@ -91,7 +91,7 @@ fn get_secrets(shell: &Shell) -> anyhow::Result<SecretsConfig> { let ecosystem_config = EcosystemConfig::from_file(shell)?; let chain_config = ecosystem_config .load_current_chain() - .ok_or(anyhow!(MSG_CHAIN_NOT_FOUND_ERR))?; + .context(MSG_CHAIN_NOT_FOUND_ERR)?; let secrets = chain_config.get_secrets_config()?; Ok(secrets) diff --git a/zk_toolbox/crates/zk_supervisor/src/defaults.rs b/zkstack_cli/crates/zkstack/src/commands/dev/defaults.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/defaults.rs rename to
zkstack_cli/crates/zkstack/src/commands/dev/defaults.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/messages.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/messages.rs index 6f6deb22edbf..00617e26064f 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs @@ -1,12 +1,8 @@ -use crate::commands::lint_utils::Target; +use super::commands::lint_utils::Target; // Ecosystem related messages pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found"; -pub(super) fn msg_global_chain_does_not_exist(chain: &str, available_chains: &str) -> String { - format!("Chain with name {chain} doesnt exist, please choose one of: {available_chains}") -} - // Subcommands help pub(super) const MSG_PROVER_VERSION_ABOUT: &str = "Protocol version used by provers"; pub(super) const MSG_SUBCOMMAND_DATABASE_ABOUT: &str = "Database related commands"; diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs new file mode 100644 index 000000000000..e8d23f15b699 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs @@ -0,0 +1,61 @@ +use clap::Subcommand; +use xshell::Shell; + +use self::commands::{ + clean::CleanCommands, config_writer::ConfigWriterArgs, contracts::ContractsArgs, + database::DatabaseCommands, fmt::FmtArgs, lint::LintArgs, prover::ProverCommands, + send_transactions::args::SendTransactionsArgs, snapshot::SnapshotCommands, test::TestCommands, +}; +use crate::commands::dev::messages::{ + MSG_CONFIG_WRITER_ABOUT, MSG_CONTRACTS_ABOUT, MSG_PROVER_VERSION_ABOUT, MSG_SEND_TXNS_ABOUT, + MSG_SUBCOMMAND_CLEAN, MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, + MSG_SUBCOMMAND_LINT_ABOUT, MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, +}; + +mod commands; +mod consts; +mod dals; +mod defaults; +mod messages; + +#[derive(Subcommand, Debug)] +pub enum DevCommands { + #[command(subcommand, about = MSG_SUBCOMMAND_DATABASE_ABOUT, alias = "db")] + Database(DatabaseCommands), + #[command(subcommand, about = MSG_SUBCOMMAND_TESTS_ABOUT, alias = "t")] + Test(TestCommands), + #[command(subcommand, about = MSG_SUBCOMMAND_CLEAN)] + Clean(CleanCommands), + #[command(subcommand, about = MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT)] + Snapshot(SnapshotCommands), + #[command(about = MSG_SUBCOMMAND_LINT_ABOUT, alias = "l")] + Lint(LintArgs), + #[command(about = MSG_SUBCOMMAND_FMT_ABOUT)] + Fmt(FmtArgs), + #[command(subcommand, about = MSG_PROVER_VERSION_ABOUT)] + Prover(ProverCommands), + #[command(about = MSG_CONTRACTS_ABOUT)] + Contracts(ContractsArgs), + #[command(about = MSG_CONFIG_WRITER_ABOUT, alias = "o")] + ConfigWriter(ConfigWriterArgs), + #[command(about = MSG_SEND_TXNS_ABOUT)] + SendTransactions(SendTransactionsArgs), +} + +pub async fn run(shell: &Shell, args: DevCommands) -> anyhow::Result<()> { + match args { + DevCommands::Database(command) => commands::database::run(shell, command).await?, + DevCommands::Test(command) => commands::test::run(shell, command).await?, + DevCommands::Clean(command) => commands::clean::run(shell, command)?, + DevCommands::Snapshot(command) => commands::snapshot::run(shell, command).await?, + DevCommands::Lint(args) => commands::lint::run(shell, args)?, + DevCommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?, + DevCommands::Prover(command) => 
commands::prover::run(shell, command).await?, + DevCommands::Contracts(args) => commands::contracts::run(shell, args)?, + DevCommands::ConfigWriter(args) => commands::config_writer::run(shell, args)?, + DevCommands::SendTransactions(args) => { + commands::send_transactions::run(shell, args).await? + } + } + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/build_transactions.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/build_transactions.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/change_default.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/change_default.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/change_default.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/change_default.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/build_transactions.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/build_transactions.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/change_default.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/change_default.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/change_default.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/change_default.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs similarity index 99% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs index 950d39876b09..42b8f79b97eb 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs @@ -54,7 +54,7 @@ pub async fn deploy_l1( if let Some(address) = sender { forge = forge.with_sender(address); } else { - forge = fill_forge_private_key(forge, wallets_config.deployer_private_key())?; + forge = fill_forge_private_key(forge, wallets_config.deployer.as_ref())?; } if broadcast { diff --git 
a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/create.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/create.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/create_configs.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/create_configs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs similarity index 96% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs index 6b64b740aedd..bf5a4605c09c 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs @@ -151,10 +151,7 @@ async fn deploy_erc20( .with_rpc_url(l1_rpc_url) .with_broadcast(); - forge = fill_forge_private_key( - forge, - ecosystem_config.get_wallets()?.deployer_private_key(), - )?; + forge = fill_forge_private_key(forge, ecosystem_config.get_wallets()?.deployer.as_ref())?; let spinner = Spinner::new(MSG_DEPLOYING_ERC20_SPINNER); check_the_balance(&forge).await?; @@ -262,7 +259,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.governance_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config.ecosystem_contracts.bridgehub_proxy_addr, &forge_args, l1_rpc_url.clone(), @@ -273,7 +270,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.chain_admin_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config.ecosystem_contracts.bridgehub_proxy_addr, &forge_args, l1_rpc_url.clone(), @@ -284,7 +281,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.governance_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config.bridges.shared.l1_address, &forge_args, l1_rpc_url.clone(), @@ -295,7 +292,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.chain_admin_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config.bridges.shared.l1_address, &forge_args, l1_rpc_url.clone(), @@ -306,7 +303,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.governance_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config .ecosystem_contracts .state_transition_proxy_addr, @@ -319,7 +316,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.chain_admin_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config .ecosystem_contracts .state_transition_proxy_addr, diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/setup_observability.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/setup_observability.rs 
similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/setup_observability.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/setup_observability.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/utils.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/utils.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/utils.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/utils.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/backend.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/backend.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/run.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/prepare_configs.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/args/prepare_configs.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/args/prepare_configs.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/args/prepare_configs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/args/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/args/run.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/init.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/init.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/mod.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs similarity index 82% rename from 
zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs index 5ab859d17f0a..d714a0f8e843 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs +++ b/zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs @@ -6,12 +6,12 @@ use config::{ external_node::ENConfig, set_rocks_db_config, traits::{FileConfigWithDefaultName, SaveConfigWithBasePath}, - ChainConfig, EcosystemConfig, GeneralConfig, SecretsConfig, DEFAULT_CONSENSUS_PORT, + ChainConfig, EcosystemConfig, GeneralConfig, SecretsConfig, }; use xshell::Shell; use zksync_basic_types::url::SensitiveUrl; use zksync_config::configs::{ - consensus::{ConsensusSecrets, NodeSecretKey, Secret}, + consensus::{ConsensusConfig, ConsensusSecrets, NodeSecretKey, Secret}, DatabaseSecrets, L1Secrets, }; use zksync_consensus_crypto::TextFmt; @@ -19,14 +19,13 @@ use zksync_consensus_roles as roles; use crate::{ commands::external_node::args::prepare_configs::{PrepareConfigArgs, PrepareConfigFinal}, - defaults::PORT_RANGE_END, messages::{ msg_preparing_en_config_is_done, MSG_CHAIN_NOT_INITIALIZED, MSG_CONSENSUS_CONFIG_MISSING_ERR, MSG_CONSENSUS_SECRETS_MISSING_ERR, MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR, MSG_PREPARING_EN_CONFIGS, }, utils::{ - consensus::{get_consensus_config, node_public_key}, + consensus::node_public_key, ports::EcosystemPortsScanner, rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, }, @@ -79,19 +78,12 @@ fn prepare_configs( bridge_addresses_refresh_interval_sec: None, }; let mut general_en = general.clone(); + general_en.consensus_config = None; let main_node_consensus_config = general .consensus_config .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; - - // TODO: This is a temporary solution. We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` - ports.add_port_info( - main_node_consensus_config.server_addr.port(), - "Main node consensus".to_string(), - ); - let offset = ((config.id - 1) * 100) as u16; - let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; - let consensus_port = ports.allocate_port(consensus_port_range, "Consensus".to_string())?; + let mut en_consensus_config = main_node_consensus_config.clone(); let mut gossip_static_outbound = BTreeMap::new(); let main_node_public_key = node_public_key( @@ -101,13 +93,8 @@ fn prepare_configs( .context(MSG_CONSENSUS_SECRETS_MISSING_ERR)?, )? 
.context(MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR)?; - gossip_static_outbound.insert(main_node_public_key, main_node_consensus_config.public_addr); - - let en_consensus_config = - get_consensus_config(config, consensus_port, None, Some(gossip_static_outbound))?; - general_en.consensus_config = Some(en_consensus_config.clone()); - en_consensus_config.save_with_base_path(shell, en_configs_path)?; + en_consensus_config.gossip_static_outbound = gossip_static_outbound; // Set secrets config let node_key = roles::node::SecretKey::generate().encode(); @@ -128,16 +115,25 @@ fn prepare_configs( }), data_availability: None, }; - secrets.save_with_base_path(shell, en_configs_path)?; + let dirs = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::ExternalNode)?; set_rocks_db_config(&mut general_en, dirs)?; + general_en.save_with_base_path(shell, en_configs_path)?; en_config.save_with_base_path(shell, en_configs_path)?; + en_consensus_config.save_with_base_path(shell, en_configs_path)?; + secrets.save_with_base_path(shell, en_configs_path)?; + let offset = 0; // This is zero because general_en ports already have a chain offset ports.allocate_ports_in_yaml( shell, &GeneralConfig::get_path_with_base_path(en_configs_path), - 0, // This is zero because general_en ports already have a chain offset + offset, + )?; + ports.allocate_ports_in_yaml( + shell, + &ConsensusConfig::get_path_with_base_path(en_configs_path), + offset, )?; Ok(()) diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/run.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/mod.rs b/zkstack_cli/crates/zkstack/src/commands/mod.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/mod.rs index 78a46797602f..c46400cc8657 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/mod.rs @@ -3,6 +3,7 @@ pub mod chain; pub mod consensus; pub mod containers; pub mod contract_verifier; +pub mod dev; pub mod ecosystem; pub mod explorer; pub mod external_node; diff --git a/zk_toolbox/crates/zk_inception/src/commands/portal.rs b/zkstack_cli/crates/zkstack/src/commands/portal.rs similarity index 98% rename from zk_toolbox/crates/zk_inception/src/commands/portal.rs rename to zkstack_cli/crates/zkstack/src/commands/portal.rs index 5bf211211779..f9e7fe358609 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/portal.rs +++ b/zkstack_cli/crates/zkstack/src/commands/portal.rs @@ -107,7 +107,7 @@ async fn validate_portal_config( continue; } // Append missing chain, chain might not be initialized, so ignoring errors - if let Some(chain_config) = ecosystem_config.load_chain(Some(chain_name.clone())) { + if let Ok(chain_config) = ecosystem_config.load_chain(Some(chain_name.clone())) { if let Ok(portal_chain_config) = build_portal_chain_config(&chain_config).await { portal_config.add_chain_config(&portal_chain_config); } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/compressor_keys.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs rename to 
zkstack_cli/crates/zkstack/src/commands/prover/args/compressor_keys.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/setup_keys.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/setup_keys.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/compressor_keys.rs similarity index 80% rename from zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/compressor_keys.rs index 703ecc18c4cb..a3d40c957281 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/compressor_keys.rs @@ -1,7 +1,7 @@ use anyhow::Context; -use common::{check_prerequisites, cmd::Cmd, spinner::Spinner, WGET_PREREQUISITE}; +use common::spinner::Spinner; use config::{get_link_to_prover, EcosystemConfig, GeneralConfig}; -use xshell::{cmd, Shell}; +use xshell::Shell; use super::args::compressor_keys::CompressorKeysArgs; use crate::messages::{ @@ -35,7 +35,6 @@ pub(crate) fn download_compressor_key( general_config: &mut GeneralConfig, path: &str, ) -> anyhow::Result<()> { - check_prerequisites(shell, &WGET_PREREQUISITE, false); let spinner = Spinner::new(MSG_DOWNLOADING_SETUP_COMPRESSOR_KEY_SPINNER); let mut compressor_config: zksync_config::configs::FriProofCompressorConfig = general_config .proof_compressor_config @@ -47,14 +46,13 @@ pub(crate) fn download_compressor_key( let url = compressor_config.universal_setup_download_url; let path = std::path::Path::new(path); - let parent = path.parent().expect(MSG_SETUP_KEY_PATH_ERROR); - let file_name = path.file_name().expect(MSG_SETUP_KEY_PATH_ERROR); - Cmd::new(cmd!(shell, "wget {url} -P {parent}")).run()?; + let client = reqwest::blocking::Client::builder() + .timeout(std::time::Duration::from_secs(600)) + .build()?; - if file_name != "setup_2^24.key" { - Cmd::new(cmd!(shell, "mv {parent}/setup_2^24.key {path}")).run()?; - } + let response = client.get(url).send()?.bytes()?; + 
shell.write_file(path, &response)?; spinner.finish(); Ok(()) diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs b/zkstack_cli/crates/zkstack/src/commands/prover/gcs.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/gcs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs b/zkstack_cli/crates/zkstack/src/commands/prover/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/init.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs b/zkstack_cli/crates/zkstack/src/commands/prover/init_bellman_cuda.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/init_bellman_cuda.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs b/zkstack_cli/crates/zkstack/src/commands/prover/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/run.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/run.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/setup_keys.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/setup_keys.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/server.rs b/zkstack_cli/crates/zkstack/src/commands/server.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/server.rs rename to zkstack_cli/crates/zkstack/src/commands/server.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/update.rs b/zkstack_cli/crates/zkstack/src/commands/update.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/update.rs rename to zkstack_cli/crates/zkstack/src/commands/update.rs diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zkstack_cli/crates/zkstack/src/consts.rs similarity index 68% rename from zk_toolbox/crates/zk_inception/src/consts.rs rename to zkstack_cli/crates/zkstack/src/consts.rs index 9f81847e3336..df27d2f02d2c 100644 --- a/zk_toolbox/crates/zk_inception/src/consts.rs +++ b/zkstack_cli/crates/zkstack/src/consts.rs @@ -1,5 +1,3 @@ -use std::net::{IpAddr, Ipv4Addr}; - pub const AMOUNT_FOR_DISTRIBUTION_TO_WALLETS: u128 = 1000000000000000000000; pub const MINIMUM_BALANCE_FOR_WALLET: u128 = 5000000000000000000; @@ -12,27 +10,6 @@ pub const DEFAULT_UNSIGNED_TRANSACTIONS_DIR: &str = "transactions"; pub const BELLMAN_CUDA_DIR: &str = "era-bellman-cuda"; pub const L2_BASE_TOKEN_ADDRESS: &str = "0x000000000000000000000000000000000000800A"; -#[allow(non_upper_case_globals)] -const kB: usize = 1024; - -/// Max payload size for consensus in bytes -pub const MAX_PAYLOAD_SIZE: usize = 2_500_000; -/// Max batch size for consensus in bytes -/// Compute a default batch size, so operators are not caught out by the missing setting -/// while we're still working on batch syncing. 
The batch interval is ~1 minute, -/// so there will be ~60 blocks, and an Ethereum Merkle proof is ~1kB, but under high -/// traffic there can be thousands of huge transactions that quickly fill up blocks -/// and there could be more blocks in a batch then expected. We chose a generous -/// limit so as not to prevent any legitimate batch from being transmitted. -pub const MAX_BATCH_SIZE: usize = MAX_PAYLOAD_SIZE * 5000 + kB; -/// Gossip dynamic inbound limit for consensus -pub const GOSSIP_DYNAMIC_INBOUND_LIMIT: usize = 100; - -/// Public address for consensus -pub const CONSENSUS_PUBLIC_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); -/// Server address for consensus -pub const CONSENSUS_SERVER_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); - /// Path to the JS runtime config for the block-explorer-app docker container to be mounted to pub const EXPLORER_APP_DOCKER_CONFIG_PATH: &str = "/usr/src/app/packages/app/dist/config.js"; pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; diff --git a/zk_toolbox/crates/zk_inception/src/defaults.rs b/zkstack_cli/crates/zkstack/src/defaults.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/defaults.rs rename to zkstack_cli/crates/zkstack/src/defaults.rs diff --git a/zk_toolbox/crates/zk_inception/src/external_node.rs b/zkstack_cli/crates/zkstack/src/external_node.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/external_node.rs rename to zkstack_cli/crates/zkstack/src/external_node.rs diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zkstack_cli/crates/zkstack/src/main.rs similarity index 96% rename from zk_toolbox/crates/zk_inception/src/main.rs rename to zkstack_cli/crates/zkstack/src/main.rs index a305ca053b7c..987de555ecf6 100644 --- a/zk_toolbox/crates/zk_inception/src/main.rs +++ b/zkstack_cli/crates/zkstack/src/main.rs @@ -2,6 +2,7 @@ use clap::{command, Parser, Subcommand}; use commands::{ args::{ContainersArgs, UpdateArgs}, contract_verifier::ContractVerifierCommands, + dev::DevCommands, }; use common::{ check_general_prerequisites, @@ -46,6 +47,9 @@ pub enum InceptionSubcommands { /// Chain related commands #[command(subcommand, alias = "c")] Chain(Box<ChainCommands>), + /// Dev related commands + #[command(subcommand)] + Dev(DevCommands), /// Prover related commands #[command(subcommand, alias = "p")] Prover(ProverCommands), @@ -123,6 +127,7 @@ async fn run_subcommand(inception_args: Inception, shell: &Shell) -> anyhow::Res match inception_args.command { InceptionSubcommands::Ecosystem(args) => commands::ecosystem::run(shell, *args).await?, InceptionSubcommands::Chain(args) => commands::chain::run(shell, *args).await?, + InceptionSubcommands::Dev(args) => commands::dev::run(shell, args).await?, InceptionSubcommands::Prover(args) => commands::prover::run(shell, args).await?, InceptionSubcommands::Server(args) => commands::server::run(shell, args)?, InceptionSubcommands::Containers(args) => commands::containers::run(shell, args)?, diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zkstack_cli/crates/zkstack/src/messages.rs similarity index 99% rename from zk_toolbox/crates/zk_inception/src/messages.rs rename to zkstack_cli/crates/zkstack/src/messages.rs index ebdcf7378a44..d1d86db83989 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zkstack_cli/crates/zkstack/src/messages.rs @@ -296,7 +296,7 @@ pub(super) const MSG_EXPLORER_FAILED_TO_RUN_DOCKER_ERR: &str = pub(super) const MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR: &str = "Failed
to create explorer config"; pub(super) const MSG_EXPLORER_FAILED_TO_FIND_ANY_CHAIN_ERR: &str = - "Failed to find any valid chain to run explorer for. Did you run `zk_inception explorer init`?"; + "Failed to find any valid chain to run explorer for. Did you run `zkstack explorer init`?"; pub(super) const MSG_EXPLORER_INITIALIZED: &str = "Explorer has been initialized successfully"; pub(super) fn msg_explorer_initializing_database_for(chain: &str) -> String { format!("Initializing explorer database for {chain} chain") @@ -311,7 +311,7 @@ pub(super) fn msg_explorer_starting_on(host: &str, port: u16) -> String { format!("Starting explorer on http://{host}:{port}") } pub(super) fn msg_explorer_chain_not_initialized(chain: &str) -> String { - format!("Chain {chain} is not initialized for explorer: run `zk_inception explorer init --chain {chain}` first") + format!("Chain {chain} is not initialized for explorer: run `zkstack explorer init --chain {chain}` first") } /// Forge utils related messages diff --git a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs b/zkstack_cli/crates/zkstack/src/utils/consensus.rs similarity index 70% rename from zk_toolbox/crates/zk_inception/src/utils/consensus.rs rename to zkstack_cli/crates/zkstack/src/utils/consensus.rs index 2979b4df0c19..946d28a33fbd 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs +++ b/zkstack_cli/crates/zkstack/src/utils/consensus.rs @@ -1,24 +1,14 @@ -use std::{ - collections::{BTreeMap, BTreeSet}, - net::SocketAddr, -}; - use anyhow::Context as _; use config::ChainConfig; use secrecy::{ExposeSecret, Secret}; use zksync_config::configs::consensus::{ - AttesterPublicKey, AttesterSecretKey, ConsensusConfig, ConsensusSecrets, GenesisSpec, Host, - NodePublicKey, NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, - WeightedAttester, WeightedValidator, + AttesterPublicKey, AttesterSecretKey, ConsensusSecrets, GenesisSpec, NodePublicKey, + NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, WeightedAttester, + WeightedValidator, }; use zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_roles::{attester, node, validator}; -use crate::consts::{ - CONSENSUS_PUBLIC_ADDRESS_HOST, CONSENSUS_SERVER_ADDRESS_HOST, GOSSIP_DYNAMIC_INBOUND_LIMIT, - MAX_BATCH_SIZE, MAX_PAYLOAD_SIZE, -}; - pub(crate) fn parse_attester_committee( attesters: &[WeightedAttester], ) -> anyhow::Result<attester::Committee> { @@ -48,32 +38,6 @@ pub struct ConsensusPublicKeys { attester_key: attester::PublicKey, } -pub fn get_consensus_config( - chain_config: &ChainConfig, - consensus_port: u16, - consensus_keys: Option<ConsensusSecretKeys>, - gossip_static_outbound: Option<BTreeMap<NodePublicKey, Host>>, -) -> anyhow::Result<ConsensusConfig> { - let genesis_spec = - consensus_keys.map(|consensus_keys| get_genesis_specs(chain_config, &consensus_keys)); - - let public_addr = SocketAddr::new(CONSENSUS_PUBLIC_ADDRESS_HOST, consensus_port); - let server_addr = SocketAddr::new(CONSENSUS_SERVER_ADDRESS_HOST, consensus_port); - - Ok(ConsensusConfig { - server_addr, - public_addr: Host(public_addr.encode()), - genesis_spec, - max_payload_size: MAX_PAYLOAD_SIZE, - gossip_dynamic_inbound_limit: GOSSIP_DYNAMIC_INBOUND_LIMIT, - max_batch_size: MAX_BATCH_SIZE, - gossip_static_inbound: BTreeSet::new(), - gossip_static_outbound: gossip_static_outbound.unwrap_or_default(), - rpc: None, - debug_page_addr: None, - }) -} - pub fn generate_consensus_keys() -> ConsensusSecretKeys { ConsensusSecretKeys { validator_key: validator::SecretKey::generate(), diff --git a/zk_toolbox/crates/zk_inception/src/utils/forge.rs
b/zkstack_cli/crates/zkstack/src/utils/forge.rs similarity index 74% rename from zk_toolbox/crates/zk_inception/src/utils/forge.rs rename to zkstack_cli/crates/zkstack/src/utils/forge.rs index cabc8ff7566b..355cf7b5f930 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/forge.rs +++ b/zkstack_cli/crates/zkstack/src/utils/forge.rs @@ -1,6 +1,6 @@ -use anyhow::anyhow; -use common::forge::ForgeScript; -use ethers::types::{H256, U256}; +use anyhow::Context as _; +use common::{forge::ForgeScript, wallets::Wallet}; +use ethers::types::U256; use crate::{ consts::MINIMUM_BALANCE_FOR_WALLET, @@ -9,10 +9,14 @@ use crate::{ pub fn fill_forge_private_key( mut forge: ForgeScript, - private_key: Option<H256>, + wallet: Option<&Wallet>, ) -> anyhow::Result<ForgeScript> { if !forge.wallet_args_passed() { - forge = forge.with_private_key(private_key.ok_or(anyhow!(MSG_DEPLOYER_PK_NOT_SET_ERR))?); + forge = forge.with_private_key( + wallet + .and_then(|w| w.private_key_h256()) + .context(MSG_DEPLOYER_PK_NOT_SET_ERR)?, + ); } Ok(forge) } diff --git a/zk_toolbox/crates/zk_inception/src/utils/mod.rs b/zkstack_cli/crates/zkstack/src/utils/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/utils/mod.rs rename to zkstack_cli/crates/zkstack/src/utils/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/utils/ports.rs b/zkstack_cli/crates/zkstack/src/utils/ports.rs similarity index 96% rename from zk_toolbox/crates/zk_inception/src/utils/ports.rs rename to zkstack_cli/crates/zkstack/src/utils/ports.rs index 5102b4fd9c6d..04c8cef5ff59 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/ports.rs +++ b/zkstack_cli/crates/zkstack/src/utils/ports.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, fmt, ops::Range, path::Path}; +use std::{collections::HashMap, fmt, net::SocketAddr, ops::Range, path::Path}; use anyhow::{bail, Context, Result}; use config::{ @@ -109,6 +109,12 @@ impl EcosystemPorts { } } } + } else if key.as_str().map(|s| s.ends_with("addr")).unwrap_or(false) { + let socket_addr = val.as_str().unwrap().parse::<SocketAddr>()?; + if let Some(new_port) = updated_ports.get(&socket_addr.port()) { + let new_socket_addr = SocketAddr::new(socket_addr.ip(), *new_port); + *val = Value::String(new_socket_addr.to_string()); + } } } // Continue traversing @@ -169,7 +175,7 @@ impl EcosystemPortsScanner { // - Ecosystem directory (docker-compose files) let mut dirs = vec![ecosystem_config.config.clone()]; for chain in ecosystem_config.list_of_chains() { - if let Some(chain_config) = ecosystem_config.load_chain(Some(chain)) { + if let Ok(chain_config) = ecosystem_config.load_chain(Some(chain)) { dirs.push(chain_config.configs.clone()); if let Some(external_node_config_path) = &chain_config.external_node_config_path { dirs.push(external_node_config_path.clone()); diff --git a/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs b/zkstack_cli/crates/zkstack/src/utils/rocks_db.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs rename to zkstack_cli/crates/zkstack/src/utils/rocks_db.rs diff --git a/zk_toolbox/rust-toolchain b/zkstack_cli/rust-toolchain similarity index 100% rename from zk_toolbox/rust-toolchain rename to zkstack_cli/rust-toolchain diff --git a/zkstack_cli/zkstackup/README.md b/zkstack_cli/zkstackup/README.md new file mode 100644 index 000000000000..4977c4641e09 --- /dev/null +++ b/zkstack_cli/zkstackup/README.md @@ -0,0 +1,70 @@ +# zkstackup - ZK Stack CLI Installer + +`zkstackup` is a script designed to simplify the installation of +[ZK Stack
CLI](https://github.com/matter-labs/zksync-era/tree/main/zkstack_cli). It allows you to install the tool from +a local directory or directly from a GitHub repository. + +## Getting Started + +To install `zkstackup`, run the following command: + +```bash +curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/install | bash +``` + +After installing `zkstackup`, you can use it to install `zkstack_cli` with: + +```bash +zkstackup +``` + +## Usage + +The `zkstackup` script provides various options for installing ZK Stack CLI: + +### Options + +- `-p, --path <path>` + Specify a local path to install ZK Stack CLI from. This option is ignored if `--repo` is provided. + +- `-r, --repo <repo>` + GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". + +- `-b, --branch <branch>` + Git branch to use when installing from a repository. Ignored if `--commit` or `--version` is provided. + +- `-c, --commit <commit>` + Git commit hash to use when installing from a repository. Ignored if `--branch` or `--version` is provided. + +- `-v, --version <version>` + Git tag to use when installing from a repository. Ignored if `--branch` or `--commit` is provided. + +### Local Installation + +If you provide a local path using the `-p` or `--path` option, `zkstackup` will install ZK Stack CLI from that +directory. Note that repository-specific arguments (`--repo`, `--branch`, `--commit`, `--version`) will be ignored in +this case to preserve git state. + +### Repository Installation + +By default, `zkstackup` installs ZK Stack CLI from the "matter-labs/zksync-era" GitHub repository. You can specify a +different repository, branch, commit, or version using the respective options. If multiple arguments are provided, +`zkstackup` will prioritize them as follows: + +- `--version` +- `--commit` +- `--branch` + +### Examples + +**Install from a GitHub repository with a specific version:** + +```bash +zkstackup --repo matter-labs/zksync-era --version 0.1.1 +``` + +**Install from a local path:** + +```bash +zkstackup --path /path/to/local/zkstack_cli +``` diff --git a/zkstack_cli/zkstackup/install b/zkstack_cli/zkstackup/install new file mode 100755 index 000000000000..f20ba4dd545a --- /dev/null +++ b/zkstack_cli/zkstackup/install @@ -0,0 +1,121 @@ +#!/usr/bin/env bash +set -eo pipefail + +BIN_URL="https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/zkstackup" + +HOME_DIR=${XDG_CONFIG_HOME:-$HOME} +BIN_DIR="$HOME_DIR/.local/bin" +BIN_PATH="$BIN_DIR/zkstackup" + +main() { + parse_args "$@" + + mkdir -p "$BIN_DIR" + + if [ -n "$ZKSTACKUP_PATH" ]; then + cp -r "$ZKSTACKUP_PATH" "$BIN_DIR" + else + curl -sSfL "$BIN_URL" -o "$BIN_PATH" + fi + + chmod +x "$BIN_PATH" + echo "zkstackup: successfully installed in ${BIN_DIR}." + + add_bin_folder_to_path +} + +add_bin_folder_to_path() { + if [[ ":$PATH:" == *":${BIN_DIR}:"* ]]; then + echo "zkstackup: found ${BIN_DIR} in PATH" + exit 0 + fi + + case $SHELL in + */zsh) + PROFILE="${ZDOTDIR-"$HOME"}/.zshenv" + ;; + */bash) + PROFILE="$HOME/.bashrc" + ;; + */fish) + PROFILE="$HOME/.config/fish/config.fish" + ;; + */ash) + PROFILE="$HOME/.profile" + ;; + *) + echo "zkstackup: could not detect shell, manually add ${BIN_DIR} to your PATH." + exit 1 + ;; + esac + + if [[ ! -f "$PROFILE" ]]; then + echo "zkstackup: Profile file $PROFILE does not exist, creating it."
+ touch "$PROFILE" + fi + + if [[ "$SHELL" == *"/fish"* ]]; then + echo -e "\n# Added by zkstackup\nfish_add_path -a $BIN_DIR" >>"$PROFILE" + echo "zkstackup: Added $BIN_DIR to PATH in $PROFILE using fish_add_path." + else + echo -e "\n# Added by zkstackup\nexport PATH=\"\$PATH:$BIN_DIR\"" >>"$PROFILE" + echo "zkstackup: Added $BIN_DIR to PATH in $PROFILE." + fi + + echo + echo "Added zkstackup to PATH." + echo "Run 'source $PROFILE' or start a new terminal session to use zkstackup." + echo "Then run 'zkstackup' to install ZK Stack CLI." +} + +parse_args() { + while [[ $# -gt 0 ]]; do + case $1 in + --) + shift + break + ;; + -p | --path) + shift + ZKSTACKUP_PATH=$1 + ;; + -l | --local) + ZKSTACKUP_PATH="./" + ;; + -g | --global) + BIN_DIR="/usr/local/bin" + BIN_PATH="$BIN_DIR/zkstackup" + ;; + -h | --help) + usage + exit 0 + ;; + *) + err "Unknown argument: $1" + usage + exit 1 + ;; + esac + shift + done +} + + +usage() { + cat < Specify a local path to install zkstackup from. + -l, --local Install zkstackup from the current directory. + -g, --global Install zkstackup for all users. + -h, --help Show this help message and exit. + +Examples: + $(basename "$0") --path /path/to/zkstackup +EOF +} + +main "$@" diff --git a/zkstack_cli/zkstackup/zkstackup b/zkstack_cli/zkstackup/zkstackup new file mode 100755 index 000000000000..20a061620f9a --- /dev/null +++ b/zkstack_cli/zkstackup/zkstackup @@ -0,0 +1,273 @@ +#!/usr/bin/env bash +set -eo pipefail + +HOME_DIR=${XDG_CONFIG_HOME:-$HOME} +LOCAL_DIR=${LOCAL_DIR:-"$HOME_DIR/.local"} +BIN_DIR="$LOCAL_DIR/bin" + +BINS=() + +main() { + parse_args "$@" + + zkstack_banner + + check_prerequisites + mkdir -p "$BIN_DIR" + + BINS+=(zkstack) + + if [ -n "$ZKSTACKUP_PATH" ]; then + install_local + else + install_from_repo + fi + + zkstack_banner + + add_bin_folder_to_path + + for bin in "${BINS[@]}"; do + success "Installed $bin to $BIN_DIR/$bin" + done +} + +PREREQUISITES=(cargo git) + +check_prerequisites() { + say "Checking prerequisites" + + failed_prerequisites=() + for prerequisite in "${PREREQUISITES[@]}"; do + if ! check_prerequisite "$prerequisite"; then + failed_prerequisites+=("$prerequisite") + fi + done + if [ ${#failed_prerequisites[@]} -gt 0 ]; then + err "The following prerequisites are missing: ${failed_prerequisites[*]}" + exit 1 + fi +} + +check_prerequisite() { + command -v "$1" &>/dev/null +} + +parse_args() { + while [[ $# -gt 0 ]]; do + case $1 in + --) + shift + break + ;; + -p | --path) + shift + ZKSTACKUP_PATH=$1 + ;; + -l | --local) + ZKSTACKUP_PATH="./" + ;; + -g | --global) + LOCAL_DIR="/usr/local" + BIN_DIR="$LOCAL_DIR/bin" + ;; + -r | --repo) + shift + ZKSTACKUP_REPO=$1 + ;; + -b | --branch) + shift + ZKSTACKUP_BRANCH=$1 + ;; + -c | --commit) + shift + ZKSTACKUP_COMMIT=$1 + ;; + -v | --version) + shift + ZKSTACKUP_VERSION=$1 + ;; + -h | --help) + usage + exit 0 + ;; + *) + err "Unknown argument: $1" + usage + exit 1 + ;; + esac + shift + done +} + +usage() { + cat < Specify a local path to install ZK Stack CLI from. Ignored if --repo is provided. + -l, --local Install ZK Stack CLI from the current directory. Ignored if --repo is provided. + -g, --global Install ZK Stack CLI for all users. + -r, --repo GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". + -b, --branch Git branch to use when installing from a repository. Ignored if --commit or --version is provided. + -c, --commit Git commit hash to use when installing from a repository. 
Ignored if --branch or --version is provided. + -v, --version <version> Git tag to use when installing from a repository. Ignored if --branch or --commit is provided. + -h, --help Show this help message and exit. + +Examples: + $(basename "$0") --repo matter-labs/zksync-era --version 0.1.1 +EOF +} + +install_local() { + if [ ! -d "$ZKSTACKUP_PATH/zkstack_cli" ]; then + err "Path $ZKSTACKUP_PATH does not contain zkstack_cli" + exit 1 + fi + + if [ "$ZKSTACKUP_PATH" = "./" ]; then + if git rev-parse --is-inside-work-tree >/dev/null 2>&1; then + git config --local core.hooksPath || + git config --local core.hooksPath ./.githooks + fi + fi + + if [ -n "$ZKSTACKUP_BRANCH" ] || [ -n "$ZKSTACKUP_COMMIT" ] || [ -n "$ZKSTACKUP_VERSION" ] || [ -n "$ZKSTACKUP_REPO" ]; then + warn "Ignoring --repo, --branch, --commit and --version arguments when installing from local path" + fi + + say "Installing ZK Stack CLI from $ZKSTACKUP_PATH" + ensure cd "$ZKSTACKUP_PATH"/zkstack_cli + + for bin in "${BINS[@]}"; do + say "Installing $bin" + ensure cargo install --root $LOCAL_DIR --path ./crates/$bin --force + chmod +x "$BIN_DIR/$bin" + done +} + +install_from_repo() { + if [ -n "$ZKSTACKUP_PATH" ]; then + warn "Ignoring --path argument when installing from repository" + fi + + ZKSTACKUP_REPO=${ZKSTACKUP_REPO:-"matter-labs/zksync-era"} + + say "Installing ZK Stack CLI from $ZKSTACKUP_REPO" + + if [ -n "$ZKSTACKUP_VERSION" ]; then + if [ -n "$ZKSTACKUP_COMMIT" ] || [ -n "$ZKSTACKUP_BRANCH" ]; then + warn "Ignoring --commit and --branch arguments when installing by version" + fi + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --tag "zkstack_cli-v$ZKSTACKUP_VERSION" --locked "${BINS[@]}" --force + elif [ -n "$ZKSTACKUP_COMMIT" ]; then + if [ -n "$ZKSTACKUP_BRANCH" ]; then + warn "Ignoring --branch argument when installing by commit" + fi + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --rev "$ZKSTACKUP_COMMIT" --locked "${BINS[@]}" --force + elif [ -n "$ZKSTACKUP_BRANCH" ]; then + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --branch "$ZKSTACKUP_BRANCH" --locked "${BINS[@]}" --force + else + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --locked "${BINS[@]}" --force + fi +} + +add_bin_folder_to_path() { + if [[ ":$PATH:" == *":${BIN_DIR}:"* ]]; then + echo "found ${BIN_DIR} in PATH" + exit 0 + fi + + case $SHELL in + */zsh) + PROFILE="${ZDOTDIR-"$HOME"}/.zshenv" + ;; + */bash) + PROFILE="$HOME/.bashrc" + ;; + */fish) + PROFILE="$HOME/.config/fish/config.fish" + ;; + */ash) + PROFILE="$HOME/.profile" + ;; + *) + echo "could not detect shell, manually add ${BIN_DIR} to your PATH." + exit 1 + ;; + esac + + if [[ ! -f "$PROFILE" ]]; then + echo "Profile file $PROFILE does not exist, creating it." + touch "$PROFILE" + fi + + if [[ "$SHELL" == *"/fish"* ]]; then + echo -e "\n# Added by zkstackup\nfish_add_path -a $BIN_DIR" >>"$PROFILE" + echo "Added $BIN_DIR to PATH in $PROFILE using fish_add_path." + else + echo -e "\n# Added by zkstackup\nexport PATH=\"\$PATH:$BIN_DIR\"" >>"$PROFILE" + echo "Added $BIN_DIR to PATH in $PROFILE." + fi + + echo + echo "Added zkstack to PATH." + echo "Run 'source $PROFILE' or start a new terminal session to use zkstack." +} + +ensure() { + if !
"$@"; then + err "command failed: $*" + exit 1 + fi +} + +say() { + local action="${1%% *}" + local rest="${1#"$action" }" + + echo -e "\033[1;32m$action\033[0m $rest" +} + +success() { + echo -e "\033[1;32m$1\033[0m" +} + +warn() { + echo -e "\033[1;33mWARNING: $1\033[0m" +} + +err() { + echo -e "\033[1;31mERROR: $1\033[0m" >&2 +} + +zkstack_banner() { + printf ' + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + + ███████╗██╗ ██╗ ███████╗████████╗ █████╗ ██████╗██╗ ██╗ + ╚══███╔╝██║ ██╔╝ ██╔════╝╚══██╔══╝██╔══██╗██╔════╝██║ ██╔╝ + ███╔╝ █████╔╝ ███████╗ ██║ ███████║██║ █████╔╝ + ███╔╝ ██╔═██╗ ╚════██║ ██║ ██╔══██║██║ ██╔═██╗ + ███████╗██║ ██╗ ███████║ ██║ ██║ ██║╚██████╗██║ ██╗ + ╚══════╝╚═╝ ╚═╝ ╚══════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═╝ + + + A Comprehensive Toolkit for Creating and Managing ZK Stack Chains + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + +Repo : https://github.com/matter-labs/zksync-era/ +Docs : https://docs.zksync.io/ +Contribute : https://github.com/matter-labs/zksync-era/pulls + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + +' +} + +main "$@"