From 103a56b2a66d58ffb9a16d4fb64cbfd90c2d5d7b Mon Sep 17 00:00:00 2001
From: Lech <88630083+Artemka374@users.noreply.github.com>
Date: Wed, 20 Mar 2024 11:24:56 +0200
Subject: [PATCH] feat: Separate Prover and Server DAL (#1334)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

* New `db_connection` crate, which contains all the main logic for managing `ConnectionPool` and `StorageProcessor`.
* Prover parts of the DAL are moved to the `prover_dal` crate.
* We now have typed `ConnectionPool`s and `StorageProcessor`s. To use the appropriate `ConnectionPool` there are two options, `ConnectionPool<Server>` and `ConnectionPool<Prover>`, each of which returns the matching `StorageProcessor` whose methods are accessible *only* through that connection pool.
* `vm_version`, `prover_dal` and part of the `protocol_version` types were moved to the `basic_types` crate.

Further steps:

* Add migrations for dropping tables that are not needed in the databases, done [here](https://github.com/matter-labs/zksync-era/pull/1436).
* Rename `Server` to `Core` and `StorageProcessor` to `Connection`.

## Why ❔

To separate the logic of the prover and the server. To prevent accessing the wrong database through a single `ConnectionPool`.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
- [x] Spellcheck has been run via `zk spellcheck`.
- [x] Linkcheck has been run via `zk linkcheck`.

---------

Co-authored-by: EmilLuta
---
 Cargo.lock | 75 ++++- Cargo.toml | 1 + checks-config/era.dic | 2 + core/bin/block_reverter/src/main.rs | 4 +- core/bin/contract-verifier/src/main.rs | 6 +- core/bin/contract-verifier/src/verifier.rs | 10 +- core/bin/external_node/Cargo.toml | 3 +- core/bin/external_node/src/init.rs | 4 +- core/bin/external_node/src/main.rs | 19 +- core/bin/snapshots_creator/src/creator.rs | 10 +- core/bin/snapshots_creator/src/main.rs | 6 +- core/bin/snapshots_creator/src/tests.rs | 20 +- core/bin/verified_sources_fetcher/src/main.rs | 4 +- core/lib/basic_types/Cargo.toml | 7 +- core/lib/basic_types/src/lib.rs | 3 + core/lib/basic_types/src/protocol_version.rs | 285 ++++++++++++++++++ core/lib/basic_types/src/prover_dal.rs | 217 +++++++++++++ .../{types => basic_types}/src/vm_version.rs | 0 core/lib/circuit_breaker/src/l1_txs.rs | 4 +- .../circuit_breaker/src/replication_lag.rs | 4 +- core/lib/dal/Cargo.toml | 7 +- core/lib/dal/README.md | 4 +- .../src/basic_witness_input_producer_dal.rs | 13 +- core/lib/dal/src/blocks_dal.rs | 18 +- core/lib/dal/src/blocks_web3_dal.rs | 22 +- core/lib/dal/src/consensus_dal.rs | 9 +- core/lib/dal/src/contract_verification_dal.rs | 5 +- core/lib/dal/src/eth_sender_dal.rs | 5 +- core/lib/dal/src/events_dal.rs | 14 +- core/lib/dal/src/events_web3_dal.rs | 15 +- core/lib/dal/src/factory_deps_dal.rs | 5 +- core/lib/dal/src/lib.rs | 177 ++++++----- core/lib/dal/src/metrics.rs | 109 +------ core/lib/dal/src/models/consensus/mod.rs | 2 +- core/lib/dal/src/models/mod.rs | 1 - .../src/models/storage_protocol_version.rs | 7 +- .../dal/src/models/storage_prover_job_info.rs | 72 ----- .../lib/dal/src/models/storage_transaction.rs | 2 +- .../src/models/storage_witness_job_info.rs | 12 +- core/lib/dal/src/proof_generation_dal.rs | 5 +- core/lib/dal/src/protocol_versions_dal.rs | 8 +- .../lib/dal/src/protocol_versions_web3_dal.rs | 5 +- core/lib/dal/src/snapshot_recovery_dal.rs | 9 +-
core/lib/dal/src/snapshots_creator_dal.rs | 13 +- core/lib/dal/src/snapshots_dal.rs | 11 +- core/lib/dal/src/storage_dal.rs | 9 +- core/lib/dal/src/storage_logs_dal.rs | 31 +- core/lib/dal/src/storage_logs_dedup_dal.rs | 5 +- core/lib/dal/src/storage_web3_dal.rs | 14 +- core/lib/dal/src/sync_dal.rs | 15 +- core/lib/dal/src/system_dal.rs | 6 +- core/lib/dal/src/tests/mod.rs | 11 +- core/lib/dal/src/tokens_dal.rs | 15 +- core/lib/dal/src/tokens_web3_dal.rs | 5 +- core/lib/dal/src/transactions_dal.rs | 19 +- core/lib/dal/src/transactions_web3_dal.rs | 22 +- core/lib/db_connection/Cargo.toml | 38 +++ .../src/connection.rs} | 80 +++-- .../{dal => db_connection}/src/healthcheck.rs | 14 +- .../{dal => db_connection}/src/instrument.rs | 47 ++- core/lib/db_connection/src/lib.rs | 8 + .../{dal => db_connection}/src/macro_utils.rs | 4 + core/lib/db_connection/src/metrics.rs | 96 ++++++ .../src}/processor.rs | 80 ++--- .../src/utils.rs} | 7 + .../src/versions/vm_latest/tests/upgrade.rs | 2 +- core/lib/snapshots_applier/Cargo.toml | 1 + core/lib/snapshots_applier/src/lib.rs | 16 +- core/lib/snapshots_applier/src/tests/mod.rs | 14 +- core/lib/state/src/postgres/mod.rs | 14 +- core/lib/state/src/postgres/tests.rs | 30 +- core/lib/state/src/rocksdb/mod.rs | 14 +- core/lib/state/src/rocksdb/recovery.rs | 12 +- core/lib/state/src/rocksdb/tests.rs | 25 +- core/lib/state/src/test_utils.rs | 10 +- core/lib/types/src/lib.rs | 10 +- ...rotocol_version.rs => protocol_upgrade.rs} | 275 +---------------- core/lib/vm_utils/src/lib.rs | 4 +- core/lib/vm_utils/src/storage.rs | 16 +- core/lib/zksync_core/Cargo.toml | 2 + .../contract_verification/api_decl.rs | 10 +- .../contract_verification/api_impl.rs | 1 + .../api_server/contract_verification/mod.rs | 6 +- .../src/api_server/execution_sandbox/apply.rs | 12 +- .../api_server/execution_sandbox/execute.rs | 6 +- .../src/api_server/execution_sandbox/mod.rs | 14 +- .../src/api_server/execution_sandbox/tests.rs | 8 +- .../api_server/execution_sandbox/validate.rs | 6 +- .../zksync_core/src/api_server/tree/tests.rs | 6 +- .../api_server/tx_sender/master_pool_sink.rs | 6 +- .../src/api_server/tx_sender/mod.rs | 12 +- .../src/api_server/tx_sender/proxy.rs | 6 +- .../src/api_server/tx_sender/tests.rs | 6 +- .../zksync_core/src/api_server/web3/mod.rs | 14 +- .../src/api_server/web3/namespaces/debug.rs | 1 + .../src/api_server/web3/namespaces/en.rs | 1 + .../src/api_server/web3/namespaces/eth.rs | 1 + .../api_server/web3/namespaces/snapshots.rs | 1 + .../src/api_server/web3/namespaces/zks.rs | 6 +- .../zksync_core/src/api_server/web3/pubsub.rs | 6 +- .../zksync_core/src/api_server/web3/state.rs | 12 +- .../src/api_server/web3/tests/debug.rs | 8 +- .../src/api_server/web3/tests/filters.rs | 12 +- .../src/api_server/web3/tests/mod.rs | 60 ++-- .../src/api_server/web3/tests/snapshots.rs | 2 +- .../src/api_server/web3/tests/vm.rs | 28 +- .../src/api_server/web3/tests/ws.rs | 22 +- .../src/basic_witness_input_producer/mod.rs | 10 +- .../lib/zksync_core/src/block_reverter/mod.rs | 6 +- .../src/commitment_generator/mod.rs | 6 +- .../zksync_core/src/consensus/storage/mod.rs | 8 +- .../lib/zksync_core/src/consensus/testonly.rs | 1 + .../src/consistency_checker/mod.rs | 8 +- .../src/consistency_checker/tests/mod.rs | 12 +- .../zksync_core/src/eth_sender/aggregator.rs | 18 +- .../src/eth_sender/eth_tx_aggregator.rs | 14 +- .../src/eth_sender/eth_tx_manager.rs | 32 +- .../lib/zksync_core/src/eth_sender/metrics.rs | 4 +- .../src/eth_sender/publish_criterion.rs | 14 +- 
core/lib/zksync_core/src/eth_sender/tests.rs | 28 +- .../event_processors/governance_upgrades.rs | 6 +- .../src/eth_watch/event_processors/mod.rs | 4 +- .../event_processors/priority_ops.rs | 4 +- .../eth_watch/event_processors/upgrades.rs | 4 +- core/lib/zksync_core/src/eth_watch/mod.rs | 15 +- core/lib/zksync_core/src/eth_watch/tests.rs | 22 +- core/lib/zksync_core/src/fee_model.rs | 6 +- core/lib/zksync_core/src/genesis.rs | 27 +- .../src/house_keeper/blocks_state_reporter.rs | 6 +- .../fri_proof_compressor_job_retry_manager.rs | 5 +- .../fri_proof_compressor_queue_monitor.rs | 10 +- .../fri_prover_job_retry_manager.rs | 5 +- .../house_keeper/fri_prover_queue_monitor.rs | 11 +- .../fri_scheduler_circuit_queuer.rs | 5 +- ...ri_witness_generator_jobs_retry_manager.rs | 5 +- .../fri_witness_generator_queue_monitor.rs | 9 +- ...waiting_to_queued_fri_witness_job_mover.rs | 5 +- core/lib/zksync_core/src/lib.rs | 84 +++--- .../src/metadata_calculator/helpers.rs | 18 +- .../src/metadata_calculator/mod.rs | 4 +- .../src/metadata_calculator/recovery/mod.rs | 17 +- .../src/metadata_calculator/recovery/tests.rs | 9 +- .../src/metadata_calculator/tests.rs | 41 +-- .../src/metadata_calculator/updater.rs | 10 +- .../zksync_core/src/proof_data_handler/mod.rs | 4 +- .../proof_data_handler/request_processor.rs | 6 +- .../lib/zksync_core/src/reorg_detector/mod.rs | 6 +- .../zksync_core/src/reorg_detector/tests.rs | 26 +- .../batch_executor/main_executor.rs | 6 +- .../state_keeper/batch_executor/tests/mod.rs | 28 +- .../batch_executor/tests/tester.rs | 12 +- .../src/state_keeper/io/common/mod.rs | 6 +- .../src/state_keeper/io/common/tests.rs | 26 +- .../state_keeper/io/fee_address_migration.rs | 20 +- .../src/state_keeper/io/mempool.rs | 8 +- .../zksync_core/src/state_keeper/io/mod.rs | 11 +- .../src/state_keeper/io/seal_logic.rs | 12 +- .../src/state_keeper/io/tests/mod.rs | 36 +-- .../src/state_keeper/io/tests/tester.rs | 10 +- .../zksync_core/src/state_keeper/keeper.rs | 10 +- .../src/state_keeper/mempool_actor.rs | 16 +- core/lib/zksync_core/src/state_keeper/mod.rs | 4 +- .../src/state_keeper/tests/tester.rs | 2 +- .../lib/zksync_core/src/state_keeper/types.rs | 7 +- .../sync_layer/batch_status_updater/mod.rs | 10 +- .../sync_layer/batch_status_updater/tests.rs | 20 +- .../zksync_core/src/sync_layer/external_io.rs | 8 +- .../lib/zksync_core/src/sync_layer/fetcher.rs | 6 +- .../lib/zksync_core/src/sync_layer/genesis.rs | 4 +- core/lib/zksync_core/src/sync_layer/tests.rs | 22 +- core/lib/zksync_core/src/utils/mod.rs | 14 +- core/lib/zksync_core/src/utils/testonly.rs | 8 +- core/node/node_framework/Cargo.toml | 2 + .../src/implementations/layers/eth_watch.rs | 4 +- .../implementations/layers/house_keeper.rs | 9 +- .../layers/metadata_calculator.rs | 4 +- .../src/implementations/layers/pools_layer.rs | 21 +- .../layers/proof_data_handler.rs | 4 +- .../src/implementations/resources/pools.rs | 34 ++- prover/Cargo.lock | 77 ++++- prover/proof_fri_compressor/Cargo.toml | 2 +- prover/proof_fri_compressor/src/compressor.rs | 6 +- prover/proof_fri_compressor/src/main.rs | 4 +- ...f6e1df560ab1e8935564355236e90b6147d2f.json | 0 ...e57a83f37da8999849377dfad60b44989be39.json | 0 ...986511265c541d81b1d21f0a751ae1399c626.json | 0 ...aff3a06b7a9c1866132d62e4449fa9436c7c4.json | 0 ...28a20420763a3a22899ad0e5f4b953b615a9e.json | 0 ...1610ffa7f169d560e79e89b99eedf681c6773.json | 0 ...4d3648b75bf01ff336bbd77d15f9aa5fd6443.json | 0 ...52554ccfb5b83f00efdc12bed0f60ef439785.json | 0 
...5fb7a093b73727f75e0cb7db9cea480c95f5c.json | 0 ...22ff6372f63ecadb504a329499b02e7d3550e.json | 0 ...f9044ae85b579c7051301b40bd5f94df1f530.json | 0 ...6769dbb04d3a61cf232892236c974660ffe64.json | 0 ...8b87ead36f593488437c6f67da629ca81e4fa.json | 0 ...5da82065836fe17687ffad04126a6a8b2b27c.json | 0 ...d772d8801b0ae673b7173ae08a1fa6cbf67b2.json | 0 ...afca34d61347e0e2e29fb07ca3d1b8b1f309c.json | 0 ...de366f88c20d00a8d928b6cf4caae0702b333.json | 0 ...9a8f447824a5ab466bb6eea1710e8aeaa2c56.json | 0 ...d94f28b7b2b60d551d552a9b0bab1f1791e39.json | 0 ...6009503182c300877e74a8539f231050e6f85.json | 0 ...7effac442434c6e734d977e6682a7484abe7f.json | 0 ...52aeb5f06c26f68d131dd242f6ed68816c513.json | 0 ...e7ee767e4c98706246eb113498c0f817f5f38.json | 0 ...8bf92c7f2c55386c8208e3a82b30456abd5b4.json | 0 ...715e903f3b399886c2c73e838bd924fed6776.json | 0 ...50d625f057acf1440b6550375ce5509a816a6.json | 0 ...7227120a8279db1875d26ccae5ee0785f46a9.json | 0 ...1578db18c29cdca85b8b6aad86fe2a9bf6bbe.json | 0 ...cbb724af0f0216433a70f19d784e3f2afbc9f.json | 0 ...a36532fee1450733849852dfd20e18ded1f03.json | 0 ...d2e2326d7ace079b095def52723a45b65d3f3.json | 0 ...5ca679a0459a012899a373c67393d30d12601.json | 0 ...c9a64904026506914abae2946e5d353d6a604.json | 0 ...9f41220c51f58a03c61d6b7789eab0504e320.json | 0 ...477dcf21955e0921ba648ba2e751dbfc3cb45.json | 0 ...f724216807ffd481cd6f7f19968e42e52b284.json | 0 ...1ac4ab2d73bda6022700aeb0a630a2563a4b4.json | 0 ...f8dcbcb014b4f808c6232abd9a83354c995ac.json | 0 ...663a9c5ea62ea7c99a77941eb8f05d4454125.json | 0 ...7b56187686173327498ac75424593547c19c5.json | 0 ...f8c12deeca6b8843fe3869cc2b02b30da5de6.json | 0 ...49b6370c211a7fc24ad03a5f0e327f9d18040.json | 0 ...d964d4bb39b9dcd18fb03bc11ce2fb32b7fb3.json | 0 ...0103263af3ff5cb6c9dc5df59d3cd2a5e56b4.json | 0 ...912d57f8eb2a38bdb7884fc812a2897a3a660.json | 0 ...a653852f7e43670076eb2ebcd49542a870539.json | 0 ...e899e360650afccb34f5cc301b5cbac4a3d36.json | 0 ...8d43c31ec7441a7f6c5040e120810ebbb72f7.json | 0 ...ac429aac3c030f7e226a1264243d8cdae038d.json | 0 ...cb21a635037d89ce24dd3ad58ffaadb59594a.json | 0 ...3e67f08f2ead5f55bfb6594e50346bf9cf2ef.json | 0 ...813d2b2d411bd5faf8306cd48db500532b711.json | 0 ...7e88abd0f8164c2413dc83c91c29665ca645e.json | 0 ...2060fbea775dc185f639139fbfd23e4d5f3c6.json | 0 ...70a4e629b2a1cde641e74e4e55bb100df809f.json | 0 ...b957e92cd375ec33fe16f855f319ffc0b208e.json | 0 ...71ababa66e4a443fbefbfffca72b7540b075b.json | 0 ...91a9984685eaaaa0a8b223410d560a15a3034.json | 0 ...6686e655206601854799139c22c017a214744.json | 0 ...c3465e2211ef3013386feb12d4cc04e0eade9.json | 0 ...15aaade450980719933089824eb8c494d64a4.json | 0 ...bae63443c51deb818dd0affd1a0949b161737.json | 0 ...17db60405a887f9f7fa0ca60aa7fc879ce630.json | 0 ...5e6f8b7f88a0894a7f9e27fc26f93997d37c7.json | 0 ...304e8a35fd65bf37e976b7106f57c57e70b9b.json | 0 prover/prover_dal/Cargo.toml | 24 +- .../prover_dal}/doc/FriProofCompressorDal.md | 0 .../prover_dal}/doc/FriProverDal.md | 0 .../prover_dal}/doc/FriWitnessGeneratorDal.md | 0 .../src/fri_gpu_prover_queue_dal.rs | 11 +- .../src/fri_proof_compressor_dal.rs | 14 +- .../src/fri_protocol_versions_dal.rs | 7 +- .../prover_dal}/src/fri_prover_dal.rs | 237 +-------------- .../fri_scheduler_dependency_tracker_dal.rs | 11 +- .../src/fri_witness_generator_dal.rs | 17 +- prover/prover_dal/src/lib.rs | 78 +++++ prover/prover_fri/Cargo.toml | 2 +- .../src/gpu_prover_job_processor.rs | 8 +- prover/prover_fri/src/main.rs | 16 +- prover/prover_fri/src/prover_job_processor.rs | 6 +- 
prover/prover_fri/src/socket_listener.rs | 10 +- prover/prover_fri/src/utils.rs | 4 +- prover/prover_fri_gateway/Cargo.toml | 2 +- .../src/api_data_fetcher.rs | 4 +- prover/prover_fri_gateway/src/main.rs | 4 +- .../src/proof_gen_data_fetcher.rs | 1 + .../prover_fri_gateway/src/proof_submitter.rs | 2 +- prover/prover_fri_utils/Cargo.toml | 2 +- prover/prover_fri_utils/src/lib.rs | 4 +- prover/witness_generator/Cargo.toml | 1 + .../witness_generator/src/basic_circuits.rs | 25 +- .../witness_generator/src/leaf_aggregation.rs | 12 +- prover/witness_generator/src/main.rs | 7 +- .../witness_generator/src/node_aggregation.rs | 12 +- prover/witness_generator/src/scheduler.rs | 5 +- prover/witness_generator/tests/basic_test.rs | 2 +- prover/witness_vector_generator/Cargo.toml | 2 +- .../witness_vector_generator/src/generator.rs | 13 +- prover/witness_vector_generator/src/main.rs | 2 +- 282 files changed, 2227 insertions(+), 1804 deletions(-) create mode 100644 core/lib/basic_types/src/protocol_version.rs create mode 100644 core/lib/basic_types/src/prover_dal.rs rename core/lib/{types => basic_types}/src/vm_version.rs (100%) delete mode 100644 core/lib/dal/src/models/storage_prover_job_info.rs create mode 100644 core/lib/db_connection/Cargo.toml rename core/lib/{dal/src/connection/mod.rs => db_connection/src/connection.rs} (89%) rename core/lib/{dal => db_connection}/src/healthcheck.rs (75%) rename core/lib/{dal => db_connection}/src/instrument.rs (87%) create mode 100644 core/lib/db_connection/src/lib.rs rename core/lib/{dal => db_connection}/src/macro_utils.rs (99%) create mode 100644 core/lib/db_connection/src/metrics.rs rename core/lib/{dal/src/connection => db_connection/src}/processor.rs (86%) rename core/lib/{dal/src/time_utils.rs => db_connection/src/utils.rs} (80%) rename core/lib/types/src/{protocol_version.rs => protocol_upgrade.rs} (69%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-00b88ec7fcf40bb18e0018b7c76f6e1df560ab1e8935564355236e90b6147d2f.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-01ac5343beb09ec5bd45b39d560e57a83f37da8999849377dfad60b44989be39.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-0d13b8947b1bafa9e5bc6fdc70a986511265c541d81b1d21f0a751ae1399c626.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-12ab208f416e2875f89e558f0d4aff3a06b7a9c1866132d62e4449fa9436c7c4.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-15858168fea6808c6d59d0e6d8f28a20420763a3a22899ad0e5f4b953b615a9e.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-1bc6597117db032b87df33040d61610ffa7f169d560e79e89b99eedf681c6773.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-204cfd593c62a5a1582215a5f0f4d3648b75bf01ff336bbd77d15f9aa5fd6443.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-2b626262c8003817ee02978f77452554ccfb5b83f00efdc12bed0f60ef439785.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-2d31fcce581975a82d6156b52e35fb7a093b73727f75e0cb7db9cea480c95f5c.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-33d6be45b246523ad76f9ae512322ff6372f63ecadb504a329499b02e7d3550e.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-35b87a3b7db0af87c6a95e9fe7ef9044ae85b579c7051301b40bd5f94df1f530.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-3c3abbf689fa64c6da7de69fd916769dbb04d3a61cf232892236c974660ffe64.json (100%) rename {core/lib/dal => 
prover/prover_dal}/.sqlx/query-3ec365c5c81f4678a905ae5bbd48b87ead36f593488437c6f67da629ca81e4fa.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-46c4696fff5a4b8cc5cb46b05645da82065836fe17687ffad04126a6a8b2b27c.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-4d263992ed6d5abbd7d3ca43af9d772d8801b0ae673b7173ae08a1fa6cbf67b2.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-4d92a133a36afd682a84fbfd75aafca34d61347e0e2e29fb07ca3d1b8b1f309c.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-510bfea2346a8c63e74222e1159de366f88c20d00a8d928b6cf4caae0702b333.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-534822a226068cde83ad8c30b569a8f447824a5ab466bb6eea1710e8aeaa2c56.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-5821f1446983260168cec366af26009503182c300877e74a8539f231050e6f85.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-5e781f84ec41edd0941fa84de837effac442434c6e734d977e6682a7484abe7f.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-6ae2ed34230beae0e86c584e293e7ee767e4c98706246eb113498c0f817f5f38.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-75f6eaa518e7840374c4e44b0788bf92c7f2c55386c8208e3a82b30456abd5b4.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-7a2145e2234a7896031bbc1ce82715e903f3b399886c2c73e838bd924fed6776.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-7a8fffe8d4e3085e00c98f770d250d625f057acf1440b6550375ce5509a816a6.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-8182690d0326b820d23fba49d391578db18c29cdca85b8b6aad86fe2a9bf6bbe.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-8f5e89ccadd4ea1da7bfe9793a1cbb724af0f0216433a70f19d784e3f2afbc9f.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-9ef2f43e6201cc00a0e1425a666a36532fee1450733849852dfd20e18ded1f03.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-a0e2b2c034cc5f668f0b3d43b94d2e2326d7ace079b095def52723a45b65d3f3.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-a4861c931e84d897c27f666de1c5ca679a0459a012899a373c67393d30d12601.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-a84ee70bec8c03bd51e1c6bad44c9a64904026506914abae2946e5d353d6a604.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-aa91697157517322b0dbb53dca99f41220c51f58a03c61d6b7789eab0504e320.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-aaf4fb97c95a5290fb1620cd868477dcf21955e0921ba648ba2e751dbfc3cb45.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-af72fabd90eb43fb315f46d7fe9f724216807ffd481cd6f7f19968e42e52b284.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-afc24bd1407dba82cd3dc9e7ee71ac4ab2d73bda6022700aeb0a630a2563a4b4.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-b17c71983da060f08616e001b42f8dcbcb014b4f808c6232abd9a83354c995ac.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-b23ddb16513d69331056b94d466663a9c5ea62ea7c99a77941eb8f05d4454125.json (100%) rename 
{core/lib/dal => prover/prover_dal}/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-b367ecb1ebee86ec598c4079591f8c12deeca6b8843fe3869cc2b02b30da5de6.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-b4304b9afb9f838eee1fe95af5fd964d4bb39b9dcd18fb03bc11ce2fb32b7fb3.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-b4794e6a0c2366d5d95ab373c310103263af3ff5cb6c9dc5df59d3cd2a5e56b4.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-bfb80956a18eabf266f5b5a9d62912d57f8eb2a38bdb7884fc812a2897a3a660.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-c10cf20825de4d24300c7ec50d4a653852f7e43670076eb2ebcd49542a870539.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-c23d5ff919ade5898c6a912780ae899e360650afccb34f5cc301b5cbac4a3d36.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-c41312e01aa66897552e8be9acc8d43c31ec7441a7f6c5040e120810ebbb72f7.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-c706a49ff54f6b424e24d061fe7ac429aac3c030f7e226a1264243d8cdae038d.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-ca9d06141265b8524ee28c55569cb21a635037d89ce24dd3ad58ffaadb59594a.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-ce5779092feb8a3d3e2c5e395783e67f08f2ead5f55bfb6594e50346bf9cf2ef.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-d7e8eabd7b43ff62838fbc847e4813d2b2d411bd5faf8306cd48db500532b711.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-d8e3ee346375e4b6a8b2c73a3827e88abd0f8164c2413dc83c91c29665ca645e.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-db3e74f0e83ffbf84a6d61e560f2060fbea775dc185f639139fbfd23e4d5f3c6.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-df00e33809768120e395d8f740770a4e629b2a1cde641e74e4e55bb100df809f.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-e3479d12d9dc97001cf03dc42d9b957e92cd375ec33fe16f855f319ffc0b208e.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-e74a34a59e6afda689b0ec9e19071ababa66e4a443fbefbfffca72b7540b075b.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-e9adf5b5a1ab84c20a514a7775f91a9984685eaaaa0a8b223410d560a15a3034.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-e9ca863d6e77edd39a9fc55700a6686e655206601854799139c22c017a214744.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-ec04b89218111a5dc8d5ade506ac3465e2211ef3013386feb12d4cc04e0eade9.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-edc61e1285bf6d3837acc67af4f15aaade450980719933089824eb8c494d64a4.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-ef687be83e496d6647e4dfef9eabae63443c51deb818dd0affd1a0949b161737.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-f4362a61ab05af3d71a3232d2f017db60405a887f9f7fa0ca60aa7fc879ce630.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-f717ca5d0890759496739a678955e6f8b7f88a0894a7f9e27fc26f93997d37c7.json (100%) rename {core/lib/dal => prover/prover_dal}/.sqlx/query-fcddeb96dcd1611dedb2091c1be304e8a35fd65bf37e976b7106f57c57e70b9b.json (100%) rename {core/lib/dal => prover/prover_dal}/doc/FriProofCompressorDal.md (100%) rename {core/lib/dal => 
prover/prover_dal}/doc/FriProverDal.md (100%) rename {core/lib/dal => prover/prover_dal}/doc/FriWitnessGeneratorDal.md (100%) rename {core/lib/dal => prover/prover_dal}/src/fri_gpu_prover_queue_dal.rs (94%) rename {core/lib/dal => prover/prover_dal}/src/fri_proof_compressor_dal.rs (97%) rename {core/lib/dal => prover/prover_dal}/src/fri_protocol_versions_dal.rs (92%) rename {core/lib/dal => prover/prover_dal}/src/fri_prover_dal.rs (72%) rename {core/lib/dal => prover/prover_dal}/src/fri_scheduler_dependency_tracker_dal.rs (94%) rename {core/lib/dal => prover/prover_dal}/src/fri_witness_generator_dal.rs (99%) diff --git a/Cargo.lock b/Cargo.lock index 5146cedaade2..025d3841fc8d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -392,9 +392,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" [[package]] name = "arr_macro" @@ -4150,7 +4150,16 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a015b430d3c108a207fd776d2e2196aaf8b1cf8cf93253e3a097ff3085076a1" dependencies = [ - "num_enum_derive", + "num_enum_derive 0.6.1", +] + +[[package]] +name = "num_enum" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" +dependencies = [ + "num_enum_derive 0.7.2", ] [[package]] @@ -4165,6 +4174,18 @@ dependencies = [ "syn 2.0.38", ] +[[package]] +name = "num_enum_derive" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" +dependencies = [ + "proc-macro-crate 2.0.1", + "proc-macro2 1.0.69", + "quote 1.0.33", + "syn 2.0.38", +] + [[package]] name = "object" version = "0.32.1" @@ -4838,6 +4859,16 @@ dependencies = [ "thiserror", ] +[[package]] +name = "prover_dal" +version = "0.1.0" +dependencies = [ + "sqlx", + "strum", + "zksync_basic_types", + "zksync_db_connection", +] + [[package]] name = "ptr_meta" version = "0.1.4" @@ -7829,7 +7860,7 @@ version = "0.1.0" source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git#32dd320953841aa78579d9da08abbc70bcaed175" dependencies = [ "anyhow", - "num_enum", + "num_enum 0.6.1", "serde", "static_assertions", "zkevm_opcode_defs 1.3.2", @@ -7841,7 +7872,7 @@ version = "1.4.1" source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git?branch=v1.4.1#0aac08c3b097ee8147e748475117ac46bddcdcef" dependencies = [ "anyhow", - "num_enum", + "num_enum 0.6.1", "serde", "static_assertions", "zkevm_opcode_defs 1.4.1", @@ -7950,8 +7981,11 @@ dependencies = [ name = "zksync_basic_types" version = "0.1.0" dependencies = [ + "chrono", + "num_enum 0.7.2", "serde", "serde_json", + "strum", "web3", ] @@ -8232,6 +8266,7 @@ dependencies = [ "pin-project-lite", "prometheus_exporter", "prost", + "prover_dal", "rand 0.8.5", "reqwest", "serde", @@ -8261,6 +8296,7 @@ dependencies = [ "zksync_consensus_utils", "zksync_contracts", "zksync_dal", + "zksync_db_connection", "zksync_env_config", "zksync_eth_client", "zksync_eth_signer", @@ -8303,13 +8339,11 @@ name = "zksync_dal" version = "0.1.0" dependencies = [ "anyhow", - "assert_matches", "bigdecimal", "bincode", "chrono", "hex", "itertools 0.10.5", - "once_cell", "prost", "rand 0.8.5", "serde", @@ 
-8319,12 +8353,11 @@ dependencies = [ "thiserror", "tokio", "tracing", - "url", "vise", "zksync_consensus_roles", "zksync_consensus_storage", "zksync_contracts", - "zksync_health_check", + "zksync_db_connection", "zksync_protobuf", "zksync_protobuf_build", "zksync_system_constants", @@ -8332,6 +8365,24 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_db_connection" +version = "0.1.0" +dependencies = [ + "anyhow", + "assert_matches", + "rand 0.8.5", + "serde", + "serde_json", + "sqlx", + "tokio", + "tracing", + "url", + "vise", + "zksync_basic_types", + "zksync_health_check", +] + [[package]] name = "zksync_env_config" version = "0.1.0" @@ -8410,6 +8461,7 @@ dependencies = [ "zksync_contracts", "zksync_core", "zksync_dal", + "zksync_db_connection", "zksync_health_check", "zksync_l1_contract_interface", "zksync_object_store", @@ -8508,6 +8560,7 @@ dependencies = [ "async-trait", "futures 0.3.28", "prometheus_exporter", + "prover_dal", "thiserror", "tokio", "tracing", @@ -8516,6 +8569,7 @@ dependencies = [ "zksync_contracts", "zksync_core", "zksync_dal", + "zksync_db_connection", "zksync_env_config", "zksync_eth_client", "zksync_health_check", @@ -8664,6 +8718,7 @@ dependencies = [ "tracing", "vise", "zksync_dal", + "zksync_db_connection", "zksync_health_check", "zksync_object_store", "zksync_types", @@ -8735,7 +8790,7 @@ dependencies = [ "hex", "itertools 0.10.5", "num 0.4.1", - "num_enum", + "num_enum 0.6.1", "once_cell", "prost", "rlp", diff --git a/Cargo.toml b/Cargo.toml index fba46b99db54..7e19c459603d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,7 @@ members = [ # Node services "core/node/node_framework", # Libraries + "core/lib/db_connection", "core/lib/zksync_core", "core/lib/basic_types", "core/lib/config", diff --git a/checks-config/era.dic b/checks-config/era.dic index 84a6d10363ca..1943f84a7dfc 100644 --- a/checks-config/era.dic +++ b/checks-config/era.dic @@ -918,3 +918,5 @@ stateful WIP oneshot p2p +StorageProcessor +StorageMarker diff --git a/core/bin/block_reverter/src/main.rs b/core/bin/block_reverter/src/main.rs index 767407c35b35..9a687fc890f4 100644 --- a/core/bin/block_reverter/src/main.rs +++ b/core/bin/block_reverter/src/main.rs @@ -8,7 +8,7 @@ use zksync_config::{ use zksync_core::block_reverter::{ BlockReverter, BlockReverterEthConfig, BlockReverterFlags, L1ExecutedBatchesRevert, NodeRole, }; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use zksync_env_config::FromEnv; use zksync_types::{L1BatchNumber, U256}; @@ -96,7 +96,7 @@ async fn main() -> anyhow::Result<()> { let postgres_config = PostgresConfig::from_env().context("PostgresConfig::from_env()")?; let config = BlockReverterEthConfig::new(eth_sender, contracts, eth_client.web3_url.clone()); - let connection_pool = ConnectionPool::builder( + let connection_pool = ConnectionPool::::builder( postgres_config.master_url()?, postgres_config.max_connections()?, ) diff --git a/core/bin/contract-verifier/src/main.rs b/core/bin/contract-verifier/src/main.rs index 9030236cb52d..c8977f99daf2 100644 --- a/core/bin/contract-verifier/src/main.rs +++ b/core/bin/contract-verifier/src/main.rs @@ -8,7 +8,7 @@ use zksync_config::{ configs::{ObservabilityConfig, PrometheusConfig}, ApiConfig, ContractVerifierConfig, PostgresConfig, }; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use zksync_env_config::FromEnv; use zksync_queued_job_processor::JobProcessor; use zksync_utils::wait_for_tasks::wait_for_tasks; @@ -20,7 +20,7 @@ pub mod 
verifier; pub mod zksolc_utils; pub mod zkvyper_utils; -async fn update_compiler_versions(connection_pool: &ConnectionPool) { +async fn update_compiler_versions(connection_pool: &ConnectionPool) { let mut storage = connection_pool.access_storage().await.unwrap(); let mut transaction = storage.start_transaction().await.unwrap(); @@ -134,7 +134,7 @@ async fn main() -> anyhow::Result<()> { ..ApiConfig::from_env().context("ApiConfig")?.prometheus }; let postgres_config = PostgresConfig::from_env().context("PostgresConfig")?; - let pool = ConnectionPool::singleton( + let pool = ConnectionPool::::singleton( postgres_config .master_url() .context("Master DB URL is absent")?, diff --git a/core/bin/contract-verifier/src/verifier.rs b/core/bin/contract-verifier/src/verifier.rs index ab9451240f78..55ac7177f6d5 100644 --- a/core/bin/contract-verifier/src/verifier.rs +++ b/core/bin/contract-verifier/src/verifier.rs @@ -12,7 +12,7 @@ use lazy_static::lazy_static; use regex::Regex; use tokio::time; use zksync_config::ContractVerifierConfig; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_env_config::FromEnv; use zksync_queued_job_processor::{async_trait, JobProcessor}; use zksync_types::{ @@ -42,11 +42,11 @@ enum ConstructorArgs { #[derive(Debug)] pub struct ContractVerifier { config: ContractVerifierConfig, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, } impl ContractVerifier { - pub fn new(config: ContractVerifierConfig, connection_pool: ConnectionPool) -> Self { + pub fn new(config: ContractVerifierConfig, connection_pool: ConnectionPool) -> Self { Self { config, connection_pool, @@ -54,7 +54,7 @@ impl ContractVerifier { } async fn verify( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, mut request: VerificationRequest, config: ContractVerifierConfig, ) -> Result { @@ -429,7 +429,7 @@ impl ContractVerifier { } async fn process_result( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, request_id: usize, verification_result: Result, ) { diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 491e60b58fb0..d9c391e168af 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -13,6 +13,7 @@ publish = false # We don't want to publish our binaries. 
[dependencies] zksync_core = { path = "../../lib/zksync_core" } zksync_dal = { path = "../../lib/dal" } +zksync_db_connection = { path = "../../lib/db_connection" } zksync_config = { path = "../../lib/config" } zksync_storage = { path = "../../lib/storage" } zksync_utils = { path = "../../lib/utils" } @@ -21,7 +22,7 @@ zksync_basic_types = { path = "../../lib/basic_types" } zksync_contracts = { path = "../../lib/contracts" } zksync_l1_contract_interface = { path = "../../lib/l1_contract_interface" } zksync_snapshots_applier = { path = "../../lib/snapshots_applier" } -zksync_object_store = { path="../../lib/object_store" } +zksync_object_store = { path = "../../lib/object_store" } prometheus_exporter = { path = "../../lib/prometheus_exporter" } zksync_health_check = { path = "../../lib/health_check" } zksync_web3_decl = { path = "../../lib/web3_decl" } diff --git a/core/bin/external_node/src/init.rs b/core/bin/external_node/src/init.rs index e8339f5afa39..de8844cd2d22 100644 --- a/core/bin/external_node/src/init.rs +++ b/core/bin/external_node/src/init.rs @@ -3,7 +3,7 @@ use anyhow::Context as _; use zksync_basic_types::{L1BatchNumber, L2ChainId}; use zksync_core::sync_layer::genesis::perform_genesis_if_needed; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use zksync_health_check::AppHealthCheck; use zksync_object_store::ObjectStoreFactory; use zksync_snapshots_applier::SnapshotsApplierConfig; @@ -20,7 +20,7 @@ enum InitDecision { } pub(crate) async fn ensure_storage_initialized( - pool: &ConnectionPool, + pool: &ConnectionPool, main_node_client: &HttpClient, app_health: &AppHealthCheck, l2_chain_id: L2ChainId, diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index a6a0d1ff7442..a6d14851face 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -33,7 +33,8 @@ use zksync_core::{ MainNodeClient, SyncState, }, }; -use zksync_dal::{healthcheck::ConnectionPoolHealthCheck, ConnectionPool}; +use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Server, ServerDals}; +use zksync_db_connection::healthcheck::ConnectionPoolHealthCheck; use zksync_health_check::{AppHealthCheck, HealthStatus, ReactiveHealthCheck}; use zksync_state::PostgresStorageCaches; use zksync_storage::RocksDB; @@ -59,7 +60,7 @@ async fn build_state_keeper( action_queue: ActionQueue, state_keeper_db_path: String, config: &ExternalNodeConfig, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, sync_state: SyncState, l2_erc20_bridge_addr: Address, miniblock_sealer_handle: MiniblockSealerHandle, @@ -111,7 +112,7 @@ async fn build_state_keeper( async fn init_tasks( config: &ExternalNodeConfig, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, main_node_client: HttpClient, task_handles: &mut Vec>>, app_health: &AppHealthCheck, @@ -220,7 +221,7 @@ async fn init_tasks( } })); - let singleton_pool_builder = ConnectionPool::singleton(&config.postgres.database_url); + let singleton_pool_builder = ConnectionPool::::singleton(&config.postgres.database_url); let metadata_calculator_config = MetadataCalculatorConfig { db_path: config.required.merkle_tree_path.clone(), @@ -495,13 +496,13 @@ async fn main() -> anyhow::Result<()> { config.consensus = None; } if let Some(threshold) = config.optional.slow_query_threshold() { - ConnectionPool::global_config().set_slow_query_threshold(threshold)?; + ConnectionPool::::global_config().set_slow_query_threshold(threshold)?; } if let Some(threshold) = 
config.optional.long_connection_threshold() { - ConnectionPool::global_config().set_long_connection_threshold(threshold)?; + ConnectionPool::::global_config().set_long_connection_threshold(threshold)?; } - let connection_pool = ConnectionPool::builder( + let connection_pool = ConnectionPool::::builder( &config.postgres.database_url, config.postgres.max_connections, ) @@ -540,9 +541,7 @@ async fn main() -> anyhow::Result<()> { // Start scraping Postgres metrics before store initialization as well. let metrics_pool = connection_pool.clone(); let mut task_handles = vec![tokio::spawn(async move { - metrics_pool - .run_postgres_metrics_scraping(Duration::from_secs(60)) - .await; + PostgresMetrics::run_scraping(metrics_pool, Duration::from_secs(60)).await; Ok(()) })]; diff --git a/core/bin/snapshots_creator/src/creator.rs b/core/bin/snapshots_creator/src/creator.rs index 0e716b5c99e1..753a9bf94029 100644 --- a/core/bin/snapshots_creator/src/creator.rs +++ b/core/bin/snapshots_creator/src/creator.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use anyhow::Context as _; use tokio::sync::Semaphore; use zksync_config::SnapshotsCreatorConfig; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_object_store::ObjectStore; use zksync_types::{ snapshots::{ @@ -60,14 +60,14 @@ impl SnapshotProgress { #[derive(Debug)] pub(crate) struct SnapshotCreator { pub blob_store: Arc, - pub master_pool: ConnectionPool, - pub replica_pool: ConnectionPool, + pub master_pool: ConnectionPool, + pub replica_pool: ConnectionPool, #[cfg(test)] pub event_listener: Box, } impl SnapshotCreator { - async fn connect_to_replica(&self) -> anyhow::Result> { + async fn connect_to_replica(&self) -> anyhow::Result> { self.replica_pool .access_storage_tagged("snapshots_creator") .await @@ -192,7 +192,7 @@ impl SnapshotCreator { config: &SnapshotsCreatorConfig, min_chunk_count: u64, latest_snapshot: Option<&SnapshotMetadata>, - conn: &mut StorageProcessor<'_>, + conn: &mut StorageProcessor<'_, Server>, ) -> anyhow::Result> { // We subtract 1 so that after restore, EN node has at least one L1 batch to fetch let sealed_l1_batch_number = conn.blocks_dal().get_sealed_l1_batch_number().await?; diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs index 8d6ff39a93d1..5f36ca1bb970 100644 --- a/core/bin/snapshots_creator/src/main.rs +++ b/core/bin/snapshots_creator/src/main.rs @@ -16,7 +16,7 @@ use zksync_config::{ configs::{ObservabilityConfig, PrometheusConfig}, PostgresConfig, SnapshotsCreatorConfig, }; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use zksync_env_config::{object_store::SnapshotsObjectStoreConfig, FromEnv}; use zksync_object_store::ObjectStoreFactory; @@ -81,14 +81,14 @@ async fn main() -> anyhow::Result<()> { let creator_config = SnapshotsCreatorConfig::from_env().context("SnapshotsCreatorConfig::from_env")?; - let replica_pool = ConnectionPool::builder( + let replica_pool = ConnectionPool::::builder( postgres_config.replica_url()?, creator_config.concurrent_queries_count, ) .build() .await?; - let master_pool = ConnectionPool::singleton(postgres_config.master_url()?) + let master_pool = ConnectionPool::::singleton(postgres_config.master_url()?) 
.build() .await?; diff --git a/core/bin/snapshots_creator/src/tests.rs b/core/bin/snapshots_creator/src/tests.rs index b12002e945e6..9d2aab690293 100644 --- a/core/bin/snapshots_creator/src/tests.rs +++ b/core/bin/snapshots_creator/src/tests.rs @@ -10,7 +10,7 @@ use std::{ }; use rand::{thread_rng, Rng}; -use zksync_dal::StorageProcessor; +use zksync_dal::{ServerDals, StorageProcessor}; use zksync_object_store::ObjectStore; use zksync_types::{ block::{L1BatchHeader, MiniblockHeader}, @@ -61,7 +61,7 @@ impl HandleEvent for TestEventListener { } impl SnapshotCreator { - fn for_tests(blob_store: Arc, pool: ConnectionPool) -> Self { + fn for_tests(blob_store: Arc, pool: ConnectionPool) -> Self { Self { blob_store, master_pool: pool.clone(), @@ -132,7 +132,7 @@ struct ExpectedOutputs { } async fn create_miniblock( - conn: &mut StorageProcessor<'_>, + conn: &mut StorageProcessor<'_, Server>, miniblock_number: MiniblockNumber, block_logs: Vec, ) { @@ -162,7 +162,7 @@ async fn create_miniblock( } async fn create_l1_batch( - conn: &mut StorageProcessor<'_>, + conn: &mut StorageProcessor<'_, Server>, l1_batch_number: L1BatchNumber, logs_for_initial_writes: &[StorageLog], ) { @@ -186,7 +186,7 @@ async fn create_l1_batch( async fn prepare_postgres( rng: &mut impl Rng, - conn: &mut StorageProcessor<'_>, + conn: &mut StorageProcessor<'_, Server>, block_count: u32, ) -> ExpectedOutputs { conn.protocol_versions_dal() @@ -241,7 +241,7 @@ async fn prepare_postgres( #[tokio::test] async fn persisting_snapshot_metadata() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut rng = thread_rng(); let object_store_factory = ObjectStoreFactory::mock(); let object_store = object_store_factory.create_store().await; @@ -290,7 +290,7 @@ async fn persisting_snapshot_metadata() { #[tokio::test] async fn persisting_snapshot_factory_deps() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut rng = thread_rng(); let object_store_factory = ObjectStoreFactory::mock(); let object_store = object_store_factory.create_store().await; @@ -312,7 +312,7 @@ async fn persisting_snapshot_factory_deps() { #[tokio::test] async fn persisting_snapshot_logs() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut rng = thread_rng(); let object_store_factory = ObjectStoreFactory::mock(); let object_store = object_store_factory.create_store().await; @@ -348,7 +348,7 @@ async fn assert_storage_logs( #[tokio::test] async fn recovery_workflow() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut rng = thread_rng(); let object_store_factory = ObjectStoreFactory::mock(); let object_store = object_store_factory.create_store().await; @@ -414,7 +414,7 @@ async fn recovery_workflow() { #[tokio::test] async fn recovery_workflow_with_varying_chunk_size() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut rng = thread_rng(); let object_store_factory = ObjectStoreFactory::mock(); let object_store = object_store_factory.create_store().await; diff --git a/core/bin/verified_sources_fetcher/src/main.rs b/core/bin/verified_sources_fetcher/src/main.rs index 458b138cc282..c24624636a6c 100644 --- a/core/bin/verified_sources_fetcher/src/main.rs +++ b/core/bin/verified_sources_fetcher/src/main.rs @@ -1,14 +1,14 @@ use std::io::Write; use zksync_config::PostgresConfig; -use 
zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use zksync_env_config::FromEnv; use zksync_types::contract_verification_api::SourceCodeData; #[tokio::main] async fn main() { let config = PostgresConfig::from_env().unwrap(); - let pool = ConnectionPool::singleton(config.replica_url().unwrap()) + let pool = ConnectionPool::::singleton(config.replica_url().unwrap()) .build() .await .unwrap(); diff --git a/core/lib/basic_types/Cargo.toml b/core/lib/basic_types/Cargo.toml index 4e8d8af8c15a..c7f13e15f084 100644 --- a/core/lib/basic_types/Cargo.toml +++ b/core/lib/basic_types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zksync_basic_types" -version="0.1.0" +version = "0.1.0" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" @@ -10,6 +10,9 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] -web3 = { version= "0.19.0", default-features = false, features = ["http-rustls-tls", "test", "signing"] } +web3 = { version = "0.19.0", default-features = false, features = ["http-rustls-tls", "test", "signing"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" +chrono = "0.4" +strum = { version = "0.24", features = ["derive"] } +num_enum = "0.7.2" diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index 5c8b4e6ee69a..4c0b1c7f9e96 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -20,6 +20,9 @@ pub use web3::{ mod macros; pub mod basic_fri_types; pub mod network; +pub mod protocol_version; +pub mod prover_dal; +pub mod vm_version; /// Account place in the global state tree is uniquely identified by its address. /// Binary this type is represented by 160 bit big-endian representation of account address. diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs new file mode 100644 index 000000000000..a4b4e94fb8b5 --- /dev/null +++ b/core/lib/basic_types/src/protocol_version.rs @@ -0,0 +1,285 @@ +use std::convert::{TryFrom, TryInto}; + +use num_enum::TryFromPrimitive; +use serde::{Deserialize, Serialize}; +use web3::contract::{tokens::Detokenize, Error}; + +use crate::{ethabi::Token, vm_version::VmVersion, H256, U256}; + +#[repr(u16)] +#[derive( + Debug, + Clone, + Copy, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + TryFromPrimitive, + Serialize, + Deserialize, +)] +pub enum ProtocolVersionId { + Version0 = 0, + Version1, + Version2, + Version3, + Version4, + Version5, + Version6, + Version7, + Version8, + Version9, + Version10, + Version11, + Version12, + Version13, + Version14, + Version15, + Version16, + Version17, + Version18, + Version19, + Version20, + Version21, + Version22, +} + +impl ProtocolVersionId { + pub fn latest() -> Self { + Self::Version21 + } + + pub fn next() -> Self { + Self::Version22 + } + + /// Returns VM version to be used by API for this protocol version. + /// We temporary support only two latest VM versions for API. 
+ pub fn into_api_vm_version(self) -> VmVersion { + match self { + ProtocolVersionId::Version0 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version1 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version2 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version3 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version4 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version5 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version6 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version7 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version8 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version9 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version10 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version11 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version12 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version13 => VmVersion::VmVirtualBlocks, + ProtocolVersionId::Version14 => VmVersion::VmVirtualBlocks, + ProtocolVersionId::Version15 => VmVersion::VmVirtualBlocks, + ProtocolVersionId::Version16 => VmVersion::VmVirtualBlocksRefundsEnhancement, + ProtocolVersionId::Version17 => VmVersion::VmVirtualBlocksRefundsEnhancement, + ProtocolVersionId::Version18 => VmVersion::VmBoojumIntegration, + ProtocolVersionId::Version19 => VmVersion::VmBoojumIntegration, + ProtocolVersionId::Version20 => VmVersion::Vm1_4_1, + ProtocolVersionId::Version21 => VmVersion::Vm1_4_2, + ProtocolVersionId::Version22 => VmVersion::Vm1_4_2, + } + } + + // It is possible that some external nodes do not store protocol versions for versions below 9. + // That's why we assume that whenever a protocol version is not present, version 9 is to be used. + pub fn last_potentially_undefined() -> Self { + Self::Version9 + } + + pub fn is_pre_boojum(&self) -> bool { + self <= &Self::Version17 + } + + pub fn is_pre_shared_bridge(&self) -> bool { + // TODO: review this when we actually deploy shared bridge + true + } + + pub fn is_1_4_0(&self) -> bool { + self >= &ProtocolVersionId::Version18 && self < &ProtocolVersionId::Version20 + } + + pub fn is_1_4_1(&self) -> bool { + self == &ProtocolVersionId::Version20 + } + + pub fn is_post_1_4_1(&self) -> bool { + self >= &ProtocolVersionId::Version20 + } + + pub fn is_post_1_4_2(&self) -> bool { + self >= &ProtocolVersionId::Version21 + } + + pub fn is_pre_1_4_2(&self) -> bool { + self < &ProtocolVersionId::Version21 + } +} + +impl Default for ProtocolVersionId { + fn default() -> Self { + Self::latest() + } +} + +impl TryFrom for ProtocolVersionId { + type Error = String; + + fn try_from(value: U256) -> Result { + if value > U256::from(u16::MAX) { + Err(format!("unknown protocol version ID: {}", value)) + } else { + (value.as_u32() as u16) + .try_into() + .map_err(|_| format!("unknown protocol version ID: {}", value)) + } + } +} + +// TODO: Do we even need this? I reckon we could merge this with `ProtocolVersionId`. 
+#[repr(u16)] +#[derive( + Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, TryFromPrimitive, Serialize, Deserialize, +)] +pub enum FriProtocolVersionId { + Version0 = 0, + Version1, + Version2, + Version3, + Version4, + Version5, + Version6, + Version7, + Version8, + Version9, + Version10, + Version11, + Version12, + Version13, + Version14, + Version15, + Version16, + Version17, + Version18, + Version19, + Version20, + Version21, + Version22, +} + +impl FriProtocolVersionId { + pub fn latest() -> Self { + Self::Version21 + } + + pub fn next() -> Self { + Self::Version22 + } +} + +impl Default for FriProtocolVersionId { + fn default() -> Self { + Self::latest() + } +} + +impl From for FriProtocolVersionId { + fn from(protocol_version: ProtocolVersionId) -> Self { + match protocol_version { + ProtocolVersionId::Version0 => FriProtocolVersionId::Version0, + ProtocolVersionId::Version1 => FriProtocolVersionId::Version1, + ProtocolVersionId::Version2 => FriProtocolVersionId::Version2, + ProtocolVersionId::Version3 => FriProtocolVersionId::Version3, + ProtocolVersionId::Version4 => FriProtocolVersionId::Version4, + ProtocolVersionId::Version5 => FriProtocolVersionId::Version5, + ProtocolVersionId::Version6 => FriProtocolVersionId::Version6, + ProtocolVersionId::Version7 => FriProtocolVersionId::Version7, + ProtocolVersionId::Version8 => FriProtocolVersionId::Version8, + ProtocolVersionId::Version9 => FriProtocolVersionId::Version9, + ProtocolVersionId::Version10 => FriProtocolVersionId::Version10, + ProtocolVersionId::Version11 => FriProtocolVersionId::Version11, + ProtocolVersionId::Version12 => FriProtocolVersionId::Version12, + ProtocolVersionId::Version13 => FriProtocolVersionId::Version13, + ProtocolVersionId::Version14 => FriProtocolVersionId::Version14, + ProtocolVersionId::Version15 => FriProtocolVersionId::Version15, + ProtocolVersionId::Version16 => FriProtocolVersionId::Version16, + ProtocolVersionId::Version17 => FriProtocolVersionId::Version17, + ProtocolVersionId::Version18 => FriProtocolVersionId::Version18, + ProtocolVersionId::Version19 => FriProtocolVersionId::Version19, + ProtocolVersionId::Version20 => FriProtocolVersionId::Version20, + ProtocolVersionId::Version21 => FriProtocolVersionId::Version21, + ProtocolVersionId::Version22 => FriProtocolVersionId::Version22, + } + } +} + +#[derive(Debug, Clone, Copy, Default, Eq, PartialEq, Serialize, Deserialize)] +pub struct VerifierParams { + pub recursion_node_level_vk_hash: H256, + pub recursion_leaf_level_vk_hash: H256, + pub recursion_circuits_set_vks_hash: H256, +} + +impl Detokenize for VerifierParams { + fn from_tokens(tokens: Vec) -> Result { + if tokens.len() != 1 { + return Err(Error::Abi(crate::ethabi::Error::InvalidData)); + } + + let tokens = match tokens[0].clone() { + Token::Tuple(tokens) => tokens, + _ => return Err(Error::Abi(crate::ethabi::Error::InvalidData)), + }; + + let vks_vec: Vec = tokens + .into_iter() + .map(|token| H256::from_slice(&token.into_fixed_bytes().unwrap())) + .collect(); + Ok(VerifierParams { + recursion_node_level_vk_hash: vks_vec[0], + recursion_leaf_level_vk_hash: vks_vec[1], + recursion_circuits_set_vks_hash: vks_vec[2], + }) + } +} + +#[derive(Debug, Clone, Copy, Default, Eq, PartialEq, Serialize, Deserialize)] +pub struct L1VerifierConfig { + pub params: VerifierParams, + pub recursion_scheduler_level_vk_hash: H256, +} + +impl From for VmVersion { + fn from(value: ProtocolVersionId) -> Self { + match value { + ProtocolVersionId::Version0 => VmVersion::M5WithoutRefunds, + 
ProtocolVersionId::Version1 => VmVersion::M5WithoutRefunds, + ProtocolVersionId::Version2 => VmVersion::M5WithRefunds, + ProtocolVersionId::Version3 => VmVersion::M5WithRefunds, + ProtocolVersionId::Version4 => VmVersion::M6Initial, + ProtocolVersionId::Version5 => VmVersion::M6BugWithCompressionFixed, + ProtocolVersionId::Version6 => VmVersion::M6BugWithCompressionFixed, + ProtocolVersionId::Version7 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version8 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version9 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version10 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version11 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version12 => VmVersion::Vm1_3_2, + ProtocolVersionId::Version13 => VmVersion::VmVirtualBlocks, + ProtocolVersionId::Version14 => VmVersion::VmVirtualBlocks, + ProtocolVersionId::Version15 => VmVersion::VmVirtualBlocks, + ProtocolVersionId::Version16 => VmVersion::VmVirtualBlocksRefundsEnhancement, + ProtocolVersionId::Version17 => VmVersion::VmVirtualBlocksRefundsEnhancement, + ProtocolVersionId::Version18 => VmVersion::VmBoojumIntegration, + ProtocolVersionId::Version19 => VmVersion::VmBoojumIntegration, + ProtocolVersionId::Version20 => VmVersion::Vm1_4_1, + ProtocolVersionId::Version21 => VmVersion::Vm1_4_2, + ProtocolVersionId::Version22 => VmVersion::Vm1_4_2, + } + } +} diff --git a/core/lib/basic_types/src/prover_dal.rs b/core/lib/basic_types/src/prover_dal.rs new file mode 100644 index 000000000000..827d1942b6a7 --- /dev/null +++ b/core/lib/basic_types/src/prover_dal.rs @@ -0,0 +1,217 @@ +//! Types exposed by the prover DAL for general-purpose use. +use std::{net::IpAddr, ops::Add}; + +use chrono::{DateTime, Duration, Utc}; + +use crate::{basic_fri_types::AggregationRound, L1BatchNumber}; + +// This currently lives in `zksync_prover_types` -- we don't want a dependency between prover types (`zkevm_test_harness`) and DAL. +// This will be gone as part of 1.5.0, when EIP4844 becomes normal jobs, rather than special cased ones. +pub const EIP_4844_CIRCUIT_ID: u8 = 255; + +#[derive(Debug, Clone)] +pub struct FriProverJobMetadata { + pub id: u32, + pub block_number: L1BatchNumber, + pub circuit_id: u8, + pub aggregation_round: AggregationRound, + pub sequence_number: usize, + pub depth: u16, + pub is_node_final_proof: bool, +} + +#[derive(Debug, Clone, Copy, Default)] +pub struct JobCountStatistics { + pub queued: usize, + pub in_progress: usize, + pub failed: usize, + pub successful: usize, +} + +impl Add for JobCountStatistics { + type Output = JobCountStatistics; + + fn add(self, rhs: Self) -> Self::Output { + Self { + queued: self.queued + rhs.queued, + in_progress: self.in_progress + rhs.in_progress, + failed: self.failed + rhs.failed, + successful: self.successful + rhs.successful, + } + } +} + +#[derive(Debug)] +pub struct StuckJobs { + pub id: u64, + pub status: String, + pub attempts: u64, +} + +// TODO (PLA-774): Redundant structure, should be replaced with `std::net::SocketAddr`. 
+#[derive(Debug, Clone)] +pub struct SocketAddress { + pub host: IpAddr, + pub port: u16, +} + +impl From<SocketAddress> for std::net::SocketAddr { + fn from(socket_address: SocketAddress) -> Self { + Self::new(socket_address.host, socket_address.port) + } +} + +impl From<std::net::SocketAddr> for SocketAddress { + fn from(socket_address: std::net::SocketAddr) -> Self { + Self { + host: socket_address.ip(), + port: socket_address.port(), + } + } +} + +#[derive(Debug, Clone)] +pub struct LeafAggregationJobMetadata { + pub id: u32, + pub block_number: L1BatchNumber, + pub circuit_id: u8, + pub prover_job_ids_for_proofs: Vec<u32>, +} + +#[derive(Debug, Clone)] +pub struct NodeAggregationJobMetadata { + pub id: u32, + pub block_number: L1BatchNumber, + pub circuit_id: u8, + pub depth: u16, + pub prover_job_ids_for_proofs: Vec<u32>, +} + +#[derive(Debug)] +pub struct JobPosition { + pub aggregation_round: AggregationRound, + pub sequence_number: usize, +} + +#[derive(Debug, Default)] +pub struct ProverJobStatusFailed { + pub started_at: DateTime<Utc>, + pub error: String, +} + +#[derive(Debug)] +pub struct ProverJobStatusSuccessful { + pub started_at: DateTime<Utc>, + pub time_taken: Duration, +} + +impl Default for ProverJobStatusSuccessful { + fn default() -> Self { + ProverJobStatusSuccessful { + started_at: DateTime::default(), + time_taken: Duration::zero(), + } + } +} + +#[derive(Debug, Default)] +pub struct ProverJobStatusInProgress { + pub started_at: DateTime<Utc>, +} + +#[derive(Debug)] +pub struct WitnessJobStatusSuccessful { + pub started_at: DateTime<Utc>, + pub time_taken: Duration, +} + +impl Default for WitnessJobStatusSuccessful { + fn default() -> Self { + WitnessJobStatusSuccessful { + started_at: DateTime::default(), + time_taken: Duration::zero(), + } + } +} + +#[derive(Debug, Default)] +pub struct WitnessJobStatusFailed { + pub started_at: DateTime<Utc>, + pub error: String, +} + +#[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)] +pub enum ProverJobStatus { + #[strum(serialize = "queued")] + Queued, + #[strum(serialize = "in_progress")] + InProgress(ProverJobStatusInProgress), + #[strum(serialize = "successful")] + Successful(ProverJobStatusSuccessful), + #[strum(serialize = "failed")] + Failed(ProverJobStatusFailed), + #[strum(serialize = "skipped")] + Skipped, + #[strum(serialize = "ignored")] + Ignored, +} + +#[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)] +pub enum WitnessJobStatus { + #[strum(serialize = "failed")] + Failed(WitnessJobStatusFailed), + #[strum(serialize = "skipped")] + Skipped, + #[strum(serialize = "successful")] + Successful(WitnessJobStatusSuccessful), + #[strum(serialize = "waiting_for_artifacts")] + WaitingForArtifacts, + #[strum(serialize = "waiting_for_proofs")] + WaitingForProofs, + #[strum(serialize = "in_progress")] + InProgress, + #[strum(serialize = "queued")] + Queued, +} + +#[derive(Debug)] +pub struct WitnessJobInfo { + pub block_number: L1BatchNumber, + pub created_at: DateTime<Utc>, + pub updated_at: DateTime<Utc>, + pub status: WitnessJobStatus, + pub position: JobPosition, +} + +#[derive(Debug)] +pub struct ProverJobInfo { + pub id: u32, + pub block_number: L1BatchNumber, + pub circuit_type: String, + pub position: JobPosition, + pub input_length: u64, + pub status: ProverJobStatus, + pub attempts: u32, + pub created_at: DateTime<Utc>, + pub updated_at: DateTime<Utc>, +} + +#[derive(Debug)] +pub struct JobExtendedStatistics { + pub successful_padding: L1BatchNumber, + pub queued_padding: L1BatchNumber, + pub queued_padding_len: u32, + pub active_area: Vec<ProverJobInfo>, +} + +#[derive(Debug, Copy,
Clone)] +pub enum GpuProverInstanceStatus { + // The instance is available for processing. + Available, + // The instance is running at full capacity. + Full, + // The instance is reserved by a synthesizer. + Reserved, + // The instance is not alive anymore. + Dead, +} diff --git a/core/lib/types/src/vm_version.rs b/core/lib/basic_types/src/vm_version.rs similarity index 100% rename from core/lib/types/src/vm_version.rs rename to core/lib/basic_types/src/vm_version.rs diff --git a/core/lib/circuit_breaker/src/l1_txs.rs b/core/lib/circuit_breaker/src/l1_txs.rs index 5d3c4dc9ccf3..b31b742c0cfb 100644 --- a/core/lib/circuit_breaker/src/l1_txs.rs +++ b/core/lib/circuit_breaker/src/l1_txs.rs @@ -1,10 +1,10 @@ -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use crate::{CircuitBreaker, CircuitBreakerError}; #[derive(Debug)] pub struct FailedL1TransactionChecker { - pub pool: ConnectionPool, + pub pool: ConnectionPool<Server>, } #[async_trait::async_trait] diff --git a/core/lib/circuit_breaker/src/replication_lag.rs b/core/lib/circuit_breaker/src/replication_lag.rs index 244c53349cee..b12686e54edc 100644 --- a/core/lib/circuit_breaker/src/replication_lag.rs +++ b/core/lib/circuit_breaker/src/replication_lag.rs @@ -1,10 +1,10 @@ -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use crate::{CircuitBreaker, CircuitBreakerError}; #[derive(Debug)] pub struct ReplicationLagChecker { - pub pool: ConnectionPool, + pub pool: ConnectionPool<Server>, pub replication_lag_limit_sec: Option<u32>, } diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index e004a84e53e4..f7817c448029 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -17,15 +17,14 @@ zksync_utils = { path = "../utils" } zksync_system_constants = { path = "../constants" } zksync_contracts = { path = "../contracts" } zksync_types = { path = "../types" } -zksync_health_check = { path = "../health_check" } zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5329a809cfc06d4939fb5ece26c9ad1e1741c50a" } zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5329a809cfc06d4939fb5ece26c9ad1e1741c50a" } zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5329a809cfc06d4939fb5ece26c9ad1e1741c50a" } +zksync_db_connection = { path = "../db_connection" } itertools = "0.10.1" thiserror = "1.0" anyhow = "1.0" -url = "2" prost = "0.12.1" rand = "0.8" tokio = { version = "1", features = ["full"] } @@ -46,13 +45,9 @@ serde_json = "1.0" bigdecimal = "0.3.0" bincode = "1" hex = "0.4" -once_cell = "1.7" strum = { version = "0.24", features = ["derive"] } tracing = "0.1" chrono = { version = "0.4", features = ["serde"] } -[dev-dependencies] -assert_matches = "1.5.0" - [build-dependencies] zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5329a809cfc06d4939fb5ece26c9ad1e1741c50a" } diff --git a/core/lib/dal/README.md b/core/lib/dal/README.md index 161afbec3bf3..2fb03150ce29 100644 --- a/core/lib/dal/README.md +++ b/core/lib/dal/README.md @@ -90,8 +90,8 @@ Some tips and tricks to make contributing to DAL easier: - If you want to add a new DB query, search the DAL code or the [`.sqlx`](.sqlx) directory for the identical / equivalent queries. Reuse is almost always better than duplication.
-- It usually makes sense to instrument your queries using [`instrument`](src/instrument.rs) tooling. See the - `instrument` module docs for details. +- It usually makes sense to instrument your queries using [`instrument`](../db_connection/src/instrument.rs) tooling. + See the `instrument` module docs for details. - It's best to cover added queries with unit tests to ensure they work and don't break in the future. `sqlx` has compile-time schema checking, but it's not a panacea. - If there are doubts as to the query performance, run a query with [`EXPLAIN`] / `EXPLAIN ANALYZE` prefixes against a diff --git a/core/lib/dal/src/basic_witness_input_producer_dal.rs b/core/lib/dal/src/basic_witness_input_producer_dal.rs index 99eac71a1d5f..56666a836899 100644 --- a/core/lib/dal/src/basic_witness_input_producer_dal.rs +++ b/core/lib/dal/src/basic_witness_input_producer_dal.rs @@ -2,17 +2,18 @@ use std::time::{Duration, Instant}; use sqlx::postgres::types::PgInterval; -use zksync_types::L1BatchNumber; - -use crate::{ +use zksync_db_connection::{ instrument::InstrumentExt, - time_utils::{duration_to_naive_time, pg_interval_from_duration}, - StorageProcessor, + processor::StorageProcessor, + utils::{duration_to_naive_time, pg_interval_from_duration}, }; +use zksync_types::L1BatchNumber; + +use crate::Server; #[derive(Debug)] pub struct BasicWitnessInputProducerDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } /// The amount of attempts to process a job before giving up. diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 8b2463acc1fb..71c78d63189e 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -6,6 +6,9 @@ use std::{ use anyhow::Context as _; use bigdecimal::{BigDecimal, FromPrimitive, ToPrimitive}; +use zksync_db_connection::{ + instrument::InstrumentExt, interpolate_query, match_query_as, processor::StorageProcessor, +}; use zksync_types::{ aggregated_operations::AggregatedActionType, block::{BlockGasCount, L1BatchHeader, L1BatchTreeData, MiniblockHeader}, @@ -16,14 +19,13 @@ use zksync_types::{ }; use crate::{ - instrument::InstrumentExt, models::storage_block::{StorageL1Batch, StorageL1BatchHeader, StorageMiniblockHeader}, - StorageProcessor, + Server, ServerDals, }; #[derive(Debug)] pub struct BlocksDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } impl BlocksDal<'_, '_> { @@ -2348,11 +2350,11 @@ mod tests { }; use super::*; - use crate::{tests::create_miniblock_header, ConnectionPool}; + use crate::{tests::create_miniblock_header, ConnectionPool, Server, ServerDals}; #[tokio::test] async fn loading_l1_batch_header() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -2408,7 +2410,7 @@ mod tests { #[tokio::test] async fn getting_predicted_gas() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -2469,7 +2471,7 @@ mod tests { #[allow(deprecated)] // that's the whole point #[tokio::test] async fn checking_fee_account_address_in_l1_batches() { - let pool = ConnectionPool::test_pool().await; + let 
pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); assert!(conn .blocks_dal() @@ -2481,7 +2483,7 @@ mod tests { #[allow(deprecated)] // that's the whole point #[tokio::test] async fn ensuring_fee_account_address_for_miniblocks() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index a8cc7a3f8795..d5ee2a6b0e21 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -1,3 +1,6 @@ +use zksync_db_connection::{ + instrument::InstrumentExt, interpolate_query, match_query_as, processor::StorageProcessor, +}; use zksync_system_constants::EMPTY_UNCLES_HASH; use zksync_types::{ api, @@ -9,19 +12,18 @@ use zksync_types::{ use zksync_utils::bigdecimal_to_u256; use crate::{ - instrument::InstrumentExt, models::{ storage_block::{ResolvedL1BatchForMiniblock, StorageBlockDetails, StorageL1BatchDetails}, storage_transaction::CallTrace, }, - StorageProcessor, + Server, ServerDals, }; const BLOCK_GAS_LIMIT: u32 = u32::MAX; #[derive(Debug)] pub struct BlocksWeb3Dal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } impl BlocksWeb3Dal<'_, '_> { @@ -654,12 +656,12 @@ mod tests { create_miniblock_header, create_snapshot_recovery, mock_execution_result, mock_l2_transaction, }, - ConnectionPool, + ConnectionPool, Server, }; #[tokio::test] async fn getting_web3_block_and_tx_count() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.blocks_dal() .delete_miniblocks(MiniblockNumber(0)) @@ -707,7 +709,7 @@ mod tests { #[tokio::test] async fn resolving_earliest_block_id() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); let miniblock_number = conn @@ -733,7 +735,7 @@ mod tests { #[tokio::test] async fn resolving_latest_block_id() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -799,7 +801,7 @@ mod tests { #[tokio::test] async fn resolving_pending_block_id_for_snapshot_recovery() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); let snapshot_recovery = create_snapshot_recovery(); conn.snapshot_recovery_dal() @@ -817,7 +819,7 @@ mod tests { #[tokio::test] async fn resolving_block_by_hash() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -846,7 +848,7 @@ mod tests { #[tokio::test] async fn getting_traces_for_block() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let 
mut conn = connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index 4f9c71ff3310..95283ffa4fd5 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -1,15 +1,16 @@ use anyhow::Context as _; use zksync_consensus_roles::validator; use zksync_consensus_storage::ReplicaState; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::MiniblockNumber; pub use crate::models::consensus::Payload; -use crate::StorageProcessor; +use crate::{Server, ServerDals}; /// Storage access methods for `zksync_core::consensus` module. #[derive(Debug)] pub struct ConsensusDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub storage: &'a mut StorageProcessor<'c, Server>, } impl ConsensusDal<'_, '_> { @@ -329,12 +330,12 @@ mod tests { use zksync_consensus_roles::validator; use zksync_consensus_storage::ReplicaState; - use crate::ConnectionPool; + use crate::{ConnectionPool, Server, ServerDals}; #[tokio::test] async fn replica_state_read_write() { let rng = &mut rand::thread_rng(); - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); assert_eq!(None, conn.consensus_dal().genesis().await.unwrap()); for n in 0..3 { diff --git a/core/lib/dal/src/contract_verification_dal.rs b/core/lib/dal/src/contract_verification_dal.rs index 1f1f943fe79a..6dd985502127 100644 --- a/core/lib/dal/src/contract_verification_dal.rs +++ b/core/lib/dal/src/contract_verification_dal.rs @@ -6,6 +6,7 @@ use std::{ use anyhow::Context as _; use sqlx::postgres::types::PgInterval; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::{ contract_verification_api::{ DeployContractCalldata, VerificationIncomingRequest, VerificationInfo, VerificationRequest, @@ -14,11 +15,11 @@ use zksync_types::{ get_code_key, Address, CONTRACT_DEPLOYER_ADDRESS, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, }; -use crate::{models::storage_verification_request::StorageVerificationRequest, StorageProcessor}; +use crate::{models::storage_verification_request::StorageVerificationRequest, Server}; #[derive(Debug)] pub struct ContractVerificationDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } #[derive(Debug)] diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index 1dfbae8f0a74..db28c1712e50 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ -2,6 +2,7 @@ use std::{convert::TryFrom, str::FromStr}; use anyhow::Context as _; use sqlx::types::chrono::{DateTime, Utc}; +use zksync_db_connection::{interpolate_query, match_query_as, processor::StorageProcessor}; use zksync_types::{ aggregated_operations::AggregatedActionType, eth_sender::{EthTx, EthTxBlobSidecar, TxHistory, TxHistoryToSend}, @@ -12,12 +13,12 @@ use crate::{ models::storage_eth_tx::{ L1BatchEthSenderStats, StorageEthTx, StorageTxHistory, StorageTxHistoryToSend, }, - StorageProcessor, + Server, }; #[derive(Debug)] pub struct EthSenderDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } impl EthSenderDal<'_, '_> { diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs index 9414fc407f70..1300108021c6 100644 --- 
a/core/lib/dal/src/events_dal.rs +++ b/core/lib/dal/src/events_dal.rs @@ -1,6 +1,9 @@ use std::{collections::HashMap, fmt}; use sqlx::types::chrono::Utc; +use zksync_db_connection::{ + instrument::InstrumentExt, processor::StorageProcessor, write_str, writeln_str, +}; use zksync_system_constants::L1_MESSENGER_ADDRESS; use zksync_types::{ api, @@ -11,9 +14,8 @@ use zksync_types::{ }; use crate::{ - instrument::InstrumentExt, models::storage_event::{StorageL2ToL1Log, StorageWeb3Log}, - SqlxError, StorageProcessor, + Server, ServerDals, SqlxError, }; /// Wrapper around an optional event topic allowing to hex-format it for `COPY` instructions. @@ -32,7 +34,7 @@ impl fmt::LowerHex for EventTopic<'_> { #[derive(Debug)] pub struct EventsDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } impl EventsDal<'_, '_> { @@ -395,7 +397,7 @@ mod tests { use zksync_types::{Address, L1BatchNumber, ProtocolVersion}; use super::*; - use crate::{tests::create_miniblock_header, ConnectionPool}; + use crate::{tests::create_miniblock_header, ConnectionPool, Server}; fn create_vm_event(index: u8, topic_count: u8) -> VmEvent { assert!(topic_count <= 4); @@ -409,7 +411,7 @@ mod tests { #[tokio::test] async fn storing_events() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.events_dal().rollback_events(MiniblockNumber(0)).await; conn.blocks_dal() @@ -485,7 +487,7 @@ mod tests { #[tokio::test] async fn storing_l2_to_l1_logs() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.events_dal() .rollback_l2_to_l1_logs(MiniblockNumber(0)) diff --git a/core/lib/dal/src/events_web3_dal.rs b/core/lib/dal/src/events_web3_dal.rs index 8b5d5053aea1..1098c520347c 100644 --- a/core/lib/dal/src/events_web3_dal.rs +++ b/core/lib/dal/src/events_web3_dal.rs @@ -3,18 +3,17 @@ use sqlx::{ query::{Query, QueryAs}, Postgres, Row, }; +use zksync_db_connection::{instrument::InstrumentExt, processor::StorageProcessor}; use zksync_types::{ api::{GetLogsFilter, Log}, Address, MiniblockNumber, H256, }; -use crate::{ - instrument::InstrumentExt, models::storage_event::StorageWeb3Log, SqlxError, StorageProcessor, -}; +use crate::{models::storage_event::StorageWeb3Log, Server, SqlxError}; #[derive(Debug)] pub struct EventsWeb3Dal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } impl EventsWeb3Dal<'_, '_> { @@ -259,11 +258,11 @@ mod tests { use zksync_types::{Address, H256}; use super::*; - use crate::connection::ConnectionPool; + use crate::{ConnectionPool, Server}; #[tokio::test] async fn test_build_get_logs_where_clause() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let storage = &mut connection_pool.access_storage().await.unwrap(); let events_web3_dal = EventsWeb3Dal { storage }; let filter = GetLogsFilter { @@ -284,7 +283,7 @@ mod tests { #[tokio::test] async fn test_build_get_logs_with_multiple_topics_where_clause() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let storage = &mut connection_pool.access_storage().await.unwrap(); let events_web3_dal = EventsWeb3Dal { storage }; let filter = GetLogsFilter { @@ -318,7 +317,7 @@ mod tests 
{ #[tokio::test] async fn test_build_get_logs_with_no_address_where_clause() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let storage = &mut connection_pool.access_storage().await.unwrap(); let events_web3_dal = EventsWeb3Dal { storage }; let filter = GetLogsFilter { diff --git a/core/lib/dal/src/factory_deps_dal.rs b/core/lib/dal/src/factory_deps_dal.rs index b8dc1bbbfe75..7c415a23e740 100644 --- a/core/lib/dal/src/factory_deps_dal.rs +++ b/core/lib/dal/src/factory_deps_dal.rs @@ -2,15 +2,16 @@ use std::collections::{HashMap, HashSet}; use anyhow::Context as _; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::{MiniblockNumber, H256, U256}; use zksync_utils::{bytes_to_be_words, bytes_to_chunks}; -use crate::StorageProcessor; +use crate::Server; /// DAL methods related to factory dependencies. #[derive(Debug)] pub struct FactoryDepsDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } impl FactoryDepsDal<'_, '_> { diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index 345691c80170..7b3336608bf9 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -4,19 +4,15 @@ #![warn(clippy::cast_lossless)] pub use sqlx::{types::BigDecimal, Error as SqlxError}; +use zksync_db_connection::processor::StorageMarker; +pub use zksync_db_connection::{connection::ConnectionPool, processor::StorageProcessor}; -pub use crate::connection::{ConnectionPool, StorageProcessor}; use crate::{ basic_witness_input_producer_dal::BasicWitnessInputProducerDal, blocks_dal::BlocksDal, blocks_web3_dal::BlocksWeb3Dal, consensus_dal::ConsensusDal, contract_verification_dal::ContractVerificationDal, eth_sender_dal::EthSenderDal, events_dal::EventsDal, events_web3_dal::EventsWeb3Dal, factory_deps_dal::FactoryDepsDal, - fri_gpu_prover_queue_dal::FriGpuProverQueueDal, - fri_proof_compressor_dal::FriProofCompressorDal, - fri_protocol_versions_dal::FriProtocolVersionsDal, fri_prover_dal::FriProverDal, - fri_scheduler_dependency_tracker_dal::FriSchedulerDependencyTrackerDal, - fri_witness_generator_dal::FriWitnessGeneratorDal, proof_generation_dal::ProofGenerationDal, - protocol_versions_dal::ProtocolVersionsDal, + proof_generation_dal::ProofGenerationDal, protocol_versions_dal::ProtocolVersionsDal, protocol_versions_web3_dal::ProtocolVersionsWeb3Dal, snapshot_recovery_dal::SnapshotRecoveryDal, snapshots_creator_dal::SnapshotsCreatorDal, snapshots_dal::SnapshotsDal, storage_logs_dal::StorageLogsDal, @@ -26,27 +22,15 @@ use crate::{ transactions_web3_dal::TransactionsWeb3Dal, }; -#[macro_use] -mod macro_utils; pub mod basic_witness_input_producer_dal; pub mod blocks_dal; pub mod blocks_web3_dal; -pub mod connection; pub mod consensus_dal; pub mod contract_verification_dal; pub mod eth_sender_dal; pub mod events_dal; pub mod events_web3_dal; pub mod factory_deps_dal; -pub mod fri_gpu_prover_queue_dal; -pub mod fri_proof_compressor_dal; -pub mod fri_protocol_versions_dal; -pub mod fri_prover_dal; -pub mod fri_scheduler_dependency_tracker_dal; -pub mod fri_witness_generator_dal; -pub mod healthcheck; -mod instrument; -mod metrics; mod models; pub mod proof_generation_dal; pub mod protocol_versions_dal; @@ -60,141 +44,186 @@ pub mod storage_logs_dedup_dal; pub mod storage_web3_dal; pub mod sync_dal; pub mod system_dal; -pub mod time_utils; pub mod tokens_dal; pub mod 
tokens_web3_dal; pub mod transactions_dal; pub mod transactions_web3_dal; +pub mod metrics; + #[cfg(test)] mod tests; -impl<'a> StorageProcessor<'a> { - pub fn transactions_dal(&mut self) -> TransactionsDal<'_, 'a> { +// This module is private and serves as a way to seal the trait. +mod private { + pub trait Sealed {} +} + +// Here we are making the trait sealed, because it should be public to function correctly, but we don't +// want to allow any other downstream implementations of this trait. +pub trait ServerDals<'a>: private::Sealed +where + Self: 'a, +{ + fn transactions_dal(&mut self) -> TransactionsDal<'_, 'a>; + + fn transactions_web3_dal(&mut self) -> TransactionsWeb3Dal<'_, 'a>; + + fn basic_witness_input_producer_dal(&mut self) -> BasicWitnessInputProducerDal<'_, 'a>; + + fn blocks_dal(&mut self) -> BlocksDal<'_, 'a>; + + fn blocks_web3_dal(&mut self) -> BlocksWeb3Dal<'_, 'a>; + + fn consensus_dal(&mut self) -> ConsensusDal<'_, 'a>; + + fn eth_sender_dal(&mut self) -> EthSenderDal<'_, 'a>; + + fn events_dal(&mut self) -> EventsDal<'_, 'a>; + + fn events_web3_dal(&mut self) -> EventsWeb3Dal<'_, 'a>; + + fn factory_deps_dal(&mut self) -> FactoryDepsDal<'_, 'a>; + + fn storage_web3_dal(&mut self) -> StorageWeb3Dal<'_, 'a>; + + fn storage_logs_dal(&mut self) -> StorageLogsDal<'_, 'a>; + + #[deprecated(note = "Soft-removed in favor of `storage_logs`; don't use")] + #[allow(deprecated)] + fn storage_dal(&mut self) -> storage_dal::StorageDal<'_, 'a>; + + fn storage_logs_dedup_dal(&mut self) -> StorageLogsDedupDal<'_, 'a>; + + fn tokens_dal(&mut self) -> TokensDal<'_, 'a>; + + fn tokens_web3_dal(&mut self) -> TokensWeb3Dal<'_, 'a>; + + fn contract_verification_dal(&mut self) -> ContractVerificationDal<'_, 'a>; + + fn protocol_versions_dal(&mut self) -> ProtocolVersionsDal<'_, 'a>; + + fn protocol_versions_web3_dal(&mut self) -> ProtocolVersionsWeb3Dal<'_, 'a>; + + fn sync_dal(&mut self) -> SyncDal<'_, 'a>; + + fn proof_generation_dal(&mut self) -> ProofGenerationDal<'_, 'a>; + + fn system_dal(&mut self) -> SystemDal<'_, 'a>; + + fn snapshots_dal(&mut self) -> SnapshotsDal<'_, 'a>; + + fn snapshots_creator_dal(&mut self) -> SnapshotsCreatorDal<'_, 'a>; + + fn snapshot_recovery_dal(&mut self) -> SnapshotRecoveryDal<'_, 'a>; +} + +#[derive(Clone, Debug)] +pub struct Server; + +// Implement the marker trait for the Server to be able to use it in StorageProcessor. +impl StorageMarker for Server {} +// Implement the sealed trait for the struct itself. 
+impl private::Sealed for StorageProcessor<'_, Server> {} + +impl<'a> ServerDals<'a> for StorageProcessor<'a, Server> { + fn transactions_dal(&mut self) -> TransactionsDal<'_, 'a> { TransactionsDal { storage: self } } - pub fn transactions_web3_dal(&mut self) -> TransactionsWeb3Dal<'_, 'a> { + fn transactions_web3_dal(&mut self) -> TransactionsWeb3Dal<'_, 'a> { TransactionsWeb3Dal { storage: self } } - pub fn basic_witness_input_producer_dal(&mut self) -> BasicWitnessInputProducerDal<'_, 'a> { + fn basic_witness_input_producer_dal(&mut self) -> BasicWitnessInputProducerDal<'_, 'a> { BasicWitnessInputProducerDal { storage: self } } - pub fn blocks_dal(&mut self) -> BlocksDal<'_, 'a> { + fn blocks_dal(&mut self) -> BlocksDal<'_, 'a> { BlocksDal { storage: self } } - pub fn blocks_web3_dal(&mut self) -> BlocksWeb3Dal<'_, 'a> { + fn blocks_web3_dal(&mut self) -> BlocksWeb3Dal<'_, 'a> { BlocksWeb3Dal { storage: self } } - pub fn consensus_dal(&mut self) -> ConsensusDal<'_, 'a> { + fn consensus_dal(&mut self) -> ConsensusDal<'_, 'a> { ConsensusDal { storage: self } } - pub fn eth_sender_dal(&mut self) -> EthSenderDal<'_, 'a> { + fn eth_sender_dal(&mut self) -> EthSenderDal<'_, 'a> { EthSenderDal { storage: self } } - pub fn events_dal(&mut self) -> EventsDal<'_, 'a> { + fn events_dal(&mut self) -> EventsDal<'_, 'a> { EventsDal { storage: self } } - pub fn events_web3_dal(&mut self) -> EventsWeb3Dal<'_, 'a> { + fn events_web3_dal(&mut self) -> EventsWeb3Dal<'_, 'a> { EventsWeb3Dal { storage: self } } - pub fn factory_deps_dal(&mut self) -> FactoryDepsDal<'_, 'a> { + fn factory_deps_dal(&mut self) -> FactoryDepsDal<'_, 'a> { FactoryDepsDal { storage: self } } - pub fn storage_web3_dal(&mut self) -> StorageWeb3Dal<'_, 'a> { + fn storage_web3_dal(&mut self) -> StorageWeb3Dal<'_, 'a> { StorageWeb3Dal { storage: self } } - pub fn storage_logs_dal(&mut self) -> StorageLogsDal<'_, 'a> { + fn storage_logs_dal(&mut self) -> StorageLogsDal<'_, 'a> { StorageLogsDal { storage: self } } - #[deprecated(note = "Soft-removed in favor of `storage_logs`; don't use")] - #[allow(deprecated)] - pub fn storage_dal(&mut self) -> storage_dal::StorageDal<'_, 'a> { + fn storage_dal(&mut self) -> storage_dal::StorageDal<'_, 'a> { storage_dal::StorageDal { storage: self } } - pub fn storage_logs_dedup_dal(&mut self) -> StorageLogsDedupDal<'_, 'a> { + fn storage_logs_dedup_dal(&mut self) -> StorageLogsDedupDal<'_, 'a> { StorageLogsDedupDal { storage: self } } - pub fn tokens_dal(&mut self) -> TokensDal<'_, 'a> { + fn tokens_dal(&mut self) -> TokensDal<'_, 'a> { TokensDal { storage: self } } - pub fn tokens_web3_dal(&mut self) -> TokensWeb3Dal<'_, 'a> { + fn tokens_web3_dal(&mut self) -> TokensWeb3Dal<'_, 'a> { TokensWeb3Dal { storage: self } } - pub fn contract_verification_dal(&mut self) -> ContractVerificationDal<'_, 'a> { + fn contract_verification_dal(&mut self) -> ContractVerificationDal<'_, 'a> { ContractVerificationDal { storage: self } } - pub fn protocol_versions_dal(&mut self) -> ProtocolVersionsDal<'_, 'a> { + fn protocol_versions_dal(&mut self) -> ProtocolVersionsDal<'_, 'a> { ProtocolVersionsDal { storage: self } } - pub fn protocol_versions_web3_dal(&mut self) -> ProtocolVersionsWeb3Dal<'_, 'a> { + fn protocol_versions_web3_dal(&mut self) -> ProtocolVersionsWeb3Dal<'_, 'a> { ProtocolVersionsWeb3Dal { storage: self } } - pub fn fri_witness_generator_dal(&mut self) -> FriWitnessGeneratorDal<'_, 'a> { - FriWitnessGeneratorDal { storage: self } - } - - pub fn fri_prover_jobs_dal(&mut self) -> FriProverDal<'_, 'a> { 
- FriProverDal { storage: self } - } - - pub fn sync_dal(&mut self) -> SyncDal<'_, 'a> { + fn sync_dal(&mut self) -> SyncDal<'_, 'a> { SyncDal { storage: self } } - pub fn fri_scheduler_dependency_tracker_dal( - &mut self, - ) -> FriSchedulerDependencyTrackerDal<'_, 'a> { - FriSchedulerDependencyTrackerDal { storage: self } - } - - pub fn proof_generation_dal(&mut self) -> ProofGenerationDal<'_, 'a> { + fn proof_generation_dal(&mut self) -> ProofGenerationDal<'_, 'a> { ProofGenerationDal { storage: self } } - pub fn fri_gpu_prover_queue_dal(&mut self) -> FriGpuProverQueueDal<'_, 'a> { - FriGpuProverQueueDal { storage: self } - } - - pub fn fri_protocol_versions_dal(&mut self) -> FriProtocolVersionsDal<'_, 'a> { - FriProtocolVersionsDal { storage: self } - } - - pub fn fri_proof_compressor_dal(&mut self) -> FriProofCompressorDal<'_, 'a> { - FriProofCompressorDal { storage: self } - } - - pub fn system_dal(&mut self) -> SystemDal<'_, 'a> { + fn system_dal(&mut self) -> SystemDal<'_, 'a> { SystemDal { storage: self } } - pub fn snapshots_dal(&mut self) -> SnapshotsDal<'_, 'a> { + fn snapshots_dal(&mut self) -> SnapshotsDal<'_, 'a> { SnapshotsDal { storage: self } } - pub fn snapshots_creator_dal(&mut self) -> SnapshotsCreatorDal<'_, 'a> { + fn snapshots_creator_dal(&mut self) -> SnapshotsCreatorDal<'_, 'a> { SnapshotsCreatorDal { storage: self } } - pub fn snapshot_recovery_dal(&mut self) -> SnapshotRecoveryDal<'_, 'a> { + fn snapshot_recovery_dal(&mut self) -> SnapshotRecoveryDal<'_, 'a> { SnapshotRecoveryDal { storage: self } } } diff --git a/core/lib/dal/src/metrics.rs b/core/lib/dal/src/metrics.rs index 2bd54de8bd79..28a2bb04cb28 100644 --- a/core/lib/dal/src/metrics.rs +++ b/core/lib/dal/src/metrics.rs @@ -1,108 +1,15 @@ //! Metrics for the data access layer. +use std::time::Duration; -use std::{thread, time::Duration}; +use anyhow::Context; +use vise::{Gauge, LabeledFamily, Metrics, Unit}; +use zksync_db_connection::connection::ConnectionPool; -use anyhow::Context as _; -use vise::{ - Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LabeledFamily, - LatencyObserver, Metrics, Unit, -}; - -use crate::ConnectionPool; - -/// Request-related DB metrics. -#[derive(Debug, Metrics)] -#[metrics(prefix = "sql")] -pub(crate) struct RequestMetrics { - /// Latency of a DB request. - #[metrics(buckets = Buckets::LATENCIES, labels = ["method"])] - pub request: LabeledFamily<&'static str, Histogram>, - /// Counter of slow DB requests. - #[metrics(labels = ["method"])] - pub request_slow: LabeledFamily<&'static str, Counter>, - /// Counter of errored DB requests. - #[metrics(labels = ["method"])] - pub request_error: LabeledFamily<&'static str, Counter>, -} - -#[vise::register] -pub(crate) static REQUEST_METRICS: vise::Global = vise::Global::new(); - -/// Reporter of latency for DAL methods consisting of multiple DB queries. If there's a single query, -/// use `.instrument().report_latency()` on it instead. -/// -/// Should be created at the start of the relevant method and dropped when the latency needs to be reported. -#[derive(Debug)] -pub(crate) struct MethodLatency(Option>); - -impl MethodLatency { - pub fn new(name: &'static str) -> Self { - Self(Some(REQUEST_METRICS.request[&name].start())) - } -} - -impl Drop for MethodLatency { - fn drop(&mut self) { - if !thread::panicking() { - let observer = self.0.take().unwrap(); - // `unwrap()` is safe; the observer is only taken out on drop - observer.observe(); - } - } -} - -/// Kind of a connection error. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] -#[metrics(label = "kind", rename_all = "snake_case")] -pub(crate) enum ConnectionErrorKind { - Timeout, - Database, - Io, - Other, -} - -impl From<&sqlx::Error> for ConnectionErrorKind { - fn from(err: &sqlx::Error) -> Self { - match err { - sqlx::Error::PoolTimedOut => Self::Timeout, - sqlx::Error::Database(_) => Self::Database, - sqlx::Error::Io(_) => Self::Io, - _ => Self::Other, - } - } -} - -const POOL_SIZE_BUCKETS: Buckets = Buckets::linear(0.0..=100.0, 10.0); - -/// Connection-related metrics. -#[derive(Debug, Metrics)] -#[metrics(prefix = "sql_connection")] -pub(crate) struct ConnectionMetrics { - /// Latency of acquiring a DB connection. - #[metrics(buckets = Buckets::LATENCIES)] - pub acquire: Histogram, - /// Latency of acquiring a DB connection, tagged with the requester label. - #[metrics(buckets = Buckets::LATENCIES, labels = ["requester"])] - pub acquire_tagged: LabeledFamily<&'static str, Histogram>, - /// Current DB pool size. - #[metrics(buckets = POOL_SIZE_BUCKETS)] - pub pool_size: Histogram, - /// Current number of idle connections in the DB pool. - #[metrics(buckets = POOL_SIZE_BUCKETS)] - pub pool_idle: Histogram, - /// Number of errors occurred when acquiring a DB connection. - pub pool_acquire_error: Family, - /// Lifetime of a DB connection, tagged with the requester label. - #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds, labels = ["requester"])] - pub lifetime: LabeledFamily<&'static str, Histogram>, -} - -#[vise::register] -pub(crate) static CONNECTION_METRICS: vise::Global = vise::Global::new(); +use crate::{Server, ServerDals}; #[derive(Debug, Metrics)] #[metrics(prefix = "postgres")] -pub(crate) struct PostgresMetrics { +pub struct PostgresMetrics { /// Size of the data in a certain table as returned by `pg_table_size` function. 
#[metrics(unit = Unit::Bytes, labels = ["table"])] table_data_size: LabeledFamily>, @@ -121,7 +28,7 @@ pub(crate) struct PostgresMetrics { static POSTGRES_METRICS: vise::Global = vise::Global::new(); impl PostgresMetrics { - pub(crate) async fn run_scraping(pool: ConnectionPool, scrape_interval: Duration) { + pub async fn run_scraping(pool: ConnectionPool, scrape_interval: Duration) { let scrape_timeout = Duration::from_secs(1).min(scrape_interval / 2); loop { match tokio::time::timeout(scrape_timeout, Self::scrape(&pool)).await { @@ -137,7 +44,7 @@ impl PostgresMetrics { } } - async fn scrape(pool: &ConnectionPool) -> anyhow::Result<()> { + async fn scrape(pool: &ConnectionPool) -> anyhow::Result<()> { let mut storage = pool .access_storage_tagged("postgres_metrics") .await diff --git a/core/lib/dal/src/models/consensus/mod.rs b/core/lib/dal/src/models/consensus/mod.rs index 1b2cf9ca3a65..a2484ed7b862 100644 --- a/core/lib/dal/src/models/consensus/mod.rs +++ b/core/lib/dal/src/models/consensus/mod.rs @@ -8,7 +8,7 @@ use zksync_types::{ fee::Fee, l1::{OpProcessingType, PriorityQueueType}, l2::TransactionType, - protocol_version::ProtocolUpgradeTxCommonData, + protocol_upgrade::ProtocolUpgradeTxCommonData, transaction_request::PaymasterParams, Address, Execute, ExecuteTransactionCommon, InputData, L1BatchNumber, L1TxCommonData, L2TxCommonData, Nonce, PriorityOpId, ProtocolVersionId, Transaction, H256, diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs index fac5220c96f6..7e32c87f0e6a 100644 --- a/core/lib/dal/src/models/mod.rs +++ b/core/lib/dal/src/models/mod.rs @@ -6,7 +6,6 @@ pub mod storage_event; pub mod storage_fee_monitor; pub mod storage_log; pub mod storage_protocol_version; -pub mod storage_prover_job_info; pub mod storage_sync; pub mod storage_transaction; pub mod storage_verification_request; diff --git a/core/lib/dal/src/models/storage_protocol_version.rs b/core/lib/dal/src/models/storage_protocol_version.rs index 09efaabd68fe..2c8c6760ade8 100644 --- a/core/lib/dal/src/models/storage_protocol_version.rs +++ b/core/lib/dal/src/models/storage_protocol_version.rs @@ -4,7 +4,8 @@ use sqlx::types::chrono::NaiveDateTime; use zksync_contracts::BaseSystemContractsHashes; use zksync_types::{ api, - protocol_version::{self, L1VerifierConfig, ProtocolUpgradeTx, VerifierParams}, + protocol_upgrade::{self, ProtocolUpgradeTx}, + protocol_version::{L1VerifierConfig, VerifierParams}, H256, }; @@ -27,8 +28,8 @@ pub struct StorageProtocolVersion { pub(crate) fn protocol_version_from_storage( storage_version: StorageProtocolVersion, tx: Option, -) -> protocol_version::ProtocolVersion { - protocol_version::ProtocolVersion { +) -> protocol_upgrade::ProtocolVersion { + protocol_upgrade::ProtocolVersion { id: (storage_version.id as u16).try_into().unwrap(), timestamp: storage_version.timestamp as u64, l1_verifier_config: L1VerifierConfig { diff --git a/core/lib/dal/src/models/storage_prover_job_info.rs b/core/lib/dal/src/models/storage_prover_job_info.rs deleted file mode 100644 index efe6e8cb69d9..000000000000 --- a/core/lib/dal/src/models/storage_prover_job_info.rs +++ /dev/null @@ -1,72 +0,0 @@ -use std::{convert::TryFrom, panic, str::FromStr}; - -use sqlx::types::chrono::{DateTime, NaiveDateTime, NaiveTime, Utc}; -use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; - -use crate::fri_prover_dal::types::{ - JobPosition, ProverJobInfo, ProverJobStatus, ProverJobStatusFailed, ProverJobStatusInProgress, - ProverJobStatusSuccessful, -}; - 
-#[derive(sqlx::FromRow)] -pub struct StorageProverJobInfo { - pub id: i64, - pub l1_batch_number: i64, - pub circuit_type: String, - pub status: String, - pub aggregation_round: i32, - pub sequence_number: i32, - pub input_length: i32, - pub attempts: i32, - pub created_at: NaiveDateTime, - pub updated_at: NaiveDateTime, - pub processing_started_at: Option, - pub time_taken: Option, - pub error: Option, -} - -impl From for ProverJobInfo { - fn from(x: StorageProverJobInfo) -> Self { - fn nt2d(nt: NaiveDateTime) -> DateTime { - DateTime::from_naive_utc_and_offset(nt, Utc) - } - - let status = match ProverJobStatus::from_str(x.status.as_str()) - .unwrap_or_else(|_| panic!("Unknown value '{}' in prover job status.", x.status)) - { - ProverJobStatus::InProgress(_) => { - ProverJobStatus::InProgress(ProverJobStatusInProgress { - started_at: nt2d(x.processing_started_at.unwrap()), - }) - } - ProverJobStatus::Successful(_) => { - ProverJobStatus::Successful(ProverJobStatusSuccessful { - started_at: nt2d(x.processing_started_at.unwrap()), - time_taken: x.time_taken.unwrap() - NaiveTime::from_hms_opt(0, 0, 0).unwrap(), - }) - } - ProverJobStatus::Failed(_) => ProverJobStatus::Failed(ProverJobStatusFailed { - started_at: nt2d(x.processing_started_at.unwrap()), - error: x.error.unwrap_or_else(|| { - panic!("Error must be present on failed prover job records.") - }), - }), - x => x, - }; - - ProverJobInfo { - id: x.id as u32, - block_number: L1BatchNumber(x.l1_batch_number as u32), - circuit_type: x.circuit_type, - position: JobPosition { - aggregation_round: AggregationRound::try_from(x.aggregation_round).unwrap(), - sequence_number: x.sequence_number as usize, - }, - input_length: x.input_length as u64, - status, - attempts: x.attempts as u32, - created_at: nt2d(x.created_at), - updated_at: nt2d(x.updated_at), - } - } -} diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 9f4921e78c67..74c8db2e9db9 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -8,7 +8,7 @@ use zksync_types::{ fee::Fee, l1::{OpProcessingType, PriorityQueueType}, l2::TransactionType, - protocol_version::ProtocolUpgradeTxCommonData, + protocol_upgrade::ProtocolUpgradeTxCommonData, transaction_request::PaymasterParams, vm_trace::Call, web3::types::U64, diff --git a/core/lib/dal/src/models/storage_witness_job_info.rs b/core/lib/dal/src/models/storage_witness_job_info.rs index ea8e15fb9c99..03d1120b7170 100644 --- a/core/lib/dal/src/models/storage_witness_job_info.rs +++ b/core/lib/dal/src/models/storage_witness_job_info.rs @@ -1,11 +1,13 @@ use std::{convert::TryFrom, str::FromStr}; use sqlx::types::chrono::{DateTime, NaiveDateTime, NaiveTime, Utc}; -use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; - -use crate::fri_prover_dal::types::{ - JobPosition, WitnessJobInfo, WitnessJobStatus, WitnessJobStatusFailed, - WitnessJobStatusSuccessful, +use zksync_types::{ + basic_fri_types::AggregationRound, + prover_dal::{ + JobPosition, WitnessJobInfo, WitnessJobStatus, WitnessJobStatusFailed, + WitnessJobStatusSuccessful, + }, + L1BatchNumber, }; #[derive(sqlx::FromRow)] diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index 61d5ba583e9c..5543a2daceb6 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -2,13 +2,14 @@ use std::time::Duration; use strum::{Display, EnumString}; +use 
zksync_db_connection::{processor::StorageProcessor, utils::pg_interval_from_duration}; use zksync_types::L1BatchNumber; -use crate::{time_utils::pg_interval_from_duration, SqlxError, StorageProcessor}; +use crate::{Server, SqlxError}; #[derive(Debug)] pub struct ProofGenerationDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } #[derive(Debug, EnumString, Display)] diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index 5a9a973893c0..65e283146cee 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -2,19 +2,21 @@ use std::convert::TryInto; use anyhow::Context as _; use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::{ - protocol_version::{L1VerifierConfig, ProtocolUpgradeTx, ProtocolVersion, VerifierParams}, + protocol_upgrade::{ProtocolUpgradeTx, ProtocolVersion}, + protocol_version::{L1VerifierConfig, VerifierParams}, ProtocolVersionId, H256, }; use crate::{ models::storage_protocol_version::{protocol_version_from_storage, StorageProtocolVersion}, - StorageProcessor, + Server, ServerDals, }; #[derive(Debug)] pub struct ProtocolVersionsDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub storage: &'a mut StorageProcessor<'c, Server>, } impl ProtocolVersionsDal<'_, '_> { diff --git a/core/lib/dal/src/protocol_versions_web3_dal.rs b/core/lib/dal/src/protocol_versions_web3_dal.rs index 893a7e041df1..cc3efb802bff 100644 --- a/core/lib/dal/src/protocol_versions_web3_dal.rs +++ b/core/lib/dal/src/protocol_versions_web3_dal.rs @@ -1,10 +1,11 @@ +use zksync_db_connection::processor::StorageProcessor; use zksync_types::api::ProtocolVersion; -use crate::{models::storage_protocol_version::StorageProtocolVersion, StorageProcessor}; +use crate::{models::storage_protocol_version::StorageProtocolVersion, Server}; #[derive(Debug)] pub struct ProtocolVersionsWeb3Dal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub storage: &'a mut StorageProcessor<'c, Server>, } impl ProtocolVersionsWeb3Dal<'_, '_> { diff --git a/core/lib/dal/src/snapshot_recovery_dal.rs b/core/lib/dal/src/snapshot_recovery_dal.rs index c0fe61802de4..27bcecca1cd2 100644 --- a/core/lib/dal/src/snapshot_recovery_dal.rs +++ b/core/lib/dal/src/snapshot_recovery_dal.rs @@ -1,12 +1,13 @@ +use zksync_db_connection::processor::StorageProcessor; use zksync_types::{ snapshots::SnapshotRecoveryStatus, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, }; -use crate::StorageProcessor; +use crate::Server; #[derive(Debug)] pub struct SnapshotRecoveryDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } impl SnapshotRecoveryDal<'_, '_> { @@ -105,11 +106,11 @@ mod tests { snapshots::SnapshotRecoveryStatus, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, }; - use crate::ConnectionPool; + use crate::{ConnectionPool, Server, ServerDals}; #[tokio::test] async fn manipulating_snapshot_recovery_table() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); let mut applied_status_dal = conn.snapshot_recovery_dal(); let empty_status = applied_status_dal diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs index 
ba8a2d836b52..44055c39e757 100644 --- a/core/lib/dal/src/snapshots_creator_dal.rs +++ b/core/lib/dal/src/snapshots_creator_dal.rs @@ -1,13 +1,14 @@ +use zksync_db_connection::{instrument::InstrumentExt, processor::StorageProcessor}; use zksync_types::{ snapshots::SnapshotStorageLog, AccountTreeId, Address, L1BatchNumber, MiniblockNumber, StorageKey, H256, }; -use crate::{instrument::InstrumentExt, StorageProcessor}; +use crate::Server; #[derive(Debug)] pub struct SnapshotsCreatorDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } impl SnapshotsCreatorDal<'_, '_> { @@ -145,11 +146,11 @@ mod tests { use zksync_types::StorageLog; use super::*; - use crate::ConnectionPool; + use crate::{ConnectionPool, Server, ServerDals}; #[tokio::test] async fn getting_storage_log_chunks_basics() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let logs = (0..100).map(|i| { @@ -221,7 +222,7 @@ mod tests { } async fn assert_logs_for_snapshot( - conn: &mut StorageProcessor<'_>, + conn: &mut StorageProcessor<'_, Server>, miniblock_number: MiniblockNumber, l1_batch_number: L1BatchNumber, expected_logs: &[StorageLog], @@ -261,7 +262,7 @@ mod tests { #[tokio::test] async fn phantom_writes_are_filtered_out() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let key = StorageKey::new(AccountTreeId::default(), H256::repeat_byte(1)); diff --git a/core/lib/dal/src/snapshots_dal.rs b/core/lib/dal/src/snapshots_dal.rs index 7f790a0a0044..cc707837b2ef 100644 --- a/core/lib/dal/src/snapshots_dal.rs +++ b/core/lib/dal/src/snapshots_dal.rs @@ -1,9 +1,10 @@ +use zksync_db_connection::{instrument::InstrumentExt, processor::StorageProcessor}; use zksync_types::{ snapshots::{AllSnapshots, SnapshotMetadata}, L1BatchNumber, }; -use crate::{instrument::InstrumentExt, StorageProcessor}; +use crate::Server; #[derive(Debug, sqlx::FromRow)] struct StorageSnapshotMetadata { @@ -28,7 +29,7 @@ impl From for SnapshotMetadata { #[derive(Debug)] pub struct SnapshotsDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } impl SnapshotsDal<'_, '_> { @@ -170,11 +171,11 @@ impl SnapshotsDal<'_, '_> { mod tests { use zksync_types::L1BatchNumber; - use crate::ConnectionPool; + use crate::{ConnectionPool, Server, ServerDals}; #[tokio::test] async fn adding_snapshot() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let mut dal = conn.snapshots_dal(); let l1_batch_number = L1BatchNumber(100); @@ -214,7 +215,7 @@ mod tests { #[tokio::test] async fn adding_files() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let mut dal = conn.snapshots_dal(); let l1_batch_number = L1BatchNumber(100); diff --git a/core/lib/dal/src/storage_dal.rs b/core/lib/dal/src/storage_dal.rs index 10a32e7c7975..b61a4b170207 100644 --- a/core/lib/dal/src/storage_dal.rs +++ b/core/lib/dal/src/storage_dal.rs @@ -1,13 +1,14 @@ use std::collections::HashMap; use itertools::Itertools; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::{StorageKey, StorageLog, StorageValue, H256}; -use 
crate::StorageProcessor; +use crate::Server; #[derive(Debug)] pub struct StorageDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } #[deprecated(note = "Soft-removed in favor of `storage_logs`; don't use")] @@ -98,12 +99,12 @@ mod tests { use zksync_types::{AccountTreeId, Address}; use super::*; - use crate::ConnectionPool; + use crate::{ConnectionPool, Server, ServerDals}; #[allow(deprecated)] #[tokio::test] async fn applying_storage_logs() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let account = AccountTreeId::new(Address::repeat_byte(1)); diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index e9c11c0fec16..e69ac87313d3 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -1,17 +1,20 @@ use std::{collections::HashMap, ops, time::Instant}; use sqlx::types::chrono::Utc; +use zksync_db_connection::{ + instrument::InstrumentExt, processor::StorageProcessor, write_str, writeln_str, +}; use zksync_types::{ get_code_key, snapshots::SnapshotStorageLog, AccountTreeId, Address, L1BatchNumber, MiniblockNumber, StorageKey, StorageLog, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H160, H256, }; pub use crate::models::storage_log::{DbStorageLog, StorageRecoveryLogEntry}; -use crate::{instrument::InstrumentExt, StorageProcessor}; +use crate::{Server, ServerDals}; #[derive(Debug)] pub struct StorageLogsDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } impl StorageLogsDal<'_, '_> { @@ -758,9 +761,13 @@ mod tests { use zksync_types::{block::L1BatchHeader, ProtocolVersion, ProtocolVersionId}; use super::*; - use crate::{tests::create_miniblock_header, ConnectionPool}; + use crate::{tests::create_miniblock_header, ConnectionPool, Server}; - async fn insert_miniblock(conn: &mut StorageProcessor<'_>, number: u32, logs: Vec) { + async fn insert_miniblock( + conn: &mut StorageProcessor<'_, Server>, + number: u32, + logs: Vec, + ) { let header = L1BatchHeader::new( L1BatchNumber(number), 0, @@ -791,7 +798,7 @@ mod tests { #[tokio::test] async fn inserting_storage_logs() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -836,7 +843,7 @@ mod tests { } async fn test_rollback( - conn: &mut StorageProcessor<'_>, + conn: &mut StorageProcessor<'_, Server>, key: StorageKey, second_key: StorageKey, ) { @@ -914,7 +921,7 @@ mod tests { #[tokio::test] async fn getting_storage_logs_for_revert() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -964,7 +971,7 @@ mod tests { #[tokio::test] async fn reverting_keys_without_initial_write() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -1032,7 +1039,7 @@ mod tests { #[tokio::test] async fn getting_starting_entries_in_chunks() { - let pool = 
ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let sorted_hashed_keys = prepare_tree_entries(&mut conn, 100).await; @@ -1065,7 +1072,7 @@ mod tests { } } - async fn prepare_tree_entries(conn: &mut StorageProcessor<'_>, count: u8) -> Vec { + async fn prepare_tree_entries(conn: &mut StorageProcessor<'_, Server>, count: u8) -> Vec { conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) .await; @@ -1093,7 +1100,7 @@ mod tests { #[tokio::test] async fn getting_tree_entries() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let sorted_hashed_keys = prepare_tree_entries(&mut conn, 10).await; @@ -1135,7 +1142,7 @@ mod tests { FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, ); - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); // If deployment fails then two writes are issued, one that writes `bytecode_hash` to the "correct" value, // and the next write reverts its value back to `FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH`. diff --git a/core/lib/dal/src/storage_logs_dedup_dal.rs b/core/lib/dal/src/storage_logs_dedup_dal.rs index 0994193c8975..a8033e8b7e90 100644 --- a/core/lib/dal/src/storage_logs_dedup_dal.rs +++ b/core/lib/dal/src/storage_logs_dedup_dal.rs @@ -1,6 +1,7 @@ use std::collections::HashSet; use sqlx::types::chrono::Utc; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::{ snapshots::SnapshotStorageLog, zk_evm_types::LogQuery, AccountTreeId, Address, L1BatchNumber, StorageKey, H256, @@ -8,11 +9,11 @@ use zksync_types::{ use zksync_utils::u256_to_h256; pub use crate::models::storage_log::DbInitialWrite; -use crate::StorageProcessor; +use crate::Server; #[derive(Debug)] pub struct StorageLogsDedupDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } impl StorageLogsDedupDal<'_, '_> { diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index 211abfe2a6c2..928402b48fc9 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; +use zksync_db_connection::{instrument::InstrumentExt, processor::StorageProcessor}; use zksync_types::{ get_code_key, get_nonce_key, utils::{decompose_full_nonce, storage_key_for_standard_token_balance}, @@ -8,14 +9,11 @@ use zksync_types::{ }; use zksync_utils::h256_to_u256; -use crate::{ - instrument::InstrumentExt, models::storage_block::ResolvedL1BatchForMiniblock, SqlxError, - StorageProcessor, -}; +use crate::{models::storage_block::ResolvedL1BatchForMiniblock, Server, ServerDals, SqlxError}; #[derive(Debug)] pub struct StorageWeb3Dal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } impl StorageWeb3Dal<'_, '_> { @@ -273,12 +271,12 @@ mod tests { use super::*; use crate::{ tests::{create_miniblock_header, create_snapshot_recovery}, - ConnectionPool, + ConnectionPool, Server, ServerDals, }; #[tokio::test] async fn resolving_l1_batch_number_of_miniblock() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() 
.save_protocol_version_with_tx(ProtocolVersion::default()) @@ -345,7 +343,7 @@ mod tests { #[tokio::test] async fn resolving_l1_batch_number_of_miniblock_with_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index ebb4e9a15dc9..590cae3b40ed 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -1,16 +1,17 @@ +use zksync_db_connection::{ + instrument::InstrumentExt, metrics::MethodLatency, processor::StorageProcessor, +}; use zksync_types::{api::en, MiniblockNumber}; use crate::{ - instrument::InstrumentExt, - metrics::MethodLatency, models::storage_sync::{StorageSyncBlock, SyncBlock}, - StorageProcessor, + Server, ServerDals, }; /// DAL subset dedicated to the EN synchronization. #[derive(Debug)] pub struct SyncDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub storage: &'a mut StorageProcessor<'c, Server>, } impl SyncDal<'_, '_> { @@ -118,12 +119,12 @@ mod tests { create_miniblock_header, create_snapshot_recovery, mock_execution_result, mock_l2_transaction, }, - ConnectionPool, + ConnectionPool, Server, }; #[tokio::test] async fn sync_block_basics() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); // Simulate genesis. @@ -239,7 +240,7 @@ mod tests { #[tokio::test] async fn sync_block_after_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); // Simulate snapshot recovery. 
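The hunks above repeat one mechanical change: every DAL struct now borrows a `StorageProcessor<'c, Server>`, and tests build the typed pool (the turbofish argument is stripped in this rendering of the patch, so `ConnectionPool::::test_pool()` presumably reads `ConnectionPool::<Server>::test_pool()` in the source). Below is a minimal, self-contained sketch of the pattern; the trimmed-down types and the `Prover` marker are illustrative stand-ins, not the patch's definitions.

```rust
use std::marker::PhantomData;

/// Marker trait for database flavors (counterpart of the patch's `StorageMarker`).
pub trait StorageMarker {}

/// Marker for the server (core) database, as in the patch.
pub struct Server;
impl StorageMarker for Server {}

/// Hypothetical second marker, standing in for a prover-side database.
pub struct Prover;
impl StorageMarker for Prover {}

/// Heavily simplified stand-in for the patch's `StorageProcessor<'_, SM>` (no real connection).
pub struct StorageProcessor<SM: StorageMarker> {
    _marker: PhantomData<SM>,
}

impl<SM: StorageMarker> StorageProcessor<SM> {
    pub fn new() -> Self {
        Self { _marker: PhantomData }
    }
}

/// A DAL that only makes sense against the server database.
pub struct StorageLogsDal<'a> {
    #[allow(dead_code)]
    storage: &'a mut StorageProcessor<Server>,
}

/// Accessor trait in the spirit of the patch's `ServerDals`:
/// server-only DALs are reachable only from a server-typed processor.
pub trait ServerDals {
    fn storage_logs_dal(&mut self) -> StorageLogsDal<'_>;
}

impl ServerDals for StorageProcessor<Server> {
    fn storage_logs_dal(&mut self) -> StorageLogsDal<'_> {
        StorageLogsDal { storage: self }
    }
}

fn main() {
    let mut server_storage = StorageProcessor::<Server>::new();
    let _dal = server_storage.storage_logs_dal(); // compiles

    let _prover_storage = StorageProcessor::<Prover>::new();
    // _prover_storage.storage_logs_dal(); // error: `ServerDals` is not implemented for this type
}
```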
diff --git a/core/lib/dal/src/system_dal.rs b/core/lib/dal/src/system_dal.rs index 5a30abada460..b72ec249aa00 100644 --- a/core/lib/dal/src/system_dal.rs +++ b/core/lib/dal/src/system_dal.rs @@ -1,8 +1,7 @@ use std::collections::HashMap; use sqlx::Row; - -use crate::{instrument::InstrumentExt, StorageProcessor}; +use zksync_db_connection::{instrument::InstrumentExt, processor::StorageProcessor}; #[derive(Debug)] pub(crate) struct TableSize { @@ -11,9 +10,10 @@ pub(crate) struct TableSize { pub relation_size: u64, pub total_size: u64, } +use crate::Server; pub struct SystemDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub storage: &'a mut StorageProcessor<'c, Server>, } impl SystemDal<'_, '_> { diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 075eca76220d..96270121eb69 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -1,6 +1,7 @@ use std::time::Duration; use zksync_contracts::BaseSystemContractsHashes; +use zksync_db_connection::connection::ConnectionPool; use zksync_types::{ block::{MiniblockHasher, MiniblockHeader}, fee::{Fee, TransactionExecutionMetrics}, @@ -8,7 +9,7 @@ use zksync_types::{ helpers::unix_timestamp_ms, l1::{L1Tx, OpProcessingType, PriorityQueueType}, l2::L2Tx, - protocol_version::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}, + protocol_upgrade::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}, snapshots::SnapshotRecoveryStatus, tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, Address, Execute, L1BatchNumber, L1BlockNumber, L1TxCommonData, L2ChainId, MiniblockNumber, @@ -17,10 +18,10 @@ use zksync_types::{ use crate::{ blocks_dal::BlocksDal, - connection::ConnectionPool, protocol_versions_dal::ProtocolVersionsDal, transactions_dal::{L2TxSubmissionResult, TransactionsDal}, transactions_web3_dal::TransactionsWeb3Dal, + Server, }; const DEFAULT_GAS_PER_PUBDATA: u32 = 100; @@ -164,7 +165,7 @@ pub(crate) fn create_snapshot_recovery() -> SnapshotRecoveryStatus { #[tokio::test] async fn workflow_with_submit_tx_equal_hashes() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let storage = &mut connection_pool.access_storage().await.unwrap(); let mut transactions_dal = TransactionsDal { storage }; @@ -184,7 +185,7 @@ async fn workflow_with_submit_tx_equal_hashes() { #[tokio::test] async fn workflow_with_submit_tx_diff_hashes() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let storage = &mut connection_pool.access_storage().await.unwrap(); let mut transactions_dal = TransactionsDal { storage }; @@ -211,7 +212,7 @@ async fn workflow_with_submit_tx_diff_hashes() { #[tokio::test] async fn remove_stuck_txs() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let storage = &mut connection_pool.access_storage().await.unwrap(); let mut protocol_versions_dal = ProtocolVersionsDal { storage }; protocol_versions_dal diff --git a/core/lib/dal/src/tokens_dal.rs b/core/lib/dal/src/tokens_dal.rs index 67a5bb36728a..2456e72ed879 100644 --- a/core/lib/dal/src/tokens_dal.rs +++ b/core/lib/dal/src/tokens_dal.rs @@ -1,11 +1,12 @@ use sqlx::types::chrono::Utc; +use zksync_db_connection::{processor::StorageProcessor, write_str, writeln_str}; use zksync_types::{tokens::TokenInfo, Address, MiniblockNumber}; -use crate::StorageProcessor; +use crate::{Server, 
ServerDals}; #[derive(Debug)] pub struct TokensDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } impl TokensDal<'_, '_> { @@ -111,7 +112,7 @@ mod tests { use zksync_types::{get_code_key, tokens::TokenMetadata, StorageLog, H256}; use super::*; - use crate::ConnectionPool; + use crate::{ConnectionPool, Server, ServerDals}; fn test_token_info() -> TokenInfo { TokenInfo { @@ -139,7 +140,7 @@ mod tests { #[tokio::test] async fn adding_and_getting_tokens() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let tokens = [test_token_info(), eth_token_info()]; storage.tokens_dal().add_tokens(&tokens).await.unwrap(); @@ -186,7 +187,7 @@ mod tests { #[tokio::test] async fn rolling_back_tokens() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let eth_info = eth_token_info(); @@ -256,7 +257,7 @@ mod tests { ); } - async fn test_getting_all_tokens(storage: &mut StorageProcessor<'_>) { + async fn test_getting_all_tokens(storage: &mut StorageProcessor<'_, Server>) { for at_miniblock in [None, Some(MiniblockNumber(2)), Some(MiniblockNumber(100))] { let all_tokens = storage .tokens_web3_dal() @@ -280,7 +281,7 @@ mod tests { #[tokio::test] async fn rolling_back_tokens_with_failed_deployment() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let test_info = test_token_info(); diff --git a/core/lib/dal/src/tokens_web3_dal.rs b/core/lib/dal/src/tokens_web3_dal.rs index 5bd3cab089ee..efad971b4fb1 100644 --- a/core/lib/dal/src/tokens_web3_dal.rs +++ b/core/lib/dal/src/tokens_web3_dal.rs @@ -1,9 +1,10 @@ +use zksync_db_connection::processor::StorageProcessor; use zksync_types::{ tokens::{TokenInfo, TokenMetadata}, Address, MiniblockNumber, }; -use crate::StorageProcessor; +use crate::{Server, ServerDals}; #[derive(Debug)] struct StorageTokenInfo { @@ -30,7 +31,7 @@ impl From for TokenInfo { #[derive(Debug)] pub struct TokensWeb3Dal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } impl TokensWeb3Dal<'_, '_> { diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index eada9dfa44ab..a4a4ffbe7bdd 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -4,12 +4,15 @@ use anyhow::Context as _; use bigdecimal::BigDecimal; use itertools::Itertools; use sqlx::{error, types::chrono::NaiveDateTime}; +use zksync_db_connection::{ + instrument::InstrumentExt, processor::StorageProcessor, utils::pg_interval_from_duration, +}; use zksync_types::{ block::MiniblockExecutionData, fee::TransactionExecutionMetrics, l1::L1Tx, l2::L2Tx, - protocol_version::ProtocolUpgradeTx, + protocol_upgrade::ProtocolUpgradeTx, tx::{tx_execution_info::TxExecutionStatus, TransactionExecutionResult}, vm_trace::Call, Address, ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, MiniblockNumber, PriorityOpId, @@ -18,10 +21,8 @@ use zksync_types::{ use zksync_utils::u256_to_big_decimal; use crate::{ - instrument::InstrumentExt, models::storage_transaction::{CallTrace, StorageTransaction}, - time_utils::pg_interval_from_duration, - StorageProcessor, + Server, }; #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] @@ 
-47,7 +48,7 @@ impl fmt::Display for L2TxSubmissionResult { #[derive(Debug)] pub struct TransactionsDal<'c, 'a> { - pub(crate) storage: &'c mut StorageProcessor<'a>, + pub(crate) storage: &'c mut StorageProcessor<'a, Server>, } type TxLocations = Vec<(MiniblockNumber, Vec<(H256, u32, u16)>)>; @@ -76,6 +77,7 @@ impl TransactionsDal<'_, '_> { let secs = (tx.received_timestamp_ms / 1000) as i64; let nanosecs = ((tx.received_timestamp_ms % 1000) * 1_000_000) as u32; + #[allow(deprecated)] let received_at = NaiveDateTime::from_timestamp_opt(secs, nanosecs).unwrap(); sqlx::query!( @@ -176,6 +178,8 @@ impl TransactionsDal<'_, '_> { let secs = (tx.received_timestamp_ms / 1000) as i64; let nanosecs = ((tx.received_timestamp_ms % 1000) * 1_000_000) as u32; + + #[allow(deprecated)] let received_at = NaiveDateTime::from_timestamp_opt(secs, nanosecs).unwrap(); sqlx::query!( @@ -296,6 +300,7 @@ impl TransactionsDal<'_, '_> { let paymaster_input = tx.common_data.paymaster_params.paymaster_input; let secs = (tx.received_timestamp_ms / 1000) as i64; let nanosecs = ((tx.received_timestamp_ms % 1000) * 1_000_000) as u32; + #[allow(deprecated)] let received_at = NaiveDateTime::from_timestamp_opt(secs, nanosecs).unwrap(); // Besides just adding or updating(on conflict) the record, we want to extract some info // from the query below, to indicate what actually happened: @@ -1333,12 +1338,12 @@ mod tests { use super::*; use crate::{ tests::{create_miniblock_header, mock_execution_result, mock_l2_transaction}, - ConnectionPool, + ConnectionPool, Server, ServerDals, }; #[tokio::test] async fn getting_call_trace_for_transaction() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index 034983b2b889..718b609354a6 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -1,16 +1,18 @@ use sqlx::types::chrono::NaiveDateTime; +use zksync_db_connection::{ + instrument::InstrumentExt, interpolate_query, match_query_as, processor::StorageProcessor, +}; use zksync_types::{ api, api::TransactionReceipt, Address, L2ChainId, MiniblockNumber, Transaction, ACCOUNT_CODE_STORAGE_ADDRESS, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H256, U256, }; use crate::{ - instrument::InstrumentExt, models::storage_transaction::{ StorageApiTransaction, StorageTransaction, StorageTransactionDetails, StorageTransactionReceipt, }, - SqlxError, StorageProcessor, + Server, ServerDals, SqlxError, }; #[derive(Debug, Clone, Copy)] @@ -21,7 +23,7 @@ enum TransactionSelector<'a> { #[derive(Debug)] pub struct TransactionsWeb3Dal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Server>, } impl TransactionsWeb3Dal<'_, '_> { @@ -395,10 +397,10 @@ mod tests { use super::*; use crate::{ tests::{create_miniblock_header, mock_execution_result, mock_l2_transaction}, - ConnectionPool, + ConnectionPool, Server, ServerDals, }; - async fn prepare_transactions(conn: &mut StorageProcessor<'_>, txs: Vec) { + async fn prepare_transactions(conn: &mut StorageProcessor<'_, Server>, txs: Vec) { conn.blocks_dal() .delete_miniblocks(MiniblockNumber(0)) .await @@ -432,7 +434,7 @@ mod tests { #[tokio::test] async fn getting_transaction() { - let 
connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -482,7 +484,7 @@ mod tests { #[tokio::test] async fn getting_receipts() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -510,7 +512,7 @@ mod tests { #[tokio::test] async fn getting_miniblock_transactions() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -537,7 +539,7 @@ mod tests { #[tokio::test] async fn getting_next_nonce_by_initiator_account() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -607,7 +609,7 @@ mod tests { #[tokio::test] async fn getting_next_nonce_by_initiator_account_after_snapshot_recovery() { // Emulate snapshot recovery: no transactions with past nonces are present in the storage - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); let initiator = Address::repeat_byte(1); let next_nonce = conn diff --git a/core/lib/db_connection/Cargo.toml b/core/lib/db_connection/Cargo.toml new file mode 100644 index 000000000000..03ca983f2db9 --- /dev/null +++ b/core/lib/db_connection/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "zksync_db_connection" +version = "0.1.0" +edition = "2021" +authors = ["The Matter Labs Team "] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] + +[dependencies] +zksync_health_check = { path = "../health_check" } + +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +sqlx = { version = "0.7.3", default-features = false, features = [ + "runtime-tokio", + "tls-native-tls", + "macros", + "postgres", + "bigdecimal", + "rust_decimal", + "chrono", + "json", + "migrate", + "ipnetwork", +] } +vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" } +tokio = { version = "1", features = ["full"] } +anyhow = "1.0" +url = "2" +rand = "0.8" +tracing = "0.1" + +[dev-dependencies] +assert_matches = "1.5.0" +zksync_basic_types = { path = "../basic_types" } diff --git a/core/lib/dal/src/connection/mod.rs b/core/lib/db_connection/src/connection.rs similarity index 89% rename from core/lib/dal/src/connection/mod.rs rename to core/lib/db_connection/src/connection.rs index f81edef267c6..a9330c00cdf1 100644 --- a/core/lib/dal/src/connection/mod.rs +++ b/core/lib/db_connection/src/connection.rs @@ -1,6 +1,7 @@ use std::{ env, fmt, future::Future, + marker::PhantomData, panic::Location, sync::{ atomic::{AtomicU64, Ordering}, @@ -16,23 +17,22 @@ use sqlx::{ postgres::{PgConnectOptions, 
PgPool, PgPoolOptions, Postgres}, }; -pub use self::processor::StorageProcessor; -pub(crate) use self::processor::StorageProcessorTags; -use self::processor::TracedConnections; -use crate::metrics::{PostgresMetrics, CONNECTION_METRICS}; - -mod processor; +use crate::{ + metrics::CONNECTION_METRICS, + processor::{StorageMarker, StorageProcessor, StorageProcessorTags, TracedConnections}, +}; /// Builder for [`ConnectionPool`]s. #[derive(Clone)] -pub struct ConnectionPoolBuilder { +pub struct ConnectionPoolBuilder { database_url: String, max_size: u32, acquire_timeout: Duration, statement_timeout: Option, + _marker: PhantomData, } -impl fmt::Debug for ConnectionPoolBuilder { +impl fmt::Debug for ConnectionPoolBuilder { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { // Database URL is potentially sensitive, thus we omit it. formatter @@ -44,7 +44,7 @@ impl fmt::Debug for ConnectionPoolBuilder { } } -impl ConnectionPoolBuilder { +impl ConnectionPoolBuilder { /// Overrides the maximum number of connections that can be allocated by the pool. pub fn set_max_size(&mut self, max_size: u32) -> &mut Self { self.max_size = max_size; @@ -75,7 +75,7 @@ impl ConnectionPoolBuilder { } /// Builds a connection pool from this builder. - pub async fn build(&self) -> anyhow::Result { + pub async fn build(&self) -> anyhow::Result> { let options = PgPoolOptions::new() .max_connections(self.max_size) .acquire_timeout(self.acquire_timeout); @@ -97,8 +97,21 @@ impl ConnectionPoolBuilder { inner: pool, max_size: self.max_size, traced_connections: None, + _marker: Default::default(), }) } + + /// Builds a connection pool that has a single connection. + pub async fn build_singleton(&self) -> anyhow::Result> { + let singleton_builder = Self { + database_url: self.database_url.clone(), + max_size: 1, + acquire_timeout: self.acquire_timeout, + statement_timeout: self.statement_timeout, + _marker: self._marker, + }; + singleton_builder.build().await + } } #[derive(Debug)] @@ -143,7 +156,7 @@ impl TestTemplate { /// Closes the connection pool, disallows connecting to the underlying db, /// so that the db can be used as a template. - pub async fn freeze(pool: ConnectionPool) -> anyhow::Result { + pub async fn freeze(pool: ConnectionPool) -> anyhow::Result { use sqlx::Executor as _; let mut conn = pool.acquire_connection_retried(None).await?; conn.execute( @@ -164,7 +177,10 @@ impl TestTemplate { /// whenever you write to the DBs, therefore making it as fast as an in-memory Postgres instance. /// The database is not cleaned up automatically, but rather the whole Postgres /// container is recreated whenever you call "zk test rust". 
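`ConnectionPoolBuilder` and `ConnectionPool` are generic over the storage marker here (the `<SM: StorageMarker>` parameters are stripped in this rendering), with a `PhantomData` field carrying the marker from `builder()` through `build()` and the new `build_singleton()`. The sketch below is a self-contained model of that flow, with no real Postgres behind it.

```rust
use std::marker::PhantomData;
use std::time::Duration;

pub trait StorageMarker {}
pub struct Server;
impl StorageMarker for Server {}

/// Simplified stand-in for the patch's `ConnectionPoolBuilder<SM>`.
pub struct ConnectionPoolBuilder<SM: StorageMarker> {
    database_url: String,
    max_size: u32,
    acquire_timeout: Duration,
    _marker: PhantomData<SM>,
}

/// Simplified stand-in for the patch's `ConnectionPool<SM>`.
pub struct ConnectionPool<SM: StorageMarker> {
    max_size: u32,
    _marker: PhantomData<SM>,
}

impl<SM: StorageMarker> ConnectionPool<SM> {
    /// The marker chosen at this call site is carried by the builder and ends up on the pool.
    pub fn builder(database_url: &str, max_pool_size: u32) -> ConnectionPoolBuilder<SM> {
        ConnectionPoolBuilder {
            database_url: database_url.to_owned(),
            max_size: max_pool_size,
            acquire_timeout: Duration::from_secs(30),
            _marker: PhantomData,
        }
    }
}

impl<SM: StorageMarker> ConnectionPoolBuilder<SM> {
    /// Stands in for the real async `build()`; no actual connections are opened here.
    pub fn build(&self) -> ConnectionPool<SM> {
        let _ = (&self.database_url, self.acquire_timeout);
        ConnectionPool { max_size: self.max_size, _marker: PhantomData }
    }

    /// Mirrors the patch's `build_singleton()`: same settings, but capped at one connection.
    pub fn build_singleton(&self) -> ConnectionPool<SM> {
        let singleton = Self {
            database_url: self.database_url.clone(),
            max_size: 1,
            acquire_timeout: self.acquire_timeout,
            _marker: PhantomData,
        };
        singleton.build()
    }
}

fn main() {
    let pool = ConnectionPool::<Server>::builder("postgres://localhost/db", 50).build();
    let singleton = ConnectionPool::<Server>::builder("postgres://localhost/db", 50)
        .build_singleton();
    assert_eq!(pool.max_size, 50);
    assert_eq!(singleton.max_size, 1);
}
```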
- pub async fn create_db(&self, connections: u32) -> anyhow::Result { + pub async fn create_db( + &self, + connections: u32, + ) -> anyhow::Result> { use sqlx::Executor as _; let mut conn = Self::connect_to(&self.url("")) @@ -176,7 +192,7 @@ impl TestTemplate { .await .context("CREATE DATABASE")?; - Ok(ConnectionPool::builder( + Ok(ConnectionPool::::builder( self.url(&db_new).as_ref(), connections, )) @@ -229,14 +245,15 @@ impl GlobalConnectionPoolConfig { } #[derive(Clone)] -pub struct ConnectionPool { +pub struct ConnectionPool { pub(crate) inner: PgPool, database_url: String, max_size: u32, - traced_connections: Option>, + pub(crate) traced_connections: Option>, + _marker: PhantomData, } -impl fmt::Debug for ConnectionPool { +impl fmt::Debug for ConnectionPool { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { // We don't print the `database_url`, as is may contain // sensitive information (e.g. database password). @@ -247,7 +264,7 @@ impl fmt::Debug for ConnectionPool { } } -impl ConnectionPool { +impl ConnectionPool { const TEST_ACQUIRE_TIMEOUT: Duration = Duration::from_secs(10); /// Returns a reference to the global configuration parameters applied for all DB pools. For consistency, these parameters @@ -261,14 +278,14 @@ impl ConnectionPool { /// /// Test pools trace their active connections. If acquiring a connection fails (e.g., with a timeout), /// the returned error will contain information on all active connections. - pub async fn test_pool() -> ConnectionPool { + pub async fn test_pool() -> ConnectionPool { const DEFAULT_CONNECTIONS: u32 = 50; // Expected to be enough for any unit test. Self::constrained_test_pool(DEFAULT_CONNECTIONS).await } /// Same as [`Self::test_pool()`], but with a configurable number of connections. This is useful to test /// behavior of components that rely on singleton / constrained pools in production. - pub async fn constrained_test_pool(connections: u32) -> ConnectionPool { + pub async fn constrained_test_pool(connections: u32) -> ConnectionPool { assert!(connections > 0, "Number of connections must be positive"); let mut builder = TestTemplate::empty() .expect("failed creating test template") @@ -285,18 +302,19 @@ impl ConnectionPool { } /// Initializes a builder for connection pools. - pub fn builder(database_url: &str, max_pool_size: u32) -> ConnectionPoolBuilder { + pub fn builder(database_url: &str, max_pool_size: u32) -> ConnectionPoolBuilder { ConnectionPoolBuilder { database_url: database_url.to_string(), max_size: max_pool_size, acquire_timeout: Duration::from_secs(30), // Default value used by `sqlx` statement_timeout: None, + _marker: Default::default(), } } /// Initializes a builder for connection pools with a single connection. This is equivalent /// to calling `Self::builder(db_url, 1)`. - pub fn singleton(database_url: &str) -> ConnectionPoolBuilder { + pub fn singleton(database_url: &str) -> ConnectionPoolBuilder { Self::builder(database_url, 1) } @@ -307,12 +325,6 @@ impl ConnectionPool { self.max_size } - /// Uses this pool to report Postgres-wide metrics (e.g., table sizes). Should be called sparingly to not spam - /// identical metrics from multiple places. The returned future runs indefinitely and should be spawned as a Tokio task. - pub async fn run_postgres_metrics_scraping(self, scrape_interval: Duration) { - PostgresMetrics::run_scraping(self, scrape_interval).await; - } - /// Creates a `StorageProcessor` entity over a recoverable connection. 
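For call sites, the visible API stays close to the old one; only the marker is new. A usage sketch follows, assuming the `zksync_dal::{ConnectionPool, Server, ServerDals}` re-exports used elsewhere in this patch; it is not a standalone program, and `example_component` is an illustrative requester name.

```rust
use zksync_dal::{ConnectionPool, Server, ServerDals};

async fn example(database_url: &str) -> anyhow::Result<()> {
    // A bounded pool for the server (core) database; the marker fixes which DALs are reachable.
    let pool = ConnectionPool::<Server>::builder(database_url, 50).build().await?;

    // A single-connection pool, e.g. for a background component.
    let singleton = ConnectionPool::<Server>::singleton(database_url).build().await?;
    assert_eq!(singleton.max_size(), 1);

    // Acquire a connection; tagging records the requester and call site for metrics/tracing.
    let mut storage = pool.access_storage_tagged("example_component").await?;
    let _protocol_versions = storage.protocol_versions_dal();
    Ok(())
}
```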
/// Upon a database outage connection will block the thread until /// it will be able to recover the connection (or, if connection cannot @@ -321,7 +333,7 @@ impl ConnectionPool { /// /// This method is intended to be used in crucial contexts, where the /// database access is must-have (e.g. block committer). - pub async fn access_storage(&self) -> anyhow::Result> { + pub async fn access_storage(&self) -> anyhow::Result> { self.access_storage_inner(None).await } @@ -335,7 +347,7 @@ impl ConnectionPool { pub fn access_storage_tagged( &self, requester: &'static str, - ) -> impl Future>> + '_ { + ) -> impl Future>> + '_ { let location = Location::caller(); async move { let tags = StorageProcessorTags { @@ -349,7 +361,7 @@ impl ConnectionPool { async fn access_storage_inner( &self, tags: Option, - ) -> anyhow::Result> { + ) -> anyhow::Result> { let acquire_latency = CONNECTION_METRICS.acquire.start(); let conn = self .acquire_connection_retried(tags.as_ref()) @@ -359,7 +371,8 @@ impl ConnectionPool { if let Some(tags) = &tags { CONNECTION_METRICS.acquire_tagged[&tags.requester].observe(elapsed); } - Ok(StorageProcessor::from_pool( + + Ok(StorageProcessor::::from_pool( conn, tags, self.traced_connections.as_deref(), @@ -424,17 +437,18 @@ mod tests { use assert_matches::assert_matches; use super::*; + use crate::utils::InternalMarker; #[tokio::test] async fn setting_statement_timeout() { let db_url = TestTemplate::empty() .unwrap() - .create_db(1) + .create_db::(1) .await .unwrap() .database_url; - let pool = ConnectionPool::singleton(&db_url) + let pool = ConnectionPool::::singleton(&db_url) .set_statement_timeout(Some(Duration::from_secs(1))) .build() .await diff --git a/core/lib/dal/src/healthcheck.rs b/core/lib/db_connection/src/healthcheck.rs similarity index 75% rename from core/lib/dal/src/healthcheck.rs rename to core/lib/db_connection/src/healthcheck.rs index d2289d7c61f6..cfc2a71c245a 100644 --- a/core/lib/dal/src/healthcheck.rs +++ b/core/lib/db_connection/src/healthcheck.rs @@ -1,7 +1,7 @@ use serde::Serialize; use zksync_health_check::{async_trait, CheckHealth, Health, HealthStatus}; -use crate::ConnectionPool; +use crate::{connection::ConnectionPool, processor::StorageMarker}; #[derive(Debug, Serialize)] struct ConnectionPoolHealthDetails { @@ -10,7 +10,7 @@ struct ConnectionPoolHealthDetails { } impl ConnectionPoolHealthDetails { - fn new(pool: &ConnectionPool) -> Self { + fn new(pool: &ConnectionPool) -> Self { Self { pool_size: pool.inner.size(), max_size: pool.max_size(), @@ -22,18 +22,18 @@ impl ConnectionPoolHealthDetails { // This guarantees that the app can use it's main "communication" channel. 
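`access_storage_tagged` stores a `&'static Location` next to the requester name so connection metrics and traces can point at the exact call site. The snippet below is a self-contained illustration of the underlying std mechanism (`#[track_caller]` plus `std::panic::Location::caller()`); the type and function names are illustrative rather than the patch's.

```rust
use std::panic::Location;

/// Tags in the spirit of the patch's `StorageProcessorTags`.
#[derive(Debug, Clone, Copy)]
struct Tags {
    requester: &'static str,
    location: &'static Location<'static>,
}

/// `#[track_caller]` makes `Location::caller()` report the *caller's* file and line,
/// not the line inside this function.
#[track_caller]
fn acquire_tagged(requester: &'static str) -> Tags {
    Tags {
        requester,
        location: Location::caller(),
    }
}

fn main() {
    let tags = acquire_tagged("test");
    println!(
        "requested by `{}` at {}:{}",
        tags.requester,
        tags.location.file(),
        tags.location.line()
    );
}
```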
// Used in the /health endpoint #[derive(Clone, Debug)] -pub struct ConnectionPoolHealthCheck { - connection_pool: ConnectionPool, +pub struct ConnectionPoolHealthCheck { + connection_pool: ConnectionPool, } -impl ConnectionPoolHealthCheck { - pub fn new(connection_pool: ConnectionPool) -> ConnectionPoolHealthCheck { +impl ConnectionPoolHealthCheck { + pub fn new(connection_pool: ConnectionPool) -> ConnectionPoolHealthCheck { Self { connection_pool } } } #[async_trait] -impl CheckHealth for ConnectionPoolHealthCheck { +impl CheckHealth for ConnectionPoolHealthCheck { fn name(&self) -> &'static str { "connection_pool" } diff --git a/core/lib/dal/src/instrument.rs b/core/lib/db_connection/src/instrument.rs similarity index 87% rename from core/lib/dal/src/instrument.rs rename to core/lib/db_connection/src/instrument.rs index 1938b39acd04..76517c06255f 100644 --- a/core/lib/dal/src/instrument.rs +++ b/core/lib/db_connection/src/instrument.rs @@ -21,8 +21,10 @@ use sqlx::{ use tokio::time::Instant; use crate::{ - connection::{ConnectionPool, StorageProcessor, StorageProcessorTags}, + connection::ConnectionPool, metrics::REQUEST_METRICS, + processor::{StorageMarker, StorageProcessor, StorageProcessorTags}, + utils::InternalMarker, }; type ThreadSafeDebug<'a> = dyn fmt::Debug + Send + Sync + 'a; @@ -51,7 +53,7 @@ impl fmt::Display for QueryArgs<'_> { } /// Extension trait for instrumenting `sqlx::query!` outputs. -pub(crate) trait InstrumentExt: Sized { +pub trait InstrumentExt: Sized { /// Instruments a query, assigning it the provided name. fn instrument(self, name: &'static str) -> Instrumented<'static, Self>; } @@ -132,7 +134,8 @@ impl<'a> InstrumentedData<'a> { let started_at = Instant::now(); tokio::pin!(query_future); - let slow_query_threshold = ConnectionPool::global_config().slow_query_threshold(); + let slow_query_threshold = + ConnectionPool::::global_config().slow_query_threshold(); let mut is_slow = false; let output = tokio::time::timeout_at(started_at + slow_query_threshold, &mut query_future).await; @@ -189,7 +192,7 @@ impl<'a> InstrumentedData<'a> { /// - Slow and erroneous queries are also reported using metrics (`dal.request.slow` and `dal.request.error`, /// respectively). The query name is included as a metric label; args are not included for obvious reasons. #[derive(Debug)] -pub(crate) struct Instrumented<'a, Q> { +pub struct Instrumented<'a, Q> { query: Q, data: InstrumentedData<'a>, } @@ -219,15 +222,18 @@ where A: 'q + IntoArguments<'q, Postgres>, { /// Executes an SQL statement using this query. - pub async fn execute(self, storage: &mut StorageProcessor<'_>) -> sqlx::Result { + pub async fn execute( + self, + storage: &mut StorageProcessor<'_, SM>, + ) -> sqlx::Result { let (conn, tags) = storage.conn_and_tags(); self.data.fetch(tags, self.query.execute(conn)).await } /// Fetches an optional row using this query. - pub async fn fetch_optional( + pub async fn fetch_optional( self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, SM>, ) -> Result, sqlx::Error> { let (conn, tags) = storage.conn_and_tags(); self.data.fetch(tags, self.query.fetch_optional(conn)).await @@ -240,7 +246,10 @@ where O: Send + Unpin + for<'r> FromRow<'r, PgRow>, { /// Fetches all rows using this query and collects them into a `Vec`. 
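With `InstrumentExt` now public, DAL code outside `zksync_db_connection` can wrap queries with a name, latency reporting, and slow-query detection (the real implementation races the query future against `tokio::time::timeout_at` using the global slow-query threshold and reports into metrics). A simplified, synchronous, self-contained sketch of the extension-trait shape; `get_miniblock_count` is an illustrative query name.

```rust
use std::time::{Duration, Instant};

/// Simplified counterpart of the patch's `Instrumented<'_, Q>`: a query plus a name.
struct Instrumented<Q> {
    name: &'static str,
    query: Q,
}

/// Extension trait in the spirit of the patch's `InstrumentExt`.
trait InstrumentExt: Sized {
    fn instrument(self, name: &'static str) -> Instrumented<Self>;
}

impl<Q> InstrumentExt for Q {
    fn instrument(self, name: &'static str) -> Instrumented<Self> {
        Instrumented { name, query: self }
    }
}

impl<R, Q: FnOnce() -> R> Instrumented<Q> {
    /// Runs the query, reporting latency and flagging it when it exceeds the threshold.
    fn execute(self, slow_threshold: Duration) -> R {
        let started_at = Instant::now();
        let output = (self.query)();
        let elapsed = started_at.elapsed();
        if elapsed > slow_threshold {
            eprintln!("query `{}` was slow: {elapsed:?}", self.name);
        } else {
            println!("query `{}` took {elapsed:?}", self.name);
        }
        output
    }
}

fn main() {
    let result = (|| -> Result<u64, String> {
        std::thread::sleep(Duration::from_millis(5));
        Ok(42)
    })
    .instrument("get_miniblock_count")
    .execute(Duration::from_millis(100));
    assert_eq!(result, Ok(42));
}
```

The blanket `impl<Q> InstrumentExt for Q` mirrors how an extension trait lets any prepared query gain `.instrument(name)` without per-type boilerplate.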
- pub async fn fetch_all(self, storage: &mut StorageProcessor<'_>) -> sqlx::Result> { + pub async fn fetch_all( + self, + storage: &mut StorageProcessor<'_, SM>, + ) -> sqlx::Result> { let (conn, tags) = storage.conn_and_tags(); self.data.fetch(tags, self.query.fetch_all(conn)).await } @@ -253,22 +262,28 @@ where A: 'q + Send + IntoArguments<'q, Postgres>, { /// Fetches an optional row using this query. - pub async fn fetch_optional( + pub async fn fetch_optional( self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, SM>, ) -> sqlx::Result> { let (conn, tags) = storage.conn_and_tags(); self.data.fetch(tags, self.query.fetch_optional(conn)).await } /// Fetches a single row using this query. - pub async fn fetch_one(self, storage: &mut StorageProcessor<'_>) -> sqlx::Result { + pub async fn fetch_one( + self, + storage: &mut StorageProcessor<'_, SM>, + ) -> sqlx::Result { let (conn, tags) = storage.conn_and_tags(); self.data.fetch(tags, self.query.fetch_one(conn)).await } /// Fetches all rows using this query and collects them into a `Vec`. - pub async fn fetch_all(self, storage: &mut StorageProcessor<'_>) -> sqlx::Result> { + pub async fn fetch_all( + self, + storage: &mut StorageProcessor<'_, SM>, + ) -> sqlx::Result> { let (conn, tags) = storage.conn_and_tags(); self.data.fetch(tags, self.query.fetch_all(conn)).await } @@ -276,14 +291,14 @@ where #[cfg(test)] mod tests { - use zksync_types::{MiniblockNumber, H256}; + use zksync_basic_types::{MiniblockNumber, H256}; use super::*; - use crate::ConnectionPool; + use crate::{connection::ConnectionPool, utils::InternalMarker}; #[tokio::test] async fn instrumenting_erroneous_query() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; // Add `vlog::init()` here to debug this test let mut conn = pool.access_storage().await.unwrap(); @@ -299,7 +314,7 @@ mod tests { #[tokio::test] async fn instrumenting_slow_query() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; // Add `vlog::init()` here to debug this test let mut conn = pool.access_storage().await.unwrap(); diff --git a/core/lib/db_connection/src/lib.rs b/core/lib/db_connection/src/lib.rs new file mode 100644 index 000000000000..2e05648b2b7e --- /dev/null +++ b/core/lib/db_connection/src/lib.rs @@ -0,0 +1,8 @@ +pub mod connection; +pub mod healthcheck; +pub mod instrument; +pub mod metrics; +pub mod processor; +#[macro_use] +pub mod macro_utils; +pub mod utils; diff --git a/core/lib/dal/src/macro_utils.rs b/core/lib/db_connection/src/macro_utils.rs similarity index 99% rename from core/lib/dal/src/macro_utils.rs rename to core/lib/db_connection/src/macro_utils.rs index 088c48331ebf..23406ff06ee7 100644 --- a/core/lib/dal/src/macro_utils.rs +++ b/core/lib/db_connection/src/macro_utils.rs @@ -1,6 +1,7 @@ //! Miscellaneous helper macros. /// Writes to a [`String`]. This is equivalent to `write!`, but without the need to `unwrap()` the result. +#[macro_export] macro_rules! write_str { ($buffer:expr, $($args:tt)+) => {{ use std::fmt::Write as _; @@ -11,6 +12,7 @@ macro_rules! write_str { /// Writing a line to a [`String`]. This is equivalent to `writeln!`, but without the need /// to `unwrap()` the result. +#[macro_export] macro_rules! writeln_str { ($buffer:expr, $($args:tt)+) => {{ use std::fmt::Write as _; @@ -24,6 +26,7 @@ macro_rules! writeln_str { /// /// We use tail recursion and accumulate (possibly substituted) parts in an accumulator. 
This is because `query_as!` would not /// work otherwise; its input must be fully expanded. +#[macro_export] macro_rules! interpolate_query { // Terminal clause: we have a final substitution. (query_type: $query_type:ty; acc: $acc:expr; args: $($args:expr,)*; (_,) => $var:literal,) => { @@ -77,6 +80,7 @@ macro_rules! interpolate_query { /// The number of arguments may differ across variants (e.g., one of variants may introduce one or more additional args). /// /// See the crate code for examples of usage. +#[macro_export] macro_rules! match_query_as { ( $query_type:ty, diff --git a/core/lib/db_connection/src/metrics.rs b/core/lib/db_connection/src/metrics.rs new file mode 100644 index 000000000000..4d90ce188fa4 --- /dev/null +++ b/core/lib/db_connection/src/metrics.rs @@ -0,0 +1,96 @@ +use std::{thread, time::Duration}; + +use vise::{ + Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Histogram, LabeledFamily, + LatencyObserver, Metrics, Unit, +}; + +/// Request-related DB metrics. +#[derive(Debug, Metrics)] +#[metrics(prefix = "sql")] +pub(crate) struct RequestMetrics { + /// Latency of a DB request. + #[metrics(buckets = Buckets::LATENCIES, labels = ["method"])] + pub request: LabeledFamily<&'static str, Histogram>, + /// Counter of slow DB requests. + #[metrics(labels = ["method"])] + pub request_slow: LabeledFamily<&'static str, Counter>, + /// Counter of errored DB requests. + #[metrics(labels = ["method"])] + pub request_error: LabeledFamily<&'static str, Counter>, +} + +#[vise::register] +pub(crate) static REQUEST_METRICS: vise::Global = vise::Global::new(); + +/// Reporter of latency for DAL methods consisting of multiple DB queries. If there's a single query, +/// use `.instrument().report_latency()` on it instead. +/// +/// Should be created at the start of the relevant method and dropped when the latency needs to be reported. +#[derive(Debug)] +pub struct MethodLatency(Option>); + +impl MethodLatency { + pub fn new(name: &'static str) -> Self { + Self(Some(REQUEST_METRICS.request[&name].start())) + } +} + +impl Drop for MethodLatency { + fn drop(&mut self) { + if !thread::panicking() { + let observer = self.0.take().unwrap(); + // `unwrap()` is safe; the observer is only taken out on drop + observer.observe(); + } + } +} + +/// Kind of a connection error. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "kind", rename_all = "snake_case")] +pub(crate) enum ConnectionErrorKind { + Timeout, + Database, + Io, + Other, +} + +impl From<&sqlx::Error> for ConnectionErrorKind { + fn from(err: &sqlx::Error) -> Self { + match err { + sqlx::Error::PoolTimedOut => Self::Timeout, + sqlx::Error::Database(_) => Self::Database, + sqlx::Error::Io(_) => Self::Io, + _ => Self::Other, + } + } +} + +const POOL_SIZE_BUCKETS: Buckets = Buckets::linear(0.0..=100.0, 10.0); + +/// Connection-related metrics. +#[derive(Debug, Metrics)] +#[metrics(prefix = "sql_connection")] +pub(crate) struct ConnectionMetrics { + /// Latency of acquiring a DB connection. + #[metrics(buckets = Buckets::LATENCIES)] + pub acquire: Histogram, + /// Latency of acquiring a DB connection, tagged with the requester label. + #[metrics(buckets = Buckets::LATENCIES, labels = ["requester"])] + pub acquire_tagged: LabeledFamily<&'static str, Histogram>, + /// Current DB pool size. + #[metrics(buckets = POOL_SIZE_BUCKETS)] + pub pool_size: Histogram, + /// Current number of idle connections in the DB pool. 
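`MethodLatency` is a drop guard: it is created at the top of a DAL method that issues several queries and reports elapsed time when it goes out of scope, skipping the report if the thread is panicking. Below is a self-contained sketch of the same idea, with `println!` standing in for the vise histogram and `sync_mempool` as an illustrative method name.

```rust
use std::{thread, time::Instant};

/// Drop guard in the spirit of the patch's `MethodLatency`: create it at the start of a
/// multi-query method; the elapsed time is reported when it goes out of scope.
struct MethodLatency {
    name: &'static str,
    started_at: Option<Instant>,
}

impl MethodLatency {
    fn new(name: &'static str) -> Self {
        Self { name, started_at: Some(Instant::now()) }
    }
}

impl Drop for MethodLatency {
    fn drop(&mut self) {
        // Skip reporting while unwinding due to a panic, as the real implementation does.
        if !thread::panicking() {
            // `unwrap()` is safe: the start time is only taken out here, on drop.
            let started_at = self.started_at.take().unwrap();
            println!("`{}` took {:?}", self.name, started_at.elapsed());
        }
    }
}

fn sync_mempool() {
    let _latency = MethodLatency::new("sync_mempool");
    // ...several DB queries would go here...
    thread::sleep(std::time::Duration::from_millis(2));
} // `_latency` is dropped here and the latency is reported

fn main() {
    sync_mempool();
}
```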
+ #[metrics(buckets = POOL_SIZE_BUCKETS)] + pub pool_idle: Histogram, + /// Number of errors occurred when acquiring a DB connection. + pub pool_acquire_error: Family, + /// Lifetime of a DB connection, tagged with the requester label. + #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds, labels = ["requester"])] + pub lifetime: LabeledFamily<&'static str, Histogram>, +} + +#[vise::register] +pub(crate) static CONNECTION_METRICS: vise::Global = vise::Global::new(); diff --git a/core/lib/dal/src/connection/processor.rs b/core/lib/db_connection/src/processor.rs similarity index 86% rename from core/lib/dal/src/connection/processor.rs rename to core/lib/db_connection/src/processor.rs index ce5da9a8a8f9..d0bf1e4eb381 100644 --- a/core/lib/dal/src/connection/processor.rs +++ b/core/lib/db_connection/src/processor.rs @@ -11,11 +11,11 @@ use std::{ use sqlx::{pool::PoolConnection, types::chrono, Connection, PgConnection, Postgres, Transaction}; -use crate::{metrics::CONNECTION_METRICS, ConnectionPool}; +use crate::{connection::ConnectionPool, metrics::CONNECTION_METRICS, utils::InternalMarker}; /// Tags that can be associated with a connection. #[derive(Debug, Clone, Copy, PartialEq)] -pub(crate) struct StorageProcessorTags { +pub struct StorageProcessorTags { pub requester: &'static str, pub location: &'static Location<'static>, } @@ -54,7 +54,7 @@ impl fmt::Debug for TracedConnectionInfo { /// Traced active connections for a connection pool. #[derive(Default)] -pub(super) struct TracedConnections { +pub struct TracedConnections { connections: Mutex>, next_id: AtomicUsize, } @@ -113,7 +113,9 @@ impl Drop for PooledStorageProcessor<'_> { let lifetime = self.created_at.elapsed(); CONNECTION_METRICS.lifetime[&tags.requester].observe(lifetime); - if lifetime > ConnectionPool::global_config().long_connection_threshold() { + if lifetime + > ConnectionPool::::global_config().long_connection_threshold() + { let file = tags.location.file(); let line = tags.location.line(); tracing::info!( @@ -137,22 +139,53 @@ enum StorageProcessorInner<'a> { }, } +/// Marker trait for restricting using all possible types as a storage marker. +pub trait StorageMarker {} + /// Storage processor is the main storage interaction point. /// It holds down the connection (either direct or pooled) to the database /// and provide methods to obtain different storage schema. #[derive(Debug)] -pub struct StorageProcessor<'a> { +pub struct StorageProcessor<'a, SM: StorageMarker> { inner: StorageProcessorInner<'a>, + _marker: std::marker::PhantomData, } -impl<'a> StorageProcessor<'a> { - pub async fn start_transaction(&mut self) -> sqlx::Result> { +impl<'a, SM: StorageMarker> StorageProcessor<'a, SM> { + /// Creates a `StorageProcessor` using a pool of connections. + /// This method borrows one of the connections from the pool, and releases it + /// after `drop`. 
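Test pools trace their active connections so that a failed acquisition can report who currently holds connections, from where, and for how long. Below is a self-contained sketch of the bookkeeping behind `TracedConnections` (ids from an atomic counter, per-connection info behind a mutex); field and method details are simplified relative to the patch, and `state_keeper` is an illustrative requester name.

```rust
use std::{
    collections::HashMap,
    panic::Location,
    sync::{
        atomic::{AtomicUsize, Ordering},
        Mutex,
    },
    time::Instant,
};

#[derive(Debug, Clone, Copy)]
struct ConnectionInfo {
    requester: &'static str,
    location: &'static Location<'static>,
    acquired_at: Instant,
}

/// Simplified counterpart of the patch's `TracedConnections`.
#[derive(Default)]
struct TracedConnections {
    connections: Mutex<HashMap<usize, ConnectionInfo>>,
    next_id: AtomicUsize,
}

impl TracedConnections {
    #[track_caller]
    fn acquire(&self, requester: &'static str) -> usize {
        let id = self.next_id.fetch_add(1, Ordering::SeqCst);
        let info = ConnectionInfo {
            requester,
            location: Location::caller(),
            acquired_at: Instant::now(),
        };
        self.connections.lock().unwrap().insert(id, info);
        id
    }

    fn release(&self, id: usize) {
        self.connections.lock().unwrap().remove(&id);
    }

    /// What a pool could print if acquiring a new connection times out.
    fn dump_active(&self) {
        for (id, info) in self.connections.lock().unwrap().iter() {
            println!(
                "connection {id}: requested by `{}` at {}:{}, held for {:?}",
                info.requester,
                info.location.file(),
                info.location.line(),
                info.acquired_at.elapsed()
            );
        }
    }
}

fn main() {
    let traced = TracedConnections::default();
    let id = traced.acquire("state_keeper");
    traced.dump_active();
    traced.release(id);
}
```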
+ pub(crate) fn from_pool( + connection: PoolConnection, + tags: Option, + traced_connections: Option<&'a TracedConnections>, + ) -> Self { + let created_at = Instant::now(); + let inner = StorageProcessorInner::Pooled(PooledStorageProcessor { + connection, + tags, + created_at, + traced: traced_connections.map(|connections| { + let id = connections.acquire(tags, created_at); + (connections, id) + }), + }); + Self { + inner, + _marker: Default::default(), + } + } + + pub async fn start_transaction(&mut self) -> sqlx::Result> { let (conn, tags) = self.conn_and_tags(); let inner = StorageProcessorInner::Transaction { transaction: conn.begin().await?, tags, }; - Ok(StorageProcessor { inner }) + Ok(StorageProcessor { + inner, + _marker: Default::default(), + }) } /// Checks if the `StorageProcessor` is currently within database transaction. @@ -172,32 +205,11 @@ impl<'a> StorageProcessor<'a> { } } - /// Creates a `StorageProcessor` using a pool of connections. - /// This method borrows one of the connections from the pool, and releases it - /// after `drop`. - pub(super) fn from_pool( - connection: PoolConnection, - tags: Option, - traced_connections: Option<&'a TracedConnections>, - ) -> Self { - let created_at = Instant::now(); - let inner = StorageProcessorInner::Pooled(PooledStorageProcessor { - connection, - tags, - created_at, - traced: traced_connections.map(|connections| { - let id = connections.acquire(tags, created_at); - (connections, id) - }), - }); - Self { inner } - } - - pub(crate) fn conn(&mut self) -> &mut PgConnection { + pub fn conn(&mut self) -> &mut PgConnection { self.conn_and_tags().0 } - pub(crate) fn conn_and_tags(&mut self) -> (&mut PgConnection, Option<&StorageProcessorTags>) { + pub fn conn_and_tags(&mut self) -> (&mut PgConnection, Option<&StorageProcessorTags>) { match &mut self.inner { StorageProcessorInner::Pooled(pooled) => (&mut pooled.connection, pooled.tags.as_ref()), StorageProcessorInner::Transaction { transaction, tags } => (transaction, *tags), @@ -207,11 +219,11 @@ impl<'a> StorageProcessor<'a> { #[cfg(test)] mod tests { - use crate::ConnectionPool; + use crate::{connection::ConnectionPool, utils::InternalMarker}; #[tokio::test] async fn processor_tags_propagate_to_transactions() { - let pool = ConnectionPool::constrained_test_pool(1).await; + let pool = ConnectionPool::::constrained_test_pool(1).await; let mut connection = pool.access_storage_tagged("test").await.unwrap(); assert!(!connection.in_transaction()); let original_tags = *connection.conn_and_tags().1.unwrap(); @@ -224,7 +236,7 @@ mod tests { #[tokio::test] async fn tracing_connections() { - let pool = ConnectionPool::constrained_test_pool(1).await; + let pool = ConnectionPool::::constrained_test_pool(1).await; let connection = pool.access_storage_tagged("test").await.unwrap(); let traced = pool.traced_connections.as_deref().unwrap(); { diff --git a/core/lib/dal/src/time_utils.rs b/core/lib/db_connection/src/utils.rs similarity index 80% rename from core/lib/dal/src/time_utils.rs rename to core/lib/db_connection/src/utils.rs index 0ede5e6fc576..bed4f0ffea69 100644 --- a/core/lib/dal/src/time_utils.rs +++ b/core/lib/db_connection/src/utils.rs @@ -2,6 +2,13 @@ use std::time::Duration; use sqlx::{postgres::types::PgInterval, types::chrono::NaiveTime}; +use crate::processor::StorageMarker; + +#[derive(Debug)] +pub(crate) struct InternalMarker; + +impl StorageMarker for InternalMarker {} + pub fn duration_to_naive_time(duration: Duration) -> NaiveTime { let total_seconds = duration.as_secs() as 
u32; NaiveTime::from_hms_opt( diff --git a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs index 8ab728e8ce38..c2c91d7be287 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs @@ -5,7 +5,7 @@ use zksync_test_account::TxType; use zksync_types::{ ethabi::{Contract, Token}, get_code_key, get_known_code_key, - protocol_version::ProtocolUpgradeTxCommonData, + protocol_upgrade::ProtocolUpgradeTxCommonData, Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, diff --git a/core/lib/snapshots_applier/Cargo.toml b/core/lib/snapshots_applier/Cargo.toml index 533858b88fc2..656bfc94a624 100644 --- a/core/lib/snapshots_applier/Cargo.toml +++ b/core/lib/snapshots_applier/Cargo.toml @@ -10,6 +10,7 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] +zksync_db_connection = { path = "../../lib/db_connection" } zksync_dal = { path = "../../lib/dal" } zksync_health_check = { path = "../../lib/health_check" } zksync_types = { path = "../../lib/types" } diff --git a/core/lib/snapshots_applier/src/lib.rs b/core/lib/snapshots_applier/src/lib.rs index 1dd7793d456c..97a344e44ce4 100644 --- a/core/lib/snapshots_applier/src/lib.rs +++ b/core/lib/snapshots_applier/src/lib.rs @@ -6,7 +6,7 @@ use anyhow::Context as _; use async_trait::async_trait; use serde::Serialize; use tokio::sync::Semaphore; -use zksync_dal::{ConnectionPool, SqlxError, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, SqlxError, StorageProcessor}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_object_store::{ObjectStore, ObjectStoreError}; use zksync_types::{ @@ -186,7 +186,7 @@ impl SnapshotsApplierConfig { /// - Storage contains at least one L1 batch pub async fn run( self, - connection_pool: &ConnectionPool, + connection_pool: &ConnectionPool, main_node_client: &dyn SnapshotsApplierMainNodeClient, blob_store: &dyn ObjectStore, ) -> anyhow::Result<()> { @@ -234,7 +234,7 @@ impl SnapshotsApplierConfig { /// Applying application-level storage snapshots to the Postgres storage. #[derive(Debug)] struct SnapshotsApplier<'a> { - connection_pool: &'a ConnectionPool, + connection_pool: &'a ConnectionPool, main_node_client: &'a dyn SnapshotsApplierMainNodeClient, blob_store: &'a dyn ObjectStore, applied_snapshot_status: SnapshotRecoveryStatus, @@ -246,7 +246,7 @@ struct SnapshotsApplier<'a> { impl<'a> SnapshotsApplier<'a> { /// Recovers [`SnapshotRecoveryStatus`] from the storage and the main node. 
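A good share of the churn in this part of the patch is a pure module rename on the consumer side: protocol-upgrade types now come from `zksync_types::protocol_upgrade` instead of `zksync_types::protocol_version`, so dependent crates only adjust their `use` paths, for example:

```rust
// Before this patch:
use zksync_types::protocol_version::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData};

// After this patch:
use zksync_types::protocol_upgrade::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData};
```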
async fn prepare_applied_snapshot_status( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, main_node_client: &dyn SnapshotsApplierMainNodeClient, ) -> Result<(SnapshotRecoveryStatus, bool), SnapshotsApplierError> { let latency = @@ -307,7 +307,7 @@ impl<'a> SnapshotsApplier<'a> { } async fn load_snapshot( - connection_pool: &'a ConnectionPool, + connection_pool: &'a ConnectionPool, main_node_client: &'a dyn SnapshotsApplierMainNodeClient, blob_store: &'a dyn ObjectStore, health_updater: &'a HealthUpdater, @@ -428,7 +428,7 @@ impl<'a> SnapshotsApplier<'a> { async fn recover_factory_deps( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, ) -> Result<(), SnapshotsApplierError> { let latency = METRICS.initial_stage_duration[&InitialStage::ApplyFactoryDeps].start(); @@ -472,7 +472,7 @@ impl<'a> SnapshotsApplier<'a> { &self, chunk_id: u64, storage_logs: &[SnapshotStorageLog], - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, ) -> Result<(), SnapshotsApplierError> { storage .storage_logs_dedup_dal() @@ -490,7 +490,7 @@ impl<'a> SnapshotsApplier<'a> { &self, chunk_id: u64, storage_logs: &[SnapshotStorageLog], - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, ) -> Result<(), SnapshotsApplierError> { storage .storage_logs_dal() diff --git a/core/lib/snapshots_applier/src/tests/mod.rs b/core/lib/snapshots_applier/src/tests/mod.rs index 5dc7a26bdc40..c721c0daaf1e 100644 --- a/core/lib/snapshots_applier/src/tests/mod.rs +++ b/core/lib/snapshots_applier/src/tests/mod.rs @@ -24,9 +24,9 @@ async fn snapshots_creator_can_successfully_recover_db( with_object_store_errors: bool, ) { let pool = if let Some(pool_size) = pool_size { - ConnectionPool::constrained_test_pool(pool_size).await + ConnectionPool::::constrained_test_pool(pool_size).await } else { - ConnectionPool::test_pool().await + ConnectionPool::::test_pool().await }; let expected_status = mock_recovery_status(); let storage_logs = random_storage_logs(expected_status.l1_batch_number, 200); @@ -98,7 +98,7 @@ async fn snapshots_creator_can_successfully_recover_db( #[tokio::test] async fn applier_errors_after_genesis() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; // We don't want to depend on the core crate, so instead we cheaply emulate it. 
let mut storage = pool.access_storage().await.unwrap(); @@ -154,7 +154,7 @@ async fn applier_errors_after_genesis() { #[tokio::test] async fn applier_errors_without_snapshots() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let object_store_factory = ObjectStoreFactory::mock(); let object_store = object_store_factory.create_store().await; let client = MockMainNodeClient::default(); @@ -167,7 +167,7 @@ async fn applier_errors_without_snapshots() { #[tokio::test] async fn applier_returns_error_on_fatal_object_store_error() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let expected_status = mock_recovery_status(); let storage_logs = random_storage_logs(expected_status.l1_batch_number, 100); let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await; @@ -189,7 +189,7 @@ async fn applier_returns_error_on_fatal_object_store_error() { #[tokio::test] async fn applier_returns_error_after_too_many_object_store_retries() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let expected_status = mock_recovery_status(); let storage_logs = random_storage_logs(expected_status.l1_batch_number, 100); let (object_store, client) = prepare_clients(&expected_status, &storage_logs).await; @@ -211,7 +211,7 @@ async fn applier_returns_error_after_too_many_object_store_retries() { #[tokio::test] async fn recovering_tokens() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let expected_status = mock_recovery_status(); let tokens = mock_tokens(); let mut storage_logs = random_storage_logs(expected_status.l1_batch_number, 200); diff --git a/core/lib/state/src/postgres/mod.rs b/core/lib/state/src/postgres/mod.rs index 9e8d97befe75..6e48124c002f 100644 --- a/core/lib/state/src/postgres/mod.rs +++ b/core/lib/state/src/postgres/mod.rs @@ -11,7 +11,7 @@ use tokio::{ watch, }, }; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_types::{L1BatchNumber, MiniblockNumber, StorageKey, StorageValue, H256}; use self::metrics::{Method, ValuesUpdateStage, CACHE_METRICS, STORAGE_METRICS}; @@ -150,7 +150,7 @@ impl ValuesCache { &self, from_miniblock: MiniblockNumber, to_miniblock: MiniblockNumber, - connection: &mut StorageProcessor<'_>, + connection: &mut StorageProcessor<'_, Server>, ) -> anyhow::Result<()> { const MAX_MINIBLOCKS_LAG: u32 = 5; @@ -295,7 +295,7 @@ impl PostgresStorageCaches { pub fn configure_storage_values_cache( &mut self, capacity: u64, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, ) -> PostgresStorageCachesTask { assert!( capacity > 0, @@ -344,7 +344,7 @@ impl PostgresStorageCaches { /// An asynchronous task that updates the VM storage values cache. #[derive(Debug)] pub struct PostgresStorageCachesTask { - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, values_cache: ValuesCache, command_receiver: UnboundedReceiver, } @@ -391,7 +391,7 @@ impl PostgresStorageCachesTask { #[derive(Debug)] pub struct PostgresStorage<'a> { rt_handle: Handle, - connection: StorageProcessor<'a>, + connection: StorageProcessor<'a, Server>, miniblock_number: MiniblockNumber, l1_batch_number_for_miniblock: L1BatchNumber, pending_l1_batch_number: L1BatchNumber, @@ -407,7 +407,7 @@ impl<'a> PostgresStorage<'a> { /// Panics on Postgres errors. 
pub fn new( rt_handle: Handle, - connection: StorageProcessor<'a>, + connection: StorageProcessor<'a, Server>, block_number: MiniblockNumber, consider_new_l1_batch: bool, ) -> Self { @@ -429,7 +429,7 @@ impl<'a> PostgresStorage<'a> { /// Propagates Postgres errors. pub async fn new_async( rt_handle: Handle, - mut connection: StorageProcessor<'a>, + mut connection: StorageProcessor<'a, Server>, block_number: MiniblockNumber, consider_new_l1_batch: bool, ) -> anyhow::Result> { diff --git a/core/lib/state/src/postgres/tests.rs b/core/lib/state/src/postgres/tests.rs index c9ff6ae546a2..8dc8f439128a 100644 --- a/core/lib/state/src/postgres/tests.rs +++ b/core/lib/state/src/postgres/tests.rs @@ -14,7 +14,7 @@ use super::*; use crate::test_utils::{create_l1_batch, create_miniblock, gen_storage_logs, prepare_postgres}; fn test_postgres_storage_basics( - pool: &ConnectionPool, + pool: &ConnectionPool, rt_handle: Handle, cache_initial_writes: bool, ) { @@ -126,7 +126,7 @@ fn test_postgres_storage_basics( #[tokio::test] async fn postgres_storage_basics() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; tokio::task::spawn_blocking(move || { test_postgres_storage_basics(&pool, Handle::current(), false); }) @@ -136,7 +136,7 @@ async fn postgres_storage_basics() { #[tokio::test] async fn postgres_storage_with_initial_writes_cache() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; tokio::task::spawn_blocking(move || { test_postgres_storage_basics(&pool, Handle::current(), true); }) @@ -145,7 +145,7 @@ async fn postgres_storage_with_initial_writes_cache() { } fn test_postgres_storage_after_sealing_miniblock( - pool: &ConnectionPool, + pool: &ConnectionPool, rt_handle: Handle, consider_new_l1_batch: bool, ) { @@ -192,7 +192,7 @@ fn test_postgres_storage_after_sealing_miniblock( #[tokio::test] async fn postgres_storage_after_sealing_miniblock() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; tokio::task::spawn_blocking(move || { println!("Considering new L1 batch"); test_postgres_storage_after_sealing_miniblock(&pool, Handle::current(), true); @@ -203,7 +203,7 @@ async fn postgres_storage_after_sealing_miniblock() { .unwrap(); } -fn test_factory_deps_cache(pool: &ConnectionPool, rt_handle: Handle) { +fn test_factory_deps_cache(pool: &ConnectionPool, rt_handle: Handle) { let mut connection = rt_handle.block_on(pool.access_storage()).unwrap(); rt_handle.block_on(prepare_postgres(&mut connection)); @@ -248,14 +248,14 @@ fn test_factory_deps_cache(pool: &ConnectionPool, rt_handle: Handle) { #[tokio::test] async fn using_factory_deps_cache() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let handle = Handle::current(); tokio::task::spawn_blocking(move || test_factory_deps_cache(&pool, handle)) .await .unwrap(); } -fn test_initial_writes_cache(pool: &ConnectionPool, rt_handle: Handle) { +fn test_initial_writes_cache(pool: &ConnectionPool, rt_handle: Handle) { let connection = rt_handle.block_on(pool.access_storage()).unwrap(); let caches = PostgresStorageCaches::new(1_024, 4 * 1_024 * 1_024); let mut storage = PostgresStorage::new(rt_handle, connection, MiniblockNumber(0), false) @@ -354,7 +354,7 @@ fn test_initial_writes_cache(pool: &ConnectionPool, rt_handle: Handle) { #[tokio::test] async fn using_initial_writes_cache() { - let pool = ConnectionPool::test_pool().await; + let pool = 
ConnectionPool::::test_pool().await; let handle = Handle::current(); tokio::task::spawn_blocking(move || test_initial_writes_cache(&pool, handle)) .await @@ -384,7 +384,7 @@ impl ValuesCache { } } -fn test_values_cache(pool: &ConnectionPool, rt_handle: Handle) { +fn test_values_cache(pool: &ConnectionPool, rt_handle: Handle) { let mut caches = PostgresStorageCaches::new(1_024, 1_024); let _ = caches.configure_storage_values_cache(1_024 * 1_024, pool.clone()); // We cannot use an update task since it requires having concurrent DB connections @@ -499,7 +499,7 @@ fn test_values_cache(pool: &ConnectionPool, rt_handle: Handle) { #[tokio::test] async fn using_values_cache() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let handle = Handle::current(); tokio::task::spawn_blocking(move || test_values_cache(&pool, handle)) .await @@ -508,7 +508,11 @@ async fn using_values_cache() { /// (Sort of) fuzzes [`ValuesCache`] by comparing outputs of [`PostgresStorage`] with and without caching /// on randomly generated `read_value()` queries. -fn mini_fuzz_values_cache_inner(rng: &mut impl Rng, pool: &ConnectionPool, mut rt_handle: Handle) { +fn mini_fuzz_values_cache_inner( + rng: &mut impl Rng, + pool: &ConnectionPool, + mut rt_handle: Handle, +) { let mut caches = PostgresStorageCaches::new(1_024, 1_024); let _ = caches.configure_storage_values_cache(1_024 * 1_024, pool.clone()); let values_cache = caches.values.as_ref().unwrap().cache.clone(); @@ -592,7 +596,7 @@ fn mini_fuzz_values_cache_inner(rng: &mut impl Rng, pool: &ConnectionPool, mut r #[tokio::test] async fn mini_fuzz_values_cache() { const RNG_SEED: u64 = 123; - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let handle = Handle::current(); let mut rng = StdRng::seed_from_u64(RNG_SEED); diff --git a/core/lib/state/src/rocksdb/mod.rs b/core/lib/state/src/rocksdb/mod.rs index e2b5bb66c4d0..3b9c8f78c2bd 100644 --- a/core/lib/state/src/rocksdb/mod.rs +++ b/core/lib/state/src/rocksdb/mod.rs @@ -30,7 +30,7 @@ use std::{ use anyhow::Context as _; use itertools::{Either, Itertools}; use tokio::sync::watch; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_storage::{db::NamedColumnFamily, RocksDB}; use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256, U256}; use zksync_utils::{h256_to_u256, u256_to_h256}; @@ -165,7 +165,7 @@ impl RocksbStorageBuilder { /// in Postgres. pub async fn synchronize( self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, stop_receiver: &watch::Receiver, ) -> anyhow::Result> { let mut inner = self.0; @@ -183,7 +183,7 @@ impl RocksbStorageBuilder { /// Propagates RocksDB and Postgres errors. 
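The `zksync_state` Postgres tests above keep their structure: the async test grabs `Handle::current()`, moves it into `tokio::task::spawn_blocking`, and the blocking body calls back into async code via `Handle::block_on` (now against the typed pool). Below is a self-contained sketch of that pattern, assuming tokio with the multi-thread runtime, `time`, and `macros` features.

```rust
use std::time::Duration;
use tokio::runtime::Handle;

/// A blocking helper in the style of the `zksync_state` tests: it receives a runtime
/// `Handle` so it can run async code (e.g. acquiring a DB connection) via `block_on`.
fn blocking_test_body(rt_handle: Handle) {
    // Stand-in for `rt_handle.block_on(pool.access_storage())` in the real tests.
    let value = rt_handle.block_on(async {
        tokio::time::sleep(Duration::from_millis(5)).await;
        42
    });
    assert_eq!(value, 42);
}

#[tokio::main]
async fn main() {
    // Blocking code must not run directly on an async worker thread, so it is wrapped
    // in `spawn_blocking` with the current runtime handle passed in.
    let handle = Handle::current();
    tokio::task::spawn_blocking(move || blocking_test_body(handle))
        .await
        .unwrap();
}
```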
pub async fn rollback( mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, last_l1_batch_to_keep: L1BatchNumber, ) -> anyhow::Result<()> { self.0.rollback(storage, last_l1_batch_to_keep).await @@ -230,7 +230,7 @@ impl RocksdbStorage { async fn update_from_postgres( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, stop_receiver: &watch::Receiver, ) -> Result<(), RocksdbSyncError> { let mut current_l1_batch_number = self @@ -316,7 +316,7 @@ impl RocksdbStorage { async fn apply_storage_logs( &mut self, storage_logs: HashMap, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, ) -> anyhow::Result<()> { let db = self.db.clone(); let processed_logs = @@ -357,7 +357,7 @@ impl RocksdbStorage { async fn save_missing_enum_indices( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, ) -> anyhow::Result<()> { let (true, Some(start_from)) = ( self.enum_index_migration_chunk_size > 0, @@ -481,7 +481,7 @@ impl RocksdbStorage { async fn rollback( &mut self, - connection: &mut StorageProcessor<'_>, + connection: &mut StorageProcessor<'_, Server>, last_l1_batch_to_keep: L1BatchNumber, ) -> anyhow::Result<()> { tracing::info!("Rolling back state keeper storage to L1 batch #{last_l1_batch_to_keep}..."); diff --git a/core/lib/state/src/rocksdb/recovery.rs b/core/lib/state/src/rocksdb/recovery.rs index 1584a23822cf..d26c32bc5f52 100644 --- a/core/lib/state/src/rocksdb/recovery.rs +++ b/core/lib/state/src/rocksdb/recovery.rs @@ -4,7 +4,7 @@ use std::ops; use anyhow::Context as _; use tokio::sync::watch; -use zksync_dal::{storage_logs_dal::StorageRecoveryLogEntry, StorageProcessor}; +use zksync_dal::{storage_logs_dal::StorageRecoveryLogEntry, Server, ServerDals, StorageProcessor}; use zksync_types::{ snapshots::{uniform_hashed_keys_chunk, SnapshotRecoveryStatus}, L1BatchNumber, MiniblockNumber, H256, @@ -30,7 +30,7 @@ impl RocksdbStorage { /// Returns the next L1 batch that should be fed to the storage. pub(super) async fn ensure_ready( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, desired_log_chunk_size: u64, stop_receiver: &watch::Receiver, ) -> Result { @@ -65,7 +65,7 @@ impl RocksdbStorage { /// (it would be considered complete even if it failed in the middle). async fn recover_from_snapshot( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, snapshot_recovery: &SnapshotRecoveryStatus, desired_log_chunk_size: u64, stop_receiver: &watch::Receiver, @@ -140,7 +140,7 @@ impl RocksdbStorage { async fn recover_factory_deps( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, snapshot_recovery: &SnapshotRecoveryStatus, ) -> anyhow::Result<()> { // We don't expect that many factory deps; that's why we recover factory deps in any case. 
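As in the `zksync_state` hunks above, downstream code changes only in its types: helpers that took any `StorageProcessor<'_>` now require the server-typed one, with `Server` and the `ServerDals` accessor trait re-exported from `zksync_dal`. Excerpted from the `RocksdbStorage::rollback` hunk above, with the body elided:

```rust
// Before: the helper was not tied to a particular database.
async fn rollback(
    &mut self,
    connection: &mut StorageProcessor<'_>,
    last_l1_batch_to_keep: L1BatchNumber,
) -> anyhow::Result<()> { /* ... */ }

// After: only a processor for the server (core) database type-checks.
async fn rollback(
    &mut self,
    connection: &mut StorageProcessor<'_, Server>,
    last_l1_batch_to_keep: L1BatchNumber,
) -> anyhow::Result<()> { /* ... */ }
```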
@@ -169,7 +169,7 @@ impl RocksdbStorage { } async fn load_key_chunks( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, snapshot_recovery: &SnapshotRecoveryStatus, desired_log_chunk_size: u64, ) -> anyhow::Result> { @@ -219,7 +219,7 @@ impl RocksdbStorage { async fn recover_logs_chunk( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, snapshot_miniblock: MiniblockNumber, key_chunk: ops::RangeInclusive, ) -> anyhow::Result<()> { diff --git a/core/lib/state/src/rocksdb/tests.rs b/core/lib/state/src/rocksdb/tests.rs index a7bd03a76f86..52f35e6f58b8 100644 --- a/core/lib/state/src/rocksdb/tests.rs +++ b/core/lib/state/src/rocksdb/tests.rs @@ -5,7 +5,7 @@ use std::fmt; use assert_matches::assert_matches; use tempfile::TempDir; use test_casing::test_casing; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use zksync_types::{MiniblockNumber, StorageLog}; use super::*; @@ -76,7 +76,10 @@ async fn rocksdb_storage_basics() { } } -async fn sync_test_storage(dir: &TempDir, conn: &mut StorageProcessor<'_>) -> RocksdbStorage { +async fn sync_test_storage( + dir: &TempDir, + conn: &mut StorageProcessor<'_, Server>, +) -> RocksdbStorage { let (_stop_sender, stop_receiver) = watch::channel(false); RocksdbStorage::builder(dir.path()) .await @@ -89,7 +92,7 @@ async fn sync_test_storage(dir: &TempDir, conn: &mut StorageProcessor<'_>) -> Ro #[tokio::test] async fn rocksdb_storage_syncing_with_postgres() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); prepare_postgres(&mut conn).await; let storage_logs = gen_storage_logs(20..40); @@ -107,7 +110,7 @@ async fn rocksdb_storage_syncing_with_postgres() { #[tokio::test] async fn rocksdb_storage_syncing_fault_tolerance() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); prepare_postgres(&mut conn).await; let storage_logs = gen_storage_logs(100..200); @@ -156,7 +159,7 @@ async fn rocksdb_storage_syncing_fault_tolerance() { } async fn insert_factory_deps( - conn: &mut StorageProcessor<'_>, + conn: &mut StorageProcessor<'_, Server>, miniblock_number: MiniblockNumber, indices: impl Iterator, ) { @@ -171,7 +174,7 @@ async fn insert_factory_deps( #[tokio::test] async fn rocksdb_storage_revert() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); prepare_postgres(&mut conn).await; let storage_logs = gen_storage_logs(20..40); @@ -242,7 +245,7 @@ async fn rocksdb_storage_revert() { #[tokio::test] async fn rocksdb_enum_index_migration() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); prepare_postgres(&mut conn).await; let storage_logs = gen_storage_logs(20..40); @@ -327,7 +330,7 @@ async fn rocksdb_enum_index_migration() { #[test_casing(4, [RocksdbStorage::DESIRED_LOG_CHUNK_SIZE, 20, 5, 1])] #[tokio::test] async fn low_level_snapshot_recovery(log_chunk_size: u64) { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let (snapshot_recovery, mut storage_logs) = prepare_postgres_for_snapshot_recovery(&mut conn).await; @@ -359,7 +362,7 @@ async fn 
low_level_snapshot_recovery(log_chunk_size: u64) { #[tokio::test] async fn recovering_factory_deps_from_snapshot() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let (snapshot_recovery, _) = prepare_postgres_for_snapshot_recovery(&mut conn).await; @@ -386,7 +389,7 @@ async fn recovering_factory_deps_from_snapshot() { #[tokio::test] async fn recovering_from_snapshot_and_following_logs() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let (snapshot_recovery, mut storage_logs) = prepare_postgres_for_snapshot_recovery(&mut conn).await; @@ -453,7 +456,7 @@ async fn recovering_from_snapshot_and_following_logs() { #[tokio::test] async fn recovery_fault_tolerance() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let (_, storage_logs) = prepare_postgres_for_snapshot_recovery(&mut conn).await; let log_chunk_size = storage_logs.len() as u64 / 5; diff --git a/core/lib/state/src/test_utils.rs b/core/lib/state/src/test_utils.rs index 77c9e9029ea8..420a79ae089a 100644 --- a/core/lib/state/src/test_utils.rs +++ b/core/lib/state/src/test_utils.rs @@ -2,7 +2,7 @@ use std::ops; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_types::{ block::{L1BatchHeader, MiniblockHeader}, snapshots::SnapshotRecoveryStatus, @@ -10,7 +10,7 @@ use zksync_types::{ StorageKey, StorageLog, H256, }; -pub(crate) async fn prepare_postgres(conn: &mut StorageProcessor<'_>) { +pub(crate) async fn prepare_postgres(conn: &mut StorageProcessor<'_, Server>) { if conn.blocks_dal().is_genesis_needed().await.unwrap() { conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -68,7 +68,7 @@ pub(crate) fn gen_storage_logs(indices: ops::Range) -> Vec { #[allow(clippy::default_trait_access)] // ^ `BaseSystemContractsHashes::default()` would require a new direct dependency pub(crate) async fn create_miniblock( - conn: &mut StorageProcessor<'_>, + conn: &mut StorageProcessor<'_, Server>, miniblock_number: MiniblockNumber, block_logs: Vec, ) { @@ -100,7 +100,7 @@ pub(crate) async fn create_miniblock( #[allow(clippy::default_trait_access)] // ^ `BaseSystemContractsHashes::default()` would require a new direct dependency pub(crate) async fn create_l1_batch( - conn: &mut StorageProcessor<'_>, + conn: &mut StorageProcessor<'_, Server>, l1_batch_number: L1BatchNumber, logs_for_initial_writes: &[StorageLog], ) { @@ -123,7 +123,7 @@ pub(crate) async fn create_l1_batch( } pub(crate) async fn prepare_postgres_for_snapshot_recovery( - conn: &mut StorageProcessor<'_>, + conn: &mut StorageProcessor<'_, Server>, ) -> (SnapshotRecoveryStatus, Vec) { conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index ee2f7944f56f..1fa4809c37aa 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -11,14 +11,13 @@ pub use event::{VmEvent, VmEventGroupKey}; use fee::encoding_len; pub use l1::L1TxCommonData; pub use l2::L2TxCommonData; -pub use protocol_version::{ProtocolUpgrade, ProtocolVersion, ProtocolVersionId}; +pub use protocol_upgrade::{ProtocolUpgrade, ProtocolVersion}; use serde::{Deserialize, Serialize}; pub use storage::*; pub use 
tx::{primitives::*, Execute}; -pub use vm_version::VmVersion; -pub use zksync_basic_types::*; +pub use zksync_basic_types::{protocol_version::ProtocolVersionId, vm_version::VmVersion, *}; -use crate::{l2::TransactionType, protocol_version::ProtocolUpgradeTxCommonData}; +use crate::{l2::TransactionType, protocol_upgrade::ProtocolUpgradeTxCommonData}; pub use crate::{Nonce, H256, U256, U64}; pub type SerialId = u64; @@ -36,7 +35,7 @@ pub mod l1; pub mod l2; pub mod l2_to_l1_log; pub mod priority_op_onchain_data; -pub mod protocol_version; +pub mod protocol_upgrade; pub mod pubdata_da; pub mod snapshots; pub mod storage; @@ -53,7 +52,6 @@ pub mod helpers; pub mod proto; pub mod transaction_request; pub mod utils; -pub mod vm_version; /// Denotes the first byte of the special zkSync's EIP-712-signed transaction. pub const EIP_712_TX_TYPE: u8 = 0x71; diff --git a/core/lib/types/src/protocol_version.rs b/core/lib/types/src/protocol_upgrade.rs similarity index 69% rename from core/lib/types/src/protocol_version.rs rename to core/lib/types/src/protocol_upgrade.rs index b204ecc01dba..4d17cd426d8e 100644 --- a/core/lib/types/src/protocol_version.rs +++ b/core/lib/types/src/protocol_upgrade.rs @@ -1,257 +1,18 @@ use std::convert::{TryFrom, TryInto}; -use num_enum::TryFromPrimitive; use serde::{Deserialize, Serialize}; +use zksync_basic_types::protocol_version::{L1VerifierConfig, ProtocolVersionId, VerifierParams}; use zksync_contracts::BaseSystemContractsHashes; use zksync_utils::u256_to_account_address; use crate::{ ethabi::{decode, encode, ParamType, Token}, helpers::unix_timestamp_ms, - web3::{ - contract::{tokens::Detokenize, Error}, - signing::keccak256, - }, - Address, Execute, ExecuteTransactionCommon, Log, Transaction, TransactionType, VmVersion, H256, + web3::signing::keccak256, + Address, Execute, ExecuteTransactionCommon, Log, Transaction, TransactionType, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, }; -#[repr(u16)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[derive(TryFromPrimitive, Serialize, Deserialize)] -pub enum ProtocolVersionId { - Version0 = 0, - Version1, - Version2, - Version3, - Version4, - Version5, - Version6, - Version7, - Version8, - Version9, - Version10, - Version11, - Version12, - Version13, - Version14, - Version15, - Version16, - Version17, - Version18, - Version19, - Version20, - Version21, - Version22, -} - -impl ProtocolVersionId { - pub fn latest() -> Self { - Self::Version21 - } - - pub fn next() -> Self { - Self::Version22 - } - - /// Returns VM version to be used by API for this protocol version. - /// We temporary support only two latest VM versions for API. 
- pub fn into_api_vm_version(self) -> VmVersion { - match self { - ProtocolVersionId::Version0 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version1 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version2 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version3 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version4 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version5 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version6 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version7 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version8 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version9 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version10 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version11 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version12 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version13 => VmVersion::VmVirtualBlocks, - ProtocolVersionId::Version14 => VmVersion::VmVirtualBlocks, - ProtocolVersionId::Version15 => VmVersion::VmVirtualBlocks, - ProtocolVersionId::Version16 => VmVersion::VmVirtualBlocksRefundsEnhancement, - ProtocolVersionId::Version17 => VmVersion::VmVirtualBlocksRefundsEnhancement, - ProtocolVersionId::Version18 => VmVersion::VmBoojumIntegration, - ProtocolVersionId::Version19 => VmVersion::VmBoojumIntegration, - ProtocolVersionId::Version20 => VmVersion::Vm1_4_1, - ProtocolVersionId::Version21 => VmVersion::Vm1_4_2, - ProtocolVersionId::Version22 => VmVersion::Vm1_4_2, - } - } - - // It is possible that some external nodes do not store protocol versions for versions below 9. - // That's why we assume that whenever a protocol version is not present, version 9 is to be used. - pub fn last_potentially_undefined() -> Self { - Self::Version9 - } - - pub fn is_pre_boojum(&self) -> bool { - self <= &Self::Version17 - } - - pub fn is_pre_shared_bridge(&self) -> bool { - // TODO: review this when we actually deploy shared bridge - true - } - - pub fn is_1_4_0(&self) -> bool { - self >= &ProtocolVersionId::Version18 && self < &ProtocolVersionId::Version20 - } - - pub fn is_1_4_1(&self) -> bool { - self == &ProtocolVersionId::Version20 - } - - pub fn is_post_1_4_1(&self) -> bool { - self >= &ProtocolVersionId::Version20 - } - - pub fn is_post_1_4_2(&self) -> bool { - self >= &ProtocolVersionId::Version21 - } - - pub fn is_pre_1_4_2(&self) -> bool { - self < &ProtocolVersionId::Version21 - } -} - -impl Default for ProtocolVersionId { - fn default() -> Self { - Self::latest() - } -} - -impl TryFrom for ProtocolVersionId { - type Error = String; - - fn try_from(value: U256) -> Result { - if value > U256::from(u16::MAX) { - Err(format!("unknown protocol version ID: {}", value)) - } else { - (value.as_u32() as u16) - .try_into() - .map_err(|_| format!("unknown protocol version ID: {}", value)) - } - } -} - -// TODO: Do we even need this? I reckon we could merge this with `ProtocolVersionId`. 
-#[repr(u16)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -#[derive(TryFromPrimitive, Serialize, Deserialize)] -pub enum FriProtocolVersionId { - Version0 = 0, - Version1, - Version2, - Version3, - Version4, - Version5, - Version6, - Version7, - Version8, - Version9, - Version10, - Version11, - Version12, - Version13, - Version14, - Version15, - Version16, - Version17, - Version18, - Version19, - Version20, - Version21, - Version22, -} - -impl FriProtocolVersionId { - pub fn latest() -> Self { - Self::Version21 - } - - pub fn next() -> Self { - Self::Version22 - } -} - -impl Default for FriProtocolVersionId { - fn default() -> Self { - Self::latest() - } -} - -impl From for FriProtocolVersionId { - fn from(protocol_version: ProtocolVersionId) -> Self { - match protocol_version { - ProtocolVersionId::Version0 => FriProtocolVersionId::Version0, - ProtocolVersionId::Version1 => FriProtocolVersionId::Version1, - ProtocolVersionId::Version2 => FriProtocolVersionId::Version2, - ProtocolVersionId::Version3 => FriProtocolVersionId::Version3, - ProtocolVersionId::Version4 => FriProtocolVersionId::Version4, - ProtocolVersionId::Version5 => FriProtocolVersionId::Version5, - ProtocolVersionId::Version6 => FriProtocolVersionId::Version6, - ProtocolVersionId::Version7 => FriProtocolVersionId::Version7, - ProtocolVersionId::Version8 => FriProtocolVersionId::Version8, - ProtocolVersionId::Version9 => FriProtocolVersionId::Version9, - ProtocolVersionId::Version10 => FriProtocolVersionId::Version10, - ProtocolVersionId::Version11 => FriProtocolVersionId::Version11, - ProtocolVersionId::Version12 => FriProtocolVersionId::Version12, - ProtocolVersionId::Version13 => FriProtocolVersionId::Version13, - ProtocolVersionId::Version14 => FriProtocolVersionId::Version14, - ProtocolVersionId::Version15 => FriProtocolVersionId::Version15, - ProtocolVersionId::Version16 => FriProtocolVersionId::Version16, - ProtocolVersionId::Version17 => FriProtocolVersionId::Version17, - ProtocolVersionId::Version18 => FriProtocolVersionId::Version18, - ProtocolVersionId::Version19 => FriProtocolVersionId::Version19, - ProtocolVersionId::Version20 => FriProtocolVersionId::Version20, - ProtocolVersionId::Version21 => FriProtocolVersionId::Version21, - ProtocolVersionId::Version22 => FriProtocolVersionId::Version22, - } - } -} - -#[derive(Debug, Clone, Copy, Default, Eq, PartialEq, Serialize, Deserialize)] -pub struct VerifierParams { - pub recursion_node_level_vk_hash: H256, - pub recursion_leaf_level_vk_hash: H256, - pub recursion_circuits_set_vks_hash: H256, -} - -impl Detokenize for VerifierParams { - fn from_tokens(tokens: Vec) -> Result { - if tokens.len() != 1 { - return Err(Error::Abi(crate::ethabi::Error::InvalidData)); - } - - let tokens = match tokens[0].clone() { - Token::Tuple(tokens) => tokens, - _ => return Err(Error::Abi(crate::ethabi::Error::InvalidData)), - }; - - let vks_vec: Vec = tokens - .into_iter() - .map(|token| H256::from_slice(&token.into_fixed_bytes().unwrap())) - .collect(); - Ok(VerifierParams { - recursion_node_level_vk_hash: vks_vec[0], - recursion_leaf_level_vk_hash: vks_vec[1], - recursion_circuits_set_vks_hash: vks_vec[2], - }) - } -} - -#[derive(Debug, Clone, Copy, Default, Eq, PartialEq, Serialize, Deserialize)] -pub struct L1VerifierConfig { - pub params: VerifierParams, - pub recursion_scheduler_level_vk_hash: H256, -} - /// Represents a call to be made during governance operation. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct Call { @@ -802,36 +563,6 @@ impl TryFrom for ProtocolUpgradeTx { } } -impl From for VmVersion { - fn from(value: ProtocolVersionId) -> Self { - match value { - ProtocolVersionId::Version0 => VmVersion::M5WithoutRefunds, - ProtocolVersionId::Version1 => VmVersion::M5WithoutRefunds, - ProtocolVersionId::Version2 => VmVersion::M5WithRefunds, - ProtocolVersionId::Version3 => VmVersion::M5WithRefunds, - ProtocolVersionId::Version4 => VmVersion::M6Initial, - ProtocolVersionId::Version5 => VmVersion::M6BugWithCompressionFixed, - ProtocolVersionId::Version6 => VmVersion::M6BugWithCompressionFixed, - ProtocolVersionId::Version7 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version8 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version9 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version10 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version11 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version12 => VmVersion::Vm1_3_2, - ProtocolVersionId::Version13 => VmVersion::VmVirtualBlocks, - ProtocolVersionId::Version14 => VmVersion::VmVirtualBlocks, - ProtocolVersionId::Version15 => VmVersion::VmVirtualBlocks, - ProtocolVersionId::Version16 => VmVersion::VmVirtualBlocksRefundsEnhancement, - ProtocolVersionId::Version17 => VmVersion::VmVirtualBlocksRefundsEnhancement, - ProtocolVersionId::Version18 => VmVersion::VmBoojumIntegration, - ProtocolVersionId::Version19 => VmVersion::VmBoojumIntegration, - ProtocolVersionId::Version20 => VmVersion::Vm1_4_1, - ProtocolVersionId::Version21 => VmVersion::Vm1_4_2, - ProtocolVersionId::Version22 => VmVersion::Vm1_4_2, - } - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/vm_utils/src/lib.rs b/core/lib/vm_utils/src/lib.rs index 5a661e433fbd..12b3be659e5d 100644 --- a/core/lib/vm_utils/src/lib.rs +++ b/core/lib/vm_utils/src/lib.rs @@ -7,7 +7,7 @@ use multivm::{ VmInstance, }; use tokio::runtime::Handle; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, StorageProcessor}; use zksync_state::{PostgresStorage, StoragePtr, StorageView, WriteStorage}; use zksync_types::{L1BatchNumber, L2ChainId, Transaction}; @@ -21,7 +21,7 @@ pub type VmAndStorage<'a> = ( pub fn create_vm( rt_handle: Handle, l1_batch_number: L1BatchNumber, - mut connection: StorageProcessor<'_>, + mut connection: StorageProcessor<'_, Server>, l2_chain_id: L2ChainId, ) -> anyhow::Result { let l1_batch_params_provider = rt_handle diff --git a/core/lib/vm_utils/src/storage.rs b/core/lib/vm_utils/src/storage.rs index 3c6d8d0221ba..bc22ec7e99f4 100644 --- a/core/lib/vm_utils/src/storage.rs +++ b/core/lib/vm_utils/src/storage.rs @@ -7,7 +7,7 @@ use multivm::{ zk_evm_latest::ethereum_types::H256, }; use zksync_contracts::BaseSystemContracts; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_types::{ block::MiniblockHeader, fee_model::BatchFeeInput, snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, ZKPORTER_IS_AVAILABLE, @@ -89,7 +89,7 @@ pub struct L1BatchParamsProvider { } impl L1BatchParamsProvider { - pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result { + pub async fn new(storage: &mut StorageProcessor<'_, Server>) -> anyhow::Result { let snapshot = storage .snapshot_recovery_dal() .get_applied_snapshot_status() @@ -101,7 +101,7 @@ impl L1BatchParamsProvider { /// if necessary. 
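Since `ProtocolVersionId` and `VmVersion` now live in `zksync_basic_types` but are re-exported from `zksync_types` (see the `pub use zksync_basic_types::{...}` hunk above), most downstream imports keep compiling unchanged. A small sketch of the two equivalent import paths:

```rust
// Canonical home after the move:
use zksync_basic_types::protocol_version::ProtocolVersionId;
// Equivalent import that keeps working thanks to the re-export in `zksync_types`:
// use zksync_types::ProtocolVersionId;

fn latest_is_default() -> bool {
    // `latest()` and the `Default` impl moved together with the enum; behavior is unchanged.
    ProtocolVersionId::default() == ProtocolVersionId::latest()
}
```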
pub async fn wait_for_l1_batch_params( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, number: L1BatchNumber, ) -> anyhow::Result<(H256, u64)> { let first_l1_batch = if let Some(snapshot) = &self.snapshot { @@ -122,7 +122,7 @@ impl L1BatchParamsProvider { } async fn wait_for_l1_batch_params_unchecked( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, number: L1BatchNumber, ) -> anyhow::Result<(H256, u64)> { // If the state root is not known yet, this duration will be used to back off in the while loops @@ -148,7 +148,7 @@ impl L1BatchParamsProvider { pub async fn load_l1_batch_protocol_version( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, l1_batch_number: L1BatchNumber, ) -> anyhow::Result> { if let Some(snapshot) = &self.snapshot { @@ -172,7 +172,7 @@ impl L1BatchParamsProvider { /// Returns a header of the first miniblock in the specified L1 batch regardless of whether the batch is sealed or not. pub async fn load_first_miniblock_in_batch( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, l1_batch_number: L1BatchNumber, ) -> anyhow::Result> { let miniblock_number = self @@ -196,7 +196,7 @@ impl L1BatchParamsProvider { #[doc(hidden)] // public for testing purposes pub async fn load_number_of_first_miniblock_in_batch( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, l1_batch_number: L1BatchNumber, ) -> anyhow::Result> { if l1_batch_number == L1BatchNumber(0) { @@ -232,7 +232,7 @@ impl L1BatchParamsProvider { /// Loads VM-related L1 batch parameters for the specified batch. pub async fn load_l1_batch_params( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, first_miniblock_in_batch: &FirstMiniblockInBatch, validation_computational_gas_limit: u32, chain_id: L2ChainId, diff --git a/core/lib/zksync_core/Cargo.toml b/core/lib/zksync_core/Cargo.toml index fc2d751c2473..a7f254c26355 100644 --- a/core/lib/zksync_core/Cargo.toml +++ b/core/lib/zksync_core/Cargo.toml @@ -17,6 +17,8 @@ zksync_state = { path = "../state" } vm_utils = { path = "../vm_utils" } zksync_types = { path = "../types" } zksync_dal = { path = "../dal" } +prover_dal = { path = "../../../prover/prover_dal" } +zksync_db_connection = { path = "../db_connection" } zksync_config = { path = "../config" } zksync_env_config = { path = "../env_config" } zksync_protobuf_config = { path = "../protobuf_config" } diff --git a/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs b/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs index 553f6f2ad45e..75db409dfe05 100644 --- a/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs +++ b/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs @@ -1,16 +1,16 @@ use actix_web::web; -use zksync_dal::connection::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; #[derive(Debug, Clone)] pub struct RestApi { - pub(super) master_connection_pool: ConnectionPool, - pub(super) replica_connection_pool: ConnectionPool, + pub(super) master_connection_pool: ConnectionPool, + pub(super) replica_connection_pool: ConnectionPool, } impl RestApi { pub fn new( - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, + master_connection_pool: ConnectionPool, + replica_connection_pool: ConnectionPool, ) -> Self { Self { master_connection_pool, diff --git 
a/core/lib/zksync_core/src/api_server/contract_verification/api_impl.rs b/core/lib/zksync_core/src/api_server/contract_verification/api_impl.rs index 810f8d32e48b..b164d640a645 100644 --- a/core/lib/zksync_core/src/api_server/contract_verification/api_impl.rs +++ b/core/lib/zksync_core/src/api_server/contract_verification/api_impl.rs @@ -3,6 +3,7 @@ use actix_web::{ HttpResponse, Result as ActixResult, }; use serde::Serialize; +use zksync_dal::ServerDals; use zksync_types::{contract_verification_api::VerificationIncomingRequest, Address}; use super::{api_decl::RestApi, metrics::METRICS}; diff --git a/core/lib/zksync_core/src/api_server/contract_verification/mod.rs b/core/lib/zksync_core/src/api_server/contract_verification/mod.rs index 27d36429d035..3f6dd27e73bd 100644 --- a/core/lib/zksync_core/src/api_server/contract_verification/mod.rs +++ b/core/lib/zksync_core/src/api_server/contract_verification/mod.rs @@ -4,7 +4,7 @@ use actix_cors::Cors; use actix_web::{dev::Server, web, App, HttpResponse, HttpServer}; use tokio::{sync::watch, task::JoinHandle}; use zksync_config::configs::api::ContractVerificationApiConfig; -use zksync_dal::connection::ConnectionPool; +use zksync_dal::ConnectionPool; use zksync_utils::panic_notify::{spawn_panic_handler, ThreadPanicNotify}; use self::api_decl::RestApi; @@ -43,8 +43,8 @@ fn start_server(api: RestApi, bind_to: SocketAddr) -> Server { /// Start HTTP REST API pub fn start_server_thread_detached( - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, + master_connection_pool: ConnectionPool, + replica_connection_pool: ConnectionPool, api_config: ContractVerificationApiConfig, mut stop_receiver: watch::Receiver, ) -> JoinHandle> { diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs index 147c8743bdb9..2159bf851869 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs @@ -16,7 +16,7 @@ use multivm::{ VmInstance, }; use tokio::runtime::Handle; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_state::{PostgresStorage, ReadStorage, StoragePtr, StorageView, WriteStorage}; use zksync_system_constants::{ SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, @@ -51,7 +51,7 @@ struct Sandbox<'a> { impl<'a> Sandbox<'a> { async fn new( - mut connection: StorageProcessor<'a>, + mut connection: StorageProcessor<'a, Server>, shared_args: TxSharedArgs, execution_args: &'a TxExecutionArgs, block_args: BlockArgs, @@ -108,7 +108,7 @@ impl<'a> Sandbox<'a> { } async fn load_l2_block_info( - connection: &mut StorageProcessor<'_>, + connection: &mut StorageProcessor<'_, Server>, is_pending_block: bool, resolved_block_info: &ResolvedBlockInfo, ) -> anyhow::Result<(L2BlockEnv, Option)> { @@ -291,7 +291,7 @@ pub(super) fn apply_vm_in_sandbox( // current L1 prices for gas or pubdata. adjust_pubdata_price: bool, execution_args: &TxExecutionArgs, - connection_pool: &ConnectionPool, + connection_pool: &ConnectionPool, tx: Transaction, block_args: BlockArgs, apply: impl FnOnce( @@ -353,7 +353,7 @@ struct StoredL2BlockInfo { impl StoredL2BlockInfo { /// If `miniblock_hash` is `None`, it needs to be fetched from the storage. 
async fn new( - connection: &mut StorageProcessor<'_>, + connection: &mut StorageProcessor<'_, Server>, miniblock_number: MiniblockNumber, miniblock_hash: Option, ) -> anyhow::Result { @@ -427,7 +427,7 @@ impl BlockArgs { async fn resolve_block_info( &self, - connection: &mut StorageProcessor<'_>, + connection: &mut StorageProcessor<'_, Server>, ) -> anyhow::Result { let (state_l2_block_number, vm_l1_batch_number, l1_batch_timestamp); diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs index f194ce3bbb1b..03936c3822e3 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs @@ -8,7 +8,7 @@ use multivm::{ MultiVMTracer, }; use tracing::{span, Level}; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use zksync_types::{ fee::TransactionExecutionMetrics, l2::L2Tx, ExecuteTransactionCommon, Nonce, PackedEthSignature, Transaction, U256, @@ -108,7 +108,7 @@ impl TransactionExecutor { // current L1 prices for gas or pubdata. adjust_pubdata_price: bool, execution_args: TxExecutionArgs, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, tx: Transaction, block_args: BlockArgs, custom_tracers: Vec, @@ -169,7 +169,7 @@ impl TransactionExecutor { &self, vm_permit: VmPermit, shared_args: TxSharedArgs, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, mut tx: L2Tx, block_args: BlockArgs, vm_execution_cache_misses_limit: Option, diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs index 4e40cacae958..224afd08847a 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs @@ -2,7 +2,7 @@ use std::{sync::Arc, time::Duration}; use anyhow::Context as _; use tokio::runtime::Handle; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_state::{PostgresStorage, PostgresStorageCaches, ReadStorage, StorageView}; use zksync_system_constants::PUBLISH_BYTECODE_OVERHEAD; use zksync_types::{ @@ -149,7 +149,7 @@ impl VmConcurrencyLimiter { } async fn get_pending_state( - connection: &mut StorageProcessor<'_>, + connection: &mut StorageProcessor<'_, Server>, ) -> anyhow::Result<(api::BlockId, MiniblockNumber)> { let block_id = api::BlockId::Number(api::BlockNumber::Pending); let resolved_block_number = connection @@ -164,7 +164,7 @@ async fn get_pending_state( /// Returns the number of the pubdata that the transaction will spend on factory deps. 
pub(super) async fn get_pubdata_for_factory_deps( _vm_permit: &VmPermit, - connection_pool: &ConnectionPool, + connection_pool: &ConnectionPool, factory_deps: &[Vec], storage_caches: PostgresStorageCaches, ) -> anyhow::Result { @@ -243,7 +243,7 @@ pub(crate) struct BlockStartInfo { } impl BlockStartInfo { - pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result { + pub async fn new(storage: &mut StorageProcessor<'_, Server>) -> anyhow::Result { let snapshot_recovery = storage .snapshot_recovery_dal() .get_applied_snapshot_status() @@ -296,7 +296,9 @@ pub(crate) struct BlockArgs { } impl BlockArgs { - pub(crate) async fn pending(connection: &mut StorageProcessor<'_>) -> anyhow::Result { + pub(crate) async fn pending( + connection: &mut StorageProcessor<'_, Server>, + ) -> anyhow::Result { let (block_id, resolved_block_number) = get_pending_state(connection).await?; Ok(Self { block_id, @@ -307,7 +309,7 @@ impl BlockArgs { /// Loads block information from DB. pub async fn new( - connection: &mut StorageProcessor<'_>, + connection: &mut StorageProcessor<'_, Server>, block_id: api::BlockId, start_info: BlockStartInfo, ) -> Result { diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs index 473f3d897424..144c3b45268f 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs @@ -11,7 +11,7 @@ use crate::{ #[tokio::test] async fn creating_block_args() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await @@ -66,7 +66,7 @@ async fn creating_block_args() { #[tokio::test] async fn creating_block_args_after_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; @@ -158,7 +158,7 @@ async fn creating_block_args_after_snapshot_recovery() { #[tokio::test] async fn instantiating_vm() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await @@ -173,7 +173,7 @@ async fn instantiating_vm() { test_instantiating_vm(pool.clone(), block_args).await; } -async fn test_instantiating_vm(pool: ConnectionPool, block_args: BlockArgs) { +async fn test_instantiating_vm(pool: ConnectionPool, block_args: BlockArgs) { let (vm_concurrency_limiter, _) = VmConcurrencyLimiter::new(1); let vm_permit = vm_concurrency_limiter.acquire().await.unwrap(); let transaction = create_l2_transaction(10, 100).into(); diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs index 462977a8d36c..0bc4db366d51 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs @@ -10,7 +10,7 @@ use multivm::{ vm_latest::HistoryDisabled, MultiVMTracer, }; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use 
zksync_types::{l2::L2Tx, Transaction, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS}; use super::{ @@ -33,7 +33,7 @@ pub(crate) enum ValidationError { impl TransactionExecutor { pub(crate) async fn validate_tx_in_sandbox( &self, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, vm_permit: VmPermit, tx: L2Tx, shared_args: TxSharedArgs, @@ -117,7 +117,7 @@ impl TransactionExecutor { /// trusted to change between validation and execution in general case, but /// sometimes we can safely rely on them to not change often. async fn get_validation_params( - connection: &mut StorageProcessor<'_>, + connection: &mut StorageProcessor<'_, Server>, tx: &L2Tx, computational_gas_limit: u32, ) -> anyhow::Result { diff --git a/core/lib/zksync_core/src/api_server/tree/tests.rs b/core/lib/zksync_core/src/api_server/tree/tests.rs index bf1a69bb33af..a279253816ae 100644 --- a/core/lib/zksync_core/src/api_server/tree/tests.rs +++ b/core/lib/zksync_core/src/api_server/tree/tests.rs @@ -4,7 +4,7 @@ use std::net::Ipv4Addr; use assert_matches::assert_matches; use tempfile::TempDir; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use super::*; use crate::metadata_calculator::tests::{ @@ -13,7 +13,7 @@ use crate::metadata_calculator::tests::{ #[tokio::test] async fn merkle_tree_api() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await; let api_addr = (Ipv4Addr::LOCALHOST, 0).into(); @@ -75,7 +75,7 @@ async fn merkle_tree_api() { #[tokio::test] async fn local_merkle_tree_client() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await; diff --git a/core/lib/zksync_core/src/api_server/tx_sender/master_pool_sink.rs b/core/lib/zksync_core/src/api_server/tx_sender/master_pool_sink.rs index f6fc7e729371..355055371d9d 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/master_pool_sink.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/master_pool_sink.rs @@ -1,4 +1,4 @@ -use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool}; +use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool, Server, ServerDals}; use zksync_types::{fee::TransactionExecutionMetrics, l2::L2Tx}; use super::{tx_sink::TxSink, SubmitTxError}; @@ -7,11 +7,11 @@ use crate::metrics::{TxStage, APP_METRICS}; /// Wrapper for the master DB pool that allows to submit transactions to the mempool. 
#[derive(Debug)] pub struct MasterPoolSink { - master_pool: ConnectionPool, + master_pool: ConnectionPool, } impl MasterPoolSink { - pub fn new(master_pool: ConnectionPool) -> Self { + pub fn new(master_pool: ConnectionPool) -> Self { Self { master_pool } } } diff --git a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs index cdaf6561445e..2634e5c00b85 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs @@ -10,7 +10,9 @@ use multivm::{ }; use zksync_config::configs::{api::Web3JsonRpcConfig, chain::StateKeeperConfig}; use zksync_contracts::BaseSystemContracts; -use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool, StorageProcessor}; +use zksync_dal::{ + transactions_dal::L2TxSubmissionResult, ConnectionPool, Server, ServerDals, StorageProcessor, +}; use zksync_state::PostgresStorageCaches; use zksync_types::{ fee::{Fee, TransactionExecutionMetrics}, @@ -145,7 +147,7 @@ pub struct TxSenderBuilder { /// Shared TxSender configuration. config: TxSenderConfig, /// Connection pool for read requests. - replica_connection_pool: ConnectionPool, + replica_connection_pool: ConnectionPool, /// Sink to be used to persist transactions. tx_sink: Arc, /// Batch sealer used to check whether transaction can be executed by the sequencer. @@ -155,7 +157,7 @@ pub struct TxSenderBuilder { impl TxSenderBuilder { pub fn new( config: TxSenderConfig, - replica_connection_pool: ConnectionPool, + replica_connection_pool: ConnectionPool, tx_sink: Arc, ) -> Self { Self { @@ -238,7 +240,7 @@ pub struct TxSenderInner { pub(super) sender_config: TxSenderConfig, /// Sink to be used to persist transactions. pub tx_sink: Arc, - pub replica_connection_pool: ConnectionPool, + pub replica_connection_pool: ConnectionPool, // Used to keep track of gas prices for the fee ticker. 
pub batch_fee_input_provider: Arc, pub(super) api_contracts: ApiContracts, @@ -269,7 +271,7 @@ impl TxSender { self.0.storage_caches.clone() } - async fn acquire_replica_connection(&self) -> anyhow::Result> { + async fn acquire_replica_connection(&self) -> anyhow::Result> { self.0 .replica_connection_pool .access_storage_tagged("api") diff --git a/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs b/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs index a7cccbd536eb..c83a6d5ee8bb 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs @@ -6,7 +6,7 @@ use std::{ }; use tokio::sync::{watch, RwLock}; -use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool}; +use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool, Server, ServerDals}; use zksync_types::{ api::{BlockId, Transaction, TransactionDetails, TransactionId}, fee::TransactionExecutionMetrics, @@ -64,7 +64,7 @@ impl TxCache { async fn run_updates( self, - pool: ConnectionPool, + pool: ConnectionPool, stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { const UPDATE_INTERVAL: Duration = Duration::from_secs(1); @@ -204,7 +204,7 @@ impl TxProxy { pub fn run_account_nonce_sweeper( &self, - pool: ConnectionPool, + pool: ConnectionPool, stop_receiver: watch::Receiver, ) -> impl Future> { let tx_cache = self.tx_cache.clone(); diff --git a/core/lib/zksync_core/src/api_server/tx_sender/tests.rs b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs index d3790f82c5ae..e62033a6357e 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/tests.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs @@ -10,7 +10,7 @@ use crate::{ }; pub(crate) async fn create_test_tx_sender( - pool: ConnectionPool, + pool: ConnectionPool, l2_chain_id: L2ChainId, tx_executor: TransactionExecutor, ) -> (TxSender, VmConcurrencyBarrier) { @@ -39,7 +39,7 @@ pub(crate) async fn create_test_tx_sender( async fn getting_nonce_for_account() { let l2_chain_id = L2ChainId::default(); let test_address = Address::repeat_byte(1); - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis_state(&mut storage, l2_chain_id, &GenesisParams::mock()) .await @@ -86,7 +86,7 @@ async fn getting_nonce_for_account() { async fn getting_nonce_for_account_after_snapshot_recovery() { const SNAPSHOT_MINIBLOCK_NUMBER: MiniblockNumber = MiniblockNumber(42); - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let test_address = Address::repeat_byte(1); let other_address = Address::repeat_byte(2); diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs index 3049d08396c3..40b545c25b0e 100644 --- a/core/lib/zksync_core/src/api_server/web3/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/mod.rs @@ -9,7 +9,7 @@ use tokio::{ task::JoinHandle, }; use tower_http::{cors::CorsLayer, metrics::InFlightRequestsLayer}; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use zksync_health_check::{HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_types::MiniblockNumber; use zksync_web3_decl::{ @@ -123,8 +123,8 @@ struct OptionalApiParams { /// maintenance tasks. 
#[derive(Debug)] pub struct ApiServer { - pool: ConnectionPool, - updaters_pool: ConnectionPool, + pool: ConnectionPool, + updaters_pool: ConnectionPool, health_updater: Arc, config: InternalApiConfig, transport: ApiTransport, @@ -137,8 +137,8 @@ pub struct ApiServer { #[derive(Debug)] pub struct ApiBuilder { - pool: ConnectionPool, - updaters_pool: ConnectionPool, + pool: ConnectionPool, + updaters_pool: ConnectionPool, config: InternalApiConfig, polling_interval: Duration, // Mandatory params that must be set using builder methods. @@ -154,7 +154,7 @@ pub struct ApiBuilder { impl ApiBuilder { const DEFAULT_POLLING_INTERVAL: Duration = Duration::from_millis(200); - pub fn jsonrpsee_backend(config: InternalApiConfig, pool: ConnectionPool) -> Self { + pub fn jsonrpsee_backend(config: InternalApiConfig, pool: ConnectionPool) -> Self { Self { updaters_pool: pool.clone(), pool, @@ -182,7 +182,7 @@ impl ApiBuilder { /// such as last mined block number or account nonces. This pool is used to execute /// in a background task. If not called, the main pool will be used. If the API server is under high load, /// it may make sense to supply a single-connection pool to reduce pool contention with the API methods. - pub fn with_updaters_pool(mut self, pool: ConnectionPool) -> Self { + pub fn with_updaters_pool(mut self, pool: ConnectionPool) -> Self { self.updaters_pool = pool; self } diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs index 94327c2da691..fe9539c7c805 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use anyhow::Context as _; use multivm::{interface::ExecutionResult, vm_latest::constants::BLOCK_GAS_LIMIT}; use once_cell::sync::OnceCell; +use zksync_dal::ServerDals; use zksync_system_constants::MAX_ENCODED_TX_SIZE; use zksync_types::{ api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}, diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs index 7acfab0266a8..85cc254afa4b 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs @@ -1,4 +1,5 @@ use anyhow::Context as _; +use zksync_dal::ServerDals; use zksync_types::{api::en, tokens::TokenInfo, MiniblockNumber}; use zksync_web3_decl::error::Web3Error; diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs index 9cae237d3188..05bab4c535cf 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -1,4 +1,5 @@ use anyhow::Context as _; +use zksync_dal::ServerDals; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ api::{ diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs index 25d29a33099f..5f713d657326 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs @@ -1,4 +1,5 @@ use anyhow::Context as _; +use zksync_dal::ServerDals; use zksync_types::{ snapshots::{AllSnapshots, SnapshotHeader, SnapshotStorageLogsChunkMetadata}, L1BatchNumber, diff --git 
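The repeated one-line `use zksync_dal::ServerDals;` additions in the API namespaces above exist because the DAL accessor methods are now provided through that trait on the server-typed processor, so it has to be in scope at every call site. A sketch of the resulting call pattern; the struct and method names are illustrative, while `access_storage_tagged("api")`, `blocks_dal()`, and `is_genesis_needed()` are taken from the hunks in this patch:

```rust
use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor};

// Illustrative namespace-like component holding the server-typed pool.
struct Namespace {
    connection_pool: ConnectionPool<Server>,
}

impl Namespace {
    async fn access_storage(&self) -> anyhow::Result<StorageProcessor<'_, Server>> {
        // Tagged access keeps the per-component pool metrics ("api") intact.
        let storage = self.connection_pool.access_storage_tagged("api").await?;
        Ok(storage)
    }

    async fn genesis_needed(&self) -> anyhow::Result<bool> {
        let mut storage = self.access_storage().await?;
        // Without `ServerDals` in scope, `blocks_dal()` would not resolve.
        Ok(storage.blocks_dal().is_genesis_needed().await?)
    }
}
```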
a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index c662b78e240c..3dd8a850c687 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -1,7 +1,7 @@ use std::{collections::HashMap, convert::TryInto}; use anyhow::Context as _; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_mini_merkle_tree::MiniMerkleTree; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ @@ -45,7 +45,7 @@ impl ZksNamespace { &self.state.current_method } - async fn access_storage(&self) -> Result, Web3Error> { + async fn access_storage(&self) -> Result, Web3Error> { Ok(self .state .connection_pool @@ -275,7 +275,7 @@ impl ZksNamespace { async fn get_l2_to_l1_log_proof_inner( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, l1_batch_number: L1BatchNumber, index_in_filtered_logs: usize, log_filter: impl Fn(&L2ToL1Log) -> bool, diff --git a/core/lib/zksync_core/src/api_server/web3/pubsub.rs b/core/lib/zksync_core/src/api_server/web3/pubsub.rs index abbb35ae227c..4a46a0e1841b 100644 --- a/core/lib/zksync_core/src/api_server/web3/pubsub.rs +++ b/core/lib/zksync_core/src/api_server/web3/pubsub.rs @@ -7,7 +7,7 @@ use tokio::{ task::JoinHandle, time::{interval, Duration}, }; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use zksync_types::{MiniblockNumber, H128, H256}; use zksync_web3_decl::{ jsonrpsee::{ @@ -51,7 +51,7 @@ pub(super) enum PubSubEvent { #[derive(Debug)] struct PubSubNotifier { sender: broadcast::Sender>, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, polling_interval: Duration, events_sender: Option>, } @@ -415,7 +415,7 @@ impl EthSubscribe { /// Spawns notifier tasks. This should be called once per instance. pub fn spawn_notifiers( &self, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, polling_interval: Duration, stop_receiver: watch::Receiver, ) -> Vec>> { diff --git a/core/lib/zksync_core/src/api_server/web3/state.rs b/core/lib/zksync_core/src/api_server/web3/state.rs index 808f4049f5f7..78ed8dfdad43 100644 --- a/core/lib/zksync_core/src/api_server/web3/state.rs +++ b/core/lib/zksync_core/src/api_server/web3/state.rs @@ -12,7 +12,7 @@ use lru::LruCache; use tokio::sync::{watch, Mutex}; use vise::GaugeGuard; use zksync_config::configs::{api::Web3JsonRpcConfig, chain::NetworkConfig, ContractsConfig}; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_types::{ api, l2::L2Tx, transaction_request::CallRequest, Address, L1BatchNumber, L1ChainId, L2ChainId, MiniblockNumber, H256, U256, U64, @@ -134,7 +134,7 @@ impl SealedMiniblockNumber { /// Creates a handle to the last sealed miniblock number together with a task that will update /// it on a schedule. 
pub fn new( - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, update_interval: Duration, stop_receiver: watch::Receiver, ) -> (Self, impl Future>) { @@ -202,7 +202,7 @@ impl SealedMiniblockNumber { pub(crate) struct RpcState { pub(super) current_method: Arc, pub(super) installed_filters: Option>>, - pub(super) connection_pool: ConnectionPool, + pub(super) connection_pool: ConnectionPool, pub(super) tree_api: Option>, pub(super) tx_sender: TxSender, pub(super) sync_state: Option, @@ -239,7 +239,7 @@ impl RpcState { /// Resolves the specified block ID to a block number, which is guaranteed to be present in the node storage. pub(crate) async fn resolve_block( &self, - connection: &mut StorageProcessor<'_>, + connection: &mut StorageProcessor<'_, Server>, block: api::BlockId, ) -> Result { self.start_info.ensure_not_pruned(block)?; @@ -260,7 +260,7 @@ impl RpcState { /// non-existing blocks. pub(crate) async fn resolve_block_unchecked( &self, - connection: &mut StorageProcessor<'_>, + connection: &mut StorageProcessor<'_, Server>, block: api::BlockId, ) -> Result, Web3Error> { self.start_info.ensure_not_pruned(block)?; @@ -279,7 +279,7 @@ impl RpcState { pub(crate) async fn resolve_block_args( &self, - connection: &mut StorageProcessor<'_>, + connection: &mut StorageProcessor<'_, Server>, block: api::BlockId, ) -> Result { BlockArgs::new(connection, block, self.start_info) diff --git a/core/lib/zksync_core/src/api_server/web3/tests/debug.rs b/core/lib/zksync_core/src/api_server/web3/tests/debug.rs index 05eeaa5e6974..386175668cad 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/debug.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/debug.rs @@ -34,7 +34,7 @@ struct TraceBlockTest(MiniblockNumber); #[async_trait] impl HttpTest for TraceBlockTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let tx_results = [0, 1, 2].map(execute_l2_transaction_with_traces); let mut storage = pool.access_storage().await?; let new_miniblock = store_miniblock(&mut storage, self.0, &tx_results).await?; @@ -97,7 +97,7 @@ struct TraceBlockFlatTest(MiniblockNumber); #[async_trait] impl HttpTest for TraceBlockFlatTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let tx_results = [0, 1, 2].map(execute_l2_transaction_with_traces); let mut storage = pool.access_storage().await?; let _new_miniblock = store_miniblock(&mut storage, self.0, &tx_results).await?; @@ -173,7 +173,7 @@ struct TraceTransactionTest; #[async_trait] impl HttpTest for TraceTransactionTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let tx_results = [execute_l2_transaction_with_traces(0)]; let mut storage = pool.access_storage().await?; store_miniblock(&mut storage, MiniblockNumber(1), &tx_results).await?; @@ -212,7 +212,7 @@ impl HttpTest for TraceBlockTestWithSnapshotRecovery { StorageInitialization::empty_recovery() } - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let snapshot_miniblock_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK; let missing_miniblock_numbers 
= [ MiniblockNumber(0), diff --git a/core/lib/zksync_core/src/api_server/web3/tests/filters.rs b/core/lib/zksync_core/src/api_server/web3/tests/filters.rs index 76337be9f693..1a57d57de54c 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/filters.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/filters.rs @@ -22,7 +22,7 @@ impl HttpTest for BasicFilterChangesTest { } } - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let block_filter_id = client.new_block_filter().await?; let tx_filter_id = client.new_pending_transaction_filter().await?; @@ -104,7 +104,7 @@ impl HttpTest for LogFilterChangesTest { } } - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let all_logs_filter_id = client.new_filter(Filter::default()).await?; let address_filter = Filter { address: Some(Address::repeat_byte(23).into()), @@ -175,7 +175,7 @@ struct LogFilterChangesWithBlockBoundariesTest; #[async_trait] impl HttpTest for LogFilterChangesWithBlockBoundariesTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let lower_bound_filter = Filter { from_block: Some(api::BlockNumber::Number(2.into())), ..Filter::default() @@ -279,7 +279,11 @@ struct DisableFiltersTest; #[async_trait] impl HttpTest for DisableFiltersTest { - async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test( + &self, + client: &HttpClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { let filter = Filter { from_block: Some(api::BlockNumber::Number(2.into())), ..Filter::default() diff --git a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs index 9a996bbd687f..964eef15de61 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs @@ -15,7 +15,9 @@ use zksync_config::configs::{ chain::{NetworkConfig, StateKeeperConfig}, ContractsConfig, }; -use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool, StorageProcessor}; +use zksync_dal::{ + transactions_dal::L2TxSubmissionResult, ConnectionPool, ServerDals, StorageProcessor, +}; use zksync_health_check::CheckHealth; use zksync_types::{ api, @@ -106,7 +108,7 @@ impl ApiServerHandles { pub(crate) async fn spawn_http_server( api_config: InternalApiConfig, - pool: ConnectionPool, + pool: ConnectionPool, tx_executor: MockTransactionExecutor, method_tracer: Arc, stop_receiver: watch::Receiver, @@ -126,7 +128,7 @@ pub(crate) async fn spawn_http_server( async fn spawn_ws_server( api_config: InternalApiConfig, - pool: ConnectionPool, + pool: ConnectionPool, stop_receiver: watch::Receiver, websocket_requests_per_minute_limit: Option, ) -> (ApiServerHandles, mpsc::UnboundedReceiver) { @@ -145,7 +147,7 @@ async fn spawn_ws_server( async fn spawn_server( transport: ApiTransportLabel, api_config: InternalApiConfig, - pool: ConnectionPool, + pool: ConnectionPool, websocket_requests_per_minute_limit: Option, tx_executor: MockTransactionExecutor, method_tracer: Arc, @@ -201,7 +203,7 @@ trait HttpTest: Send + Sync { Arc::default() } - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> 
anyhow::Result<()>; + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()>; /// Overrides the `filters_disabled` configuration parameter for HTTP server startup fn filters_disabled(&self) -> bool { @@ -233,7 +235,7 @@ impl StorageInitialization { async fn prepare_storage( &self, network_config: &NetworkConfig, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, ) -> anyhow::Result<()> { match self { Self::Genesis => { @@ -269,7 +271,7 @@ impl StorageInitialization { } async fn test_http_server(test: impl HttpTest) { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let network_config = NetworkConfig::for_tests(); let mut storage = pool.access_storage().await.unwrap(); test.storage_initialization() @@ -340,7 +342,7 @@ fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult { /// Stores miniblock #1 with a single transaction and returns the miniblock header + transaction hash. async fn store_miniblock( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, number: MiniblockNumber, transaction_results: &[TransactionExecutionResult], ) -> anyhow::Result { @@ -366,7 +368,7 @@ async fn store_miniblock( } async fn seal_l1_batch( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, number: L1BatchNumber, ) -> anyhow::Result<()> { let header = create_l1_batch(number.0); @@ -391,7 +393,7 @@ async fn seal_l1_batch( } async fn store_events( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, miniblock_number: u32, start_idx: u32, ) -> anyhow::Result<(IncludedTxLocation, Vec)> { @@ -451,7 +453,11 @@ struct HttpServerBasicsTest; #[async_trait] impl HttpTest for HttpServerBasicsTest { - async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test( + &self, + client: &HttpClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { let block_number = client.get_block_number().await?; assert_eq!(block_number, U64::from(0)); @@ -481,7 +487,11 @@ impl HttpTest for BlockMethodsWithSnapshotRecovery { StorageInitialization::empty_recovery() } - async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test( + &self, + client: &HttpClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { let block = client.get_block_by_number(1_000.into(), false).await?; assert!(block.is_none()); @@ -554,7 +564,11 @@ impl HttpTest for L1BatchMethodsWithSnapshotRecovery { StorageInitialization::empty_recovery() } - async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test( + &self, + client: &HttpClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { let miniblock_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; let l1_batch_number = StorageInitialization::SNAPSHOT_RECOVERY_BATCH + 1; assert_eq!( @@ -645,7 +659,11 @@ impl HttpTest for StorageAccessWithSnapshotRecovery { StorageInitialization::Recovery { logs, factory_deps } } - async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test( + &self, + client: &HttpClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { let address = Address::repeat_byte(1); let first_local_miniblock = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; for number in [0, 1, first_local_miniblock.0 - 1] { @@ -686,7 +704,7 @@ struct TransactionCountTest; #[async_trait] 
impl HttpTest for TransactionCountTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let test_address = Address::repeat_byte(11); let mut storage = pool.access_storage().await?; let mut miniblock_number = MiniblockNumber(0); @@ -783,7 +801,7 @@ impl HttpTest for TransactionCountAfterSnapshotRecoveryTest { } } - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let test_address = Address::repeat_byte(11); let pending_count = client.get_transaction_count(test_address, None).await?; assert_eq!(pending_count, 3.into()); @@ -837,7 +855,7 @@ struct TransactionReceiptsTest; #[async_trait] impl HttpTest for TransactionReceiptsTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let mut storage = pool.access_storage().await?; let miniblock_number = MiniblockNumber(1); @@ -895,7 +913,7 @@ impl AllAccountBalancesTest { #[async_trait] impl HttpTest for AllAccountBalancesTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let balances = client.get_all_account_balances(Self::ADDRESS).await?; assert_eq!(balances, HashMap::new()); @@ -967,7 +985,11 @@ impl HttpTest for RpcCallsTracingTest { self.tracer.clone() } - async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test( + &self, + client: &HttpClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { let block_number = client.get_block_number().await?; assert_eq!(block_number, U64::from(0)); diff --git a/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs index 1765a7c2397d..bb73962a355a 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs @@ -27,7 +27,7 @@ impl SnapshotBasicsTest { #[async_trait] impl HttpTest for SnapshotBasicsTest { - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let mut storage = pool.access_storage().await.unwrap(); store_miniblock( &mut storage, diff --git a/core/lib/zksync_core/src/api_server/web3/tests/vm.rs b/core/lib/zksync_core/src/api_server/web3/tests/vm.rs index 22760c37feb1..316b07e9681f 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/vm.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/vm.rs @@ -50,7 +50,11 @@ impl HttpTest for CallTest { Self::create_executor(MiniblockNumber(0)) } - async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test( + &self, + client: &HttpClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { let call_result = client.call(Self::call_request(b"pending"), None).await?; assert_eq!(call_result.0, b"output"); @@ -102,7 +106,11 @@ impl HttpTest for CallTestAfterSnapshotRecovery { CallTest::create_executor(first_local_miniblock) } - async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test( + &self, + client: 
&HttpClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { let call_result = client .call(CallTest::call_request(b"pending"), None) .await?; @@ -218,7 +226,7 @@ impl HttpTest for SendRawTransactionTest { tx_executor } - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { if !self.snapshot_recovery { // Manually set sufficient balance for the transaction account. let mut storage = pool.access_storage().await?; @@ -280,7 +288,11 @@ impl HttpTest for TraceCallTest { CallTest::create_executor(MiniblockNumber(0)) } - async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test( + &self, + client: &HttpClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { let call_request = CallTest::call_request(b"pending"); let call_result = client.trace_call(call_request.clone(), None, None).await?; Self::assert_debug_call(&call_request, &call_result); @@ -345,7 +357,11 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { CallTest::create_executor(number) } - async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test( + &self, + client: &HttpClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { let call_request = CallTest::call_request(b"pending"); let call_result = client.trace_call(call_request.clone(), None, None).await?; TraceCallTest::assert_debug_call(&call_request, &call_result); @@ -431,7 +447,7 @@ impl HttpTest for EstimateGasTest { tx_executor } - async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { let l2_transaction = create_l2_transaction(10, 100); for threshold in [10_000, 50_000, 100_000, 1_000_000] { self.gas_limit_threshold.store(threshold, Ordering::Relaxed); diff --git a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs index 07846ff92595..cbaa52645ded 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs @@ -96,7 +96,7 @@ async fn wait_for_notifier_miniblock( #[tokio::test] async fn notifiers_start_after_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); prepare_recovery_snapshot( &mut storage, @@ -152,7 +152,7 @@ trait WsTest: Send + Sync { async fn test( &self, client: &WsClient, - pool: &ConnectionPool, + pool: &ConnectionPool, pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()>; @@ -162,7 +162,7 @@ trait WsTest: Send + Sync { } async fn test_ws_server(test: impl WsTest) { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let network_config = NetworkConfig::for_tests(); let contracts_config = ContractsConfig::for_tests(); let web3_config = Web3JsonRpcConfig::for_tests(); @@ -202,7 +202,7 @@ impl WsTest for WsServerCanStartTest { async fn test( &self, client: &WsClient, - _pool: &ConnectionPool, + _pool: &ConnectionPool, _pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { let block_number = client.get_block_number().await?; @@ -243,7 +243,7 @@ impl WsTest for BasicSubscriptionsTest { async fn test( &self, client: &WsClient, - pool: &ConnectionPool, + pool: &ConnectionPool, mut pub_sub_events: 
mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { // Wait for the notifiers to get initialized so that they don't skip notifications @@ -382,7 +382,7 @@ impl WsTest for LogSubscriptionsTest { async fn test( &self, client: &WsClient, - pool: &ConnectionPool, + pool: &ConnectionPool, mut pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { let LogSubscriptions { @@ -471,7 +471,7 @@ impl WsTest for LogSubscriptionsWithNewBlockTest { async fn test( &self, client: &WsClient, - pool: &ConnectionPool, + pool: &ConnectionPool, mut pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { let LogSubscriptions { @@ -519,7 +519,7 @@ impl WsTest for LogSubscriptionsWithManyBlocksTest { async fn test( &self, client: &WsClient, - pool: &ConnectionPool, + pool: &ConnectionPool, mut pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { let LogSubscriptions { @@ -565,7 +565,7 @@ impl WsTest for LogSubscriptionsWithDelayTest { async fn test( &self, client: &WsClient, - pool: &ConnectionPool, + pool: &ConnectionPool, mut pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { // Wait until notifiers are initialized. @@ -635,7 +635,7 @@ impl WsTest for RateLimitingTest { async fn test( &self, client: &WsClient, - _pool: &ConnectionPool, + _pool: &ConnectionPool, _pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { client.chain_id().await.unwrap(); @@ -672,7 +672,7 @@ impl WsTest for BatchGetsRateLimitedTest { async fn test( &self, client: &WsClient, - _pool: &ConnectionPool, + _pool: &ConnectionPool, _pub_sub_events: mpsc::UnboundedReceiver, ) -> anyhow::Result<()> { client.chain_id().await.unwrap(); diff --git a/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs b/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs index ed933c18d5b3..d4acd7423817 100644 --- a/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs +++ b/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs @@ -5,7 +5,9 @@ use async_trait::async_trait; use multivm::interface::{L2BlockEnv, VmInterface}; use tokio::{runtime::Handle, task::JoinHandle}; use vm_utils::{create_vm, execute_tx}; -use zksync_dal::{basic_witness_input_producer_dal::JOB_MAX_ATTEMPT, ConnectionPool}; +use zksync_dal::{ + basic_witness_input_producer_dal::JOB_MAX_ATTEMPT, ConnectionPool, Server, ServerDals, +}; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_queued_job_processor::JobProcessor; use zksync_types::{witness_block_state::WitnessBlockState, L1BatchNumber, L2ChainId}; @@ -20,14 +22,14 @@ mod metrics; /// to be run only using the object store information, having no other external dependency. 
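For orientation, the field-type change applied to `BasicWitnessInputProducer` just below (and to many other components in this patch) boils down to the following pattern. This is a minimal sketch: `ExampleComponent`, its method, and the particular DAL call are illustrative, while `ConnectionPool<Server>`, `ServerDals`, and `access_storage` are the items this patch introduces or re-exports.

```rust
use zksync_dal::{ConnectionPool, Server, ServerDals};
use zksync_types::L1BatchNumber;

// Illustrative component owning a server-typed pool.
#[derive(Debug)]
struct ExampleComponent {
    pool: ConnectionPool<Server>,
}

impl ExampleComponent {
    async fn last_sealed_batch(&self) -> anyhow::Result<Option<L1BatchNumber>> {
        // `access_storage` yields a `StorageProcessor<'_, Server>`, so only the
        // server-side DAL accessors brought in by `ServerDals` are reachable here.
        let mut storage = self.pool.access_storage().await?;
        Ok(storage.blocks_dal().get_sealed_l1_batch_number().await?)
    }
}
```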
#[derive(Debug)] pub struct BasicWitnessInputProducer { - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, l2_chain_id: L2ChainId, object_store: Arc, } impl BasicWitnessInputProducer { pub async fn new( - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, store_factory: &ObjectStoreFactory, l2_chain_id: L2ChainId, ) -> anyhow::Result { @@ -42,7 +44,7 @@ impl BasicWitnessInputProducer { rt_handle: Handle, l1_batch_number: L1BatchNumber, started_at: Instant, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, l2_chain_id: L2ChainId, ) -> anyhow::Result { let mut connection = rt_handle diff --git a/core/lib/zksync_core/src/block_reverter/mod.rs b/core/lib/zksync_core/src/block_reverter/mod.rs index f03545f81e32..214d17562527 100644 --- a/core/lib/zksync_core/src/block_reverter/mod.rs +++ b/core/lib/zksync_core/src/block_reverter/mod.rs @@ -5,7 +5,7 @@ use serde::Serialize; use tokio::time::sleep; use zksync_config::{ContractsConfig, ETHSenderConfig}; use zksync_contracts::zksync_contract; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; use zksync_merkle_tree::domain::ZkSyncTree; use zksync_state::RocksdbStorage; @@ -100,7 +100,7 @@ pub struct BlockReverter { state_keeper_cache_path: String, merkle_tree_path: String, eth_config: Option, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, executed_batches_revert_mode: L1ExecutedBatchesRevert, } @@ -110,7 +110,7 @@ impl BlockReverter { state_keeper_cache_path: String, merkle_tree_path: String, eth_config: Option, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, executed_batches_revert_mode: L1ExecutedBatchesRevert, ) -> Self { Self { diff --git a/core/lib/zksync_core/src/commitment_generator/mod.rs b/core/lib/zksync_core/src/commitment_generator/mod.rs index 6932c953e96d..cd87f2f8b71a 100644 --- a/core/lib/zksync_core/src/commitment_generator/mod.rs +++ b/core/lib/zksync_core/src/commitment_generator/mod.rs @@ -6,7 +6,7 @@ use metrics::{CommitmentStage, METRICS}; use multivm::zk_evm_latest::ethereum_types::U256; use tokio::{sync::watch, task::JoinHandle}; use zksync_commitment_utils::{bootloader_initial_content_commitment, events_queue_commitment}; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_l1_contract_interface::i_executor::commit::kzg::pubdata_to_blob_commitments; use zksync_types::{ @@ -23,12 +23,12 @@ const SLEEP_INTERVAL: Duration = Duration::from_millis(100); #[derive(Debug)] pub struct CommitmentGenerator { - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, health_updater: HealthUpdater, } impl CommitmentGenerator { - pub fn new(connection_pool: ConnectionPool) -> Self { + pub fn new(connection_pool: ConnectionPool) -> Self { Self { connection_pool, health_updater: ReactiveHealthCheck::new("commitment_generator").1, diff --git a/core/lib/zksync_core/src/consensus/storage/mod.rs b/core/lib/zksync_core/src/consensus/storage/mod.rs index 7b8bac16bc84..318ec17c845d 100644 --- a/core/lib/zksync_core/src/consensus/storage/mod.rs +++ b/core/lib/zksync_core/src/consensus/storage/mod.rs @@ -5,7 +5,7 @@ use zksync_concurrency::{ctx, error::Wrap as _, sync, time}; use zksync_consensus_bft::PayloadManager; use zksync_consensus_roles::validator; use 
zksync_consensus_storage::{BlockStoreState, PersistentBlockStore, ReplicaState, ReplicaStore}; -use zksync_dal::{consensus_dal::Payload, ConnectionPool}; +use zksync_dal::{consensus_dal::Payload, ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_types::MiniblockNumber; #[cfg(test)] @@ -19,8 +19,8 @@ use crate::{ }, }; -/// Context-aware `zksync_dal::StorageProcessor` wrapper. -pub(super) struct Connection<'a>(pub(super) zksync_dal::StorageProcessor<'a>); +/// Context-aware `zksync_dal::StorageProcessor` wrapper. +pub(super) struct Connection<'a>(pub(super) StorageProcessor<'a, Server>); impl<'a> Connection<'a> { /// Wrapper for `start_transaction()`. @@ -177,7 +177,7 @@ impl Cursor { /// Wrapper of `ConnectionPool` implementing `ReplicaStore` and `PayloadManager`. #[derive(Clone, Debug)] -pub struct Store(pub ConnectionPool); +pub struct Store(pub ConnectionPool); /// Wrapper of `ConnectionPool` implementing `PersistentBlockStore`. #[derive(Debug)] diff --git a/core/lib/zksync_core/src/consensus/testonly.rs b/core/lib/zksync_core/src/consensus/testonly.rs index 6de4ac156811..e30fc401a498 100644 --- a/core/lib/zksync_core/src/consensus/testonly.rs +++ b/core/lib/zksync_core/src/consensus/testonly.rs @@ -7,6 +7,7 @@ use zksync_concurrency::{ctx, error::Wrap as _, limiter, scope, sync, time}; use zksync_config::configs; use zksync_consensus_roles::validator; use zksync_contracts::BaseSystemContractsHashes; +use zksync_dal::ServerDals; use zksync_types::{ api, snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, H256, diff --git a/core/lib/zksync_core/src/consistency_checker/mod.rs b/core/lib/zksync_core/src/consistency_checker/mod.rs index ccb56f07105a..60df7be6d86c 100644 --- a/core/lib/zksync_core/src/consistency_checker/mod.rs +++ b/core/lib/zksync_core/src/consistency_checker/mod.rs @@ -4,7 +4,7 @@ use anyhow::Context as _; use serde::Serialize; use tokio::sync::watch; use zksync_contracts::PRE_BOOJUM_COMMIT_FUNCTION; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_eth_client::{ clients::QueryClient, CallFunctionArgs, Error as L1ClientError, EthInterface, }; @@ -149,7 +149,7 @@ impl LocalL1BatchCommitData { /// Returns `Ok(None)` if Postgres doesn't contain all data necessary to check L1 commitment /// for the specified batch. 
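The consensus storage module above shows the same idea expressed through newtypes: a wrapper pins the database kind once, and everything built on top inherits it. A rough sketch (the wrapper names and the helper method are illustrative):

```rust
use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor};
use zksync_types::MiniblockNumber;

/// Context-aware wrapper over a server-typed connection, in the spirit of the
/// `consensus::storage::Connection` newtype from the hunk above.
struct ExampleConnection<'a>(StorageProcessor<'a, Server>);

impl ExampleConnection<'_> {
    // Illustrative helper: methods on the wrapper can only reach server-side DALs.
    async fn sealed_miniblock(&mut self) -> anyhow::Result<Option<MiniblockNumber>> {
        Ok(self.0.blocks_dal().get_sealed_miniblock_number().await?)
    }
}

/// Pool wrapper in the spirit of `consensus::storage::Store`.
#[derive(Clone, Debug)]
struct ExampleStore(ConnectionPool<Server>);
```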
async fn new( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, batch_number: L1BatchNumber, ) -> anyhow::Result> { let Some(storage_l1_batch) = storage @@ -253,7 +253,7 @@ pub struct ConsistencyChecker { l1_client: Box, event_handler: Box, l1_data_mismatch_behavior: L1DataMismatchBehavior, - pool: ConnectionPool, + pool: ConnectionPool, health_check: ReactiveHealthCheck, } @@ -263,7 +263,7 @@ impl ConsistencyChecker { pub fn new( web3_url: &str, max_batches_to_recheck: u32, - pool: ConnectionPool, + pool: ConnectionPool, ) -> anyhow::Result { let web3 = QueryClient::new(web3_url).context("cannot create L1 Web3 client")?; let (health_check, health_updater) = ConsistencyCheckerHealthUpdater::new(); diff --git a/core/lib/zksync_core/src/consistency_checker/tests/mod.rs b/core/lib/zksync_core/src/consistency_checker/tests/mod.rs index 3f44fcfaaa7e..ac3f9154cdaf 100644 --- a/core/lib/zksync_core/src/consistency_checker/tests/mod.rs +++ b/core/lib/zksync_core/src/consistency_checker/tests/mod.rs @@ -72,7 +72,7 @@ fn build_commit_tx_input_data(batches: &[L1BatchWithMetadata]) -> Vec { encoded } -fn create_mock_checker(client: MockEthereum, pool: ConnectionPool) -> ConsistencyChecker { +fn create_mock_checker(client: MockEthereum, pool: ConnectionPool) -> ConsistencyChecker { let (health_check, health_updater) = ConsistencyCheckerHealthUpdater::new(); ConsistencyChecker { contract: zksync_contracts::zksync_contract(), @@ -230,7 +230,7 @@ enum SaveAction<'a> { impl SaveAction<'_> { async fn apply( self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, commit_tx_hash_by_l1_batch: &HashMap, ) { match self { @@ -361,7 +361,7 @@ async fn normal_checker_function( ) { println!("Using save_actions_mapper={mapper_name}"); - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await @@ -431,7 +431,7 @@ async fn checker_processes_pre_boojum_batches( ) { println!("Using save_actions_mapper={mapper_name}"); - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let genesis_params = GenesisParams { protocol_version: PRE_BOOJUM_PROTOCOL_VERSION, @@ -504,7 +504,7 @@ async fn checker_processes_pre_boojum_batches( #[test_casing(2, [false, true])] #[tokio::test] async fn checker_functions_after_snapshot_recovery(delay_batch_insertion: bool) { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); storage .protocol_versions_dal() @@ -677,7 +677,7 @@ impl IncorrectDataKind { // ^ `snapshot_recovery = true` is tested below; we don't want to run it with all incorrect data kinds #[tokio::test] async fn checker_detects_incorrect_tx_data(kind: IncorrectDataKind, snapshot_recovery: bool) { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); if snapshot_recovery { storage diff --git a/core/lib/zksync_core/src/eth_sender/aggregator.rs b/core/lib/zksync_core/src/eth_sender/aggregator.rs index 4dd1fec06621..92c5c16fdb42 100644 --- a/core/lib/zksync_core/src/eth_sender/aggregator.rs +++ b/core/lib/zksync_core/src/eth_sender/aggregator.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use 
zksync_config::configs::eth_sender::{ProofLoadingMode, ProofSendingMode, SenderConfig}; use zksync_contracts::BaseSystemContractsHashes; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_l1_contract_interface::i_executor::methods::{ CommitBatches, ExecuteBatches, ProveBatches, }; @@ -108,7 +108,7 @@ impl Aggregator { pub async fn get_next_ready_operation( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, base_system_contracts_hashes: BaseSystemContractsHashes, protocol_version_id: ProtocolVersionId, l1_verifier_config: L1VerifierConfig, @@ -156,7 +156,7 @@ impl Aggregator { async fn get_execute_operations( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, limit: usize, last_sealed_l1_batch: L1BatchNumber, ) -> Option { @@ -182,7 +182,7 @@ impl Aggregator { async fn get_commit_operation( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, limit: usize, last_sealed_batch: L1BatchNumber, base_system_contracts_hashes: BaseSystemContractsHashes, @@ -243,7 +243,7 @@ impl Aggregator { } async fn load_dummy_proof_operations( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, limit: usize, is_4844_mode: bool, ) -> Vec { @@ -287,7 +287,7 @@ impl Aggregator { } async fn load_real_proof_operation( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, l1_verifier_config: L1VerifierConfig, proof_loading_mode: &ProofLoadingMode, blob_store: &dyn ObjectStore, @@ -381,7 +381,7 @@ impl Aggregator { async fn prepare_dummy_proof_operation( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, ready_for_proof_l1_batches: Vec, last_sealed_l1_batch: L1BatchNumber, ) -> Option { @@ -410,7 +410,7 @@ impl Aggregator { async fn get_proof_operation( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, limit: usize, last_sealed_l1_batch: L1BatchNumber, l1_verifier_config: L1VerifierConfig, @@ -473,7 +473,7 @@ impl Aggregator { } async fn extract_ready_subrange( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, publish_criteria: &mut [Box], unpublished_l1_batches: Vec, last_sealed_l1_batch: L1BatchNumber, diff --git a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs index 68d3236e264c..e067524925e2 100644 --- a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs +++ b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs @@ -3,7 +3,7 @@ use std::{convert::TryInto, sync::Arc}; use tokio::sync::watch; use zksync_config::configs::eth_sender::SenderConfig; use zksync_contracts::BaseSystemContractsHashes; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_eth_client::{BoundEthInterface, CallFunctionArgs}; use zksync_l1_contract_interface::{ i_executor::commit::kzg::{KzgInfo, ZK_SYNC_BYTES_PER_BLOB}, @@ -62,7 +62,7 @@ pub struct EthTxAggregator { /// transactions. The `Some` then contains the address of this custom operator /// address. custom_commit_sender_addr: Option
, - pool: ConnectionPool, + pool: ConnectionPool, } struct TxData { @@ -73,7 +73,7 @@ struct TxData { impl EthTxAggregator { #[allow(clippy::too_many_arguments)] pub async fn new( - pool: ConnectionPool, + pool: ConnectionPool, config: SenderConfig, aggregator: Aggregator, eth_client: Arc, @@ -342,7 +342,7 @@ impl EthTxAggregator { #[tracing::instrument(skip(self, storage))] async fn loop_iteration( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, ) -> Result<(), ETHSenderError> { let MulticallData { base_system_contracts_hashes, @@ -385,7 +385,7 @@ impl EthTxAggregator { } async fn report_eth_tx_saving( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, aggregated_op: AggregatedOperation, tx: &EthTx, ) { @@ -519,7 +519,7 @@ impl EthTxAggregator { pub(super) async fn save_eth_tx( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, aggregated_op: &AggregatedOperation, contracts_are_pre_shared_bridge: bool, ) -> Result { @@ -569,7 +569,7 @@ impl EthTxAggregator { async fn get_next_nonce( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, from_addr: Option
, ) -> Result { let db_nonce = storage diff --git a/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs b/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs index 5bde8f5fdd0b..ed34114f6e9c 100644 --- a/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs +++ b/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs @@ -3,7 +3,7 @@ use std::{sync::Arc, time::Duration}; use anyhow::Context as _; use tokio::sync::watch; use zksync_config::configs::eth_sender::SenderConfig; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_eth_client::{ encode_blob_tx_with_sidecar, BoundEthInterface, Error, EthInterface, ExecutedTxStatus, Options, RawTransactionBytes, SignedCallResult, @@ -58,12 +58,12 @@ pub struct EthTxManager { ethereum_gateway_blobs: Option>, config: SenderConfig, gas_adjuster: Arc, - pool: ConnectionPool, + pool: ConnectionPool, } impl EthTxManager { pub fn new( - pool: ConnectionPool, + pool: ConnectionPool, config: SenderConfig, gas_adjuster: Arc, ethereum_gateway: Arc, @@ -90,7 +90,7 @@ impl EthTxManager { async fn check_all_sending_attempts( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, op: &EthTx, ) -> Option { // Checking history items, starting from most recently sent. @@ -118,7 +118,7 @@ impl EthTxManager { async fn calculate_fee( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, tx: &EthTx, time_in_mempool: u32, ) -> Result { @@ -193,7 +193,7 @@ impl EthTxManager { async fn increase_priority_fee( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, eth_tx_id: u32, base_fee_per_gas: u64, ) -> Result { @@ -229,7 +229,7 @@ impl EthTxManager { pub(crate) async fn send_eth_tx( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, tx: &EthTx, time_in_mempool: u32, current_block: L1BlockNumber, @@ -297,7 +297,7 @@ impl EthTxManager { async fn send_raw_transaction( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, tx_history_id: u32, raw_tx: RawTransactionBytes, current_block: L1BlockNumber, @@ -416,7 +416,7 @@ impl EthTxManager { // returns the one that has to be resent (if there is one). pub(super) async fn monitor_inflight_transactions( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, l1_block_numbers: L1BlockNumbers, ) -> Result, ETHSenderError> { METRICS.track_block_numbers(&l1_block_numbers); @@ -454,7 +454,7 @@ impl EthTxManager { async fn monitor_inflight_transactions_inner( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, l1_block_numbers: L1BlockNumbers, operator_nonce: OperatorNonce, operator_address: Option
, @@ -581,7 +581,7 @@ impl EthTxManager { async fn send_unsent_txs( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, l1_block_numbers: L1BlockNumbers, ) { for tx in storage.eth_sender_dal().get_unsent_txs().await.unwrap() { @@ -623,7 +623,7 @@ impl EthTxManager { async fn apply_tx_status( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, tx: &EthTx, tx_status: ExecutedTxStatus, finalized_block: L1BlockNumber, @@ -646,7 +646,7 @@ impl EthTxManager { pub async fn fail_tx( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, tx: &EthTx, tx_status: ExecutedTxStatus, ) { @@ -674,7 +674,7 @@ impl EthTxManager { pub async fn confirm_tx( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, tx: &EthTx, tx_status: ExecutedTxStatus, ) { @@ -761,7 +761,7 @@ impl EthTxManager { async fn send_new_eth_txs( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, current_block: L1BlockNumber, ) { let number_inflight_txs = storage @@ -792,7 +792,7 @@ impl EthTxManager { #[tracing::instrument(skip(self, storage))] async fn loop_iteration( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, previous_block: L1BlockNumber, ) -> Result { let l1_block_numbers = self.get_l1_block_numbers().await?; diff --git a/core/lib/zksync_core/src/eth_sender/metrics.rs b/core/lib/zksync_core/src/eth_sender/metrics.rs index 2b5f4b902ad2..c2ae78db4472 100644 --- a/core/lib/zksync_core/src/eth_sender/metrics.rs +++ b/core/lib/zksync_core/src/eth_sender/metrics.rs @@ -3,7 +3,7 @@ use std::{fmt, time::Duration}; use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics}; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_types::{aggregated_operations::AggregatedActionType, eth_sender::EthTx}; use zksync_utils::time::seconds_since_epoch; @@ -113,7 +113,7 @@ impl EthSenderMetrics { pub async fn track_eth_tx_metrics( &self, - connection: &mut StorageProcessor<'_>, + connection: &mut StorageProcessor<'_, Server>, l1_stage: BlockL1Stage, tx: &EthTx, ) { diff --git a/core/lib/zksync_core/src/eth_sender/publish_criterion.rs b/core/lib/zksync_core/src/eth_sender/publish_criterion.rs index 434a3d4b1d3d..933547147e8b 100644 --- a/core/lib/zksync_core/src/eth_sender/publish_criterion.rs +++ b/core/lib/zksync_core/src/eth_sender/publish_criterion.rs @@ -2,7 +2,7 @@ use std::fmt; use async_trait::async_trait; use chrono::Utc; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_l1_contract_interface::{i_executor::structures::CommitBatchInfo, Tokenizable}; use zksync_types::{ aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata, ethabi, @@ -21,7 +21,7 @@ pub trait L1BatchPublishCriterion: fmt::Debug + Send + Sync { /// Otherwise, returns the number of the last L1 batch that needs to be published. 
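Every `eth_sender` helper below goes through the same mechanical signature change; seen in isolation it looks roughly like this (a sketch — the function body and the connection tag are illustrative, while `access_storage_tagged` and `get_unsent_txs` appear elsewhere in this patch):

```rust
use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor};

// Helpers now state in their signature which database they operate on.
async fn unsent_tx_count(storage: &mut StorageProcessor<'_, Server>) -> anyhow::Result<usize> {
    Ok(storage.eth_sender_dal().get_unsent_txs().await?.len())
}

async fn run_iteration(pool: &ConnectionPool<Server>) -> anyhow::Result<usize> {
    // Long-running components acquire tagged connections (the tag here is illustrative).
    let mut storage = pool.access_storage_tagged("eth_sender").await?;
    unsent_tx_count(&mut storage).await
}
```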
async fn last_l1_batch_to_publish( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, consecutive_l1_batches: &[L1BatchWithMetadata], last_sealed_l1_batch: L1BatchNumber, ) -> Option; @@ -42,7 +42,7 @@ impl L1BatchPublishCriterion for NumberCriterion { async fn last_l1_batch_to_publish( &mut self, - _storage: &mut StorageProcessor<'_>, + _storage: &mut StorageProcessor<'_, Server>, consecutive_l1_batches: &[L1BatchWithMetadata], _last_sealed_l1_batch: L1BatchNumber, ) -> Option { @@ -88,7 +88,7 @@ impl L1BatchPublishCriterion for TimestampDeadlineCriterion { async fn last_l1_batch_to_publish( &mut self, - _storage: &mut StorageProcessor<'_>, + _storage: &mut StorageProcessor<'_, Server>, consecutive_l1_batches: &[L1BatchWithMetadata], last_sealed_l1_batch: L1BatchNumber, ) -> Option { @@ -133,7 +133,7 @@ impl GasCriterion { async fn get_gas_amount( &self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, batch_number: L1BatchNumber, ) -> u32 { storage @@ -152,7 +152,7 @@ impl L1BatchPublishCriterion for GasCriterion { async fn last_l1_batch_to_publish( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, consecutive_l1_batches: &[L1BatchWithMetadata], _last_sealed_l1_batch: L1BatchNumber, ) -> Option { @@ -210,7 +210,7 @@ impl L1BatchPublishCriterion for DataSizeCriterion { async fn last_l1_batch_to_publish( &mut self, - _storage: &mut StorageProcessor<'_>, + _storage: &mut StorageProcessor<'_, Server>, consecutive_l1_batches: &[L1BatchWithMetadata], _last_sealed_l1_batch: L1BatchNumber, ) -> Option { diff --git a/core/lib/zksync_core/src/eth_sender/tests.rs b/core/lib/zksync_core/src/eth_sender/tests.rs index 571a308a80fd..b142fc19fe2e 100644 --- a/core/lib/zksync_core/src/eth_sender/tests.rs +++ b/core/lib/zksync_core/src/eth_sender/tests.rs @@ -7,7 +7,7 @@ use zksync_config::{ configs::eth_sender::{ProofSendingMode, PubdataSendingMode, SenderConfig}, ContractsConfig, ETHSenderConfig, GasAdjusterConfig, }; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_eth_client::{clients::MockEthereum, EthInterface}; use zksync_l1_contract_interface::i_executor::methods::{ CommitBatches, ExecuteBatches, ProveBatches, @@ -64,7 +64,7 @@ fn mock_multicall_response() -> Token { #[derive(Debug)] struct EthSenderTester { - conn: ConnectionPool, + conn: ConnectionPool, gateway: Arc, manager: MockEthTxManager, aggregator: EthTxAggregator, @@ -76,7 +76,7 @@ impl EthSenderTester { const MAX_BASE_FEE_SAMPLES: usize = 3; async fn new( - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, history: Vec, non_ordering_confirmations: bool, aggregator_operate_4844_mode: bool, @@ -158,7 +158,7 @@ impl EthSenderTester { } } - async fn storage(&self) -> StorageProcessor<'_> { + async fn storage(&self) -> StorageProcessor<'_, Server> { self.conn.access_storage().await.unwrap() } @@ -177,7 +177,7 @@ impl EthSenderTester { #[test_casing(2, [false, true])] #[tokio::test] async fn confirm_many(aggregator_operate_4844_mode: bool) -> anyhow::Result<()> { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut tester = EthSenderTester::new( connection_pool, vec![10; 100], @@ -259,7 +259,7 @@ async fn confirm_many(aggregator_operate_4844_mode: bool) -> anyhow::Result<()> // Tests that we resend first un-mined transaction every block with 
an increased gas price. #[tokio::test] async fn resend_each_block() -> anyhow::Result<()> { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut tester = EthSenderTester::new(connection_pool, vec![7, 6, 5, 5, 5, 2, 1], false, false).await; @@ -371,7 +371,7 @@ async fn resend_each_block() -> anyhow::Result<()> { // we won't mark it as confirmed but also won't resend it. #[tokio::test] async fn dont_resend_already_mined() -> anyhow::Result<()> { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], false, false).await; let tx = tester .aggregator @@ -442,7 +442,7 @@ async fn dont_resend_already_mined() -> anyhow::Result<()> { #[tokio::test] async fn three_scenarios() -> anyhow::Result<()> { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut tester = EthSenderTester::new(connection_pool.clone(), vec![100; 100], false, false).await; let mut hashes = vec![]; @@ -515,7 +515,7 @@ async fn three_scenarios() -> anyhow::Result<()> { #[should_panic(expected = "We can't operate after tx fail")] #[tokio::test] async fn failed_eth_tx() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut tester = EthSenderTester::new(connection_pool.clone(), vec![100; 100], false, false).await; @@ -587,7 +587,7 @@ fn l1_batch_with_metadata(header: L1BatchHeader) -> L1BatchWithMetadata { #[tokio::test] async fn correct_order_for_confirmations() -> anyhow::Result<()> { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], true, false).await; insert_genesis_protocol_version(&tester).await; let genesis_l1_batch = insert_l1_batch(&tester, L1BatchNumber(0)).await; @@ -648,7 +648,7 @@ async fn correct_order_for_confirmations() -> anyhow::Result<()> { #[tokio::test] async fn skipped_l1_batch_at_the_start() -> anyhow::Result<()> { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], true, false).await; insert_genesis_protocol_version(&tester).await; let genesis_l1_batch = insert_l1_batch(&tester, L1BatchNumber(0)).await; @@ -741,7 +741,7 @@ async fn skipped_l1_batch_at_the_start() -> anyhow::Result<()> { #[tokio::test] async fn skipped_l1_batch_in_the_middle() -> anyhow::Result<()> { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], true, false).await; insert_genesis_protocol_version(&tester).await; let genesis_l1_batch = insert_l1_batch(&tester, L1BatchNumber(0)).await; @@ -828,7 +828,7 @@ async fn skipped_l1_batch_in_the_middle() -> anyhow::Result<()> { #[tokio::test] async fn test_parse_multicall_data() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let tester = EthSenderTester::new(connection_pool, vec![100; 100], false, false).await; assert!(tester @@ -893,7 +893,7 @@ async fn test_parse_multicall_data() { #[tokio::test] async fn get_multicall_data() { - let connection_pool = 
ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut tester = EthSenderTester::new(connection_pool, vec![100; 100], false, false).await; let multicall_data = tester.aggregator.get_multicall_data().await; assert!(multicall_data.is_ok()); diff --git a/core/lib/zksync_core/src/eth_watch/event_processors/governance_upgrades.rs b/core/lib/zksync_core/src/eth_watch/event_processors/governance_upgrades.rs index 7066838fee88..6e33715f6e6f 100644 --- a/core/lib/zksync_core/src/eth_watch/event_processors/governance_upgrades.rs +++ b/core/lib/zksync_core/src/eth_watch/event_processors/governance_upgrades.rs @@ -1,8 +1,8 @@ use std::{convert::TryFrom, time::Instant}; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_types::{ - ethabi::Contract, protocol_version::GovernanceOperation, web3::types::Log, Address, + ethabi::Contract, protocol_upgrade::GovernanceOperation, web3::types::Log, Address, ProtocolUpgrade, ProtocolVersionId, H256, }; @@ -41,7 +41,7 @@ impl GovernanceUpgradesEventProcessor { impl EventProcessor for GovernanceUpgradesEventProcessor { async fn process_events( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, client: &dyn EthClient, events: Vec, ) -> Result<(), Error> { diff --git a/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs b/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs index 0a068033f2bd..30172e835a9e 100644 --- a/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs +++ b/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs @@ -1,6 +1,6 @@ use std::fmt; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, StorageProcessor}; use zksync_types::{web3::types::Log, H256}; use crate::eth_watch::client::{Error, EthClient}; @@ -14,7 +14,7 @@ pub trait EventProcessor: 'static + fmt::Debug + Send + Sync { /// Processes given events async fn process_events( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, client: &dyn EthClient, events: Vec, ) -> Result<(), Error>; diff --git a/core/lib/zksync_core/src/eth_watch/event_processors/priority_ops.rs b/core/lib/zksync_core/src/eth_watch/event_processors/priority_ops.rs index ad24eba1791b..033215def651 100644 --- a/core/lib/zksync_core/src/eth_watch/event_processors/priority_ops.rs +++ b/core/lib/zksync_core/src/eth_watch/event_processors/priority_ops.rs @@ -1,7 +1,7 @@ use std::convert::TryFrom; use zksync_contracts::zksync_contract; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_types::{l1::L1Tx, web3::types::Log, PriorityOpId, H256}; use crate::{ @@ -36,7 +36,7 @@ impl PriorityOpsEventProcessor { impl EventProcessor for PriorityOpsEventProcessor { async fn process_events( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, _client: &dyn EthClient, events: Vec, ) -> Result<(), Error> { diff --git a/core/lib/zksync_core/src/eth_watch/event_processors/upgrades.rs b/core/lib/zksync_core/src/eth_watch/event_processors/upgrades.rs index 393dad5afcda..4ea2dc3960d2 100644 --- a/core/lib/zksync_core/src/eth_watch/event_processors/upgrades.rs +++ b/core/lib/zksync_core/src/eth_watch/event_processors/upgrades.rs @@ -1,6 +1,6 @@ use std::convert::TryFrom; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_types::{web3::types::Log, ProtocolUpgrade, ProtocolVersionId, 
H256}; use crate::eth_watch::{ @@ -32,7 +32,7 @@ impl UpgradesEventProcessor { impl EventProcessor for UpgradesEventProcessor { async fn process_events( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, client: &dyn EthClient, events: Vec, ) -> Result<(), Error> { diff --git a/core/lib/zksync_core/src/eth_watch/mod.rs b/core/lib/zksync_core/src/eth_watch/mod.rs index 0f23650359ef..3b7847888c4a 100644 --- a/core/lib/zksync_core/src/eth_watch/mod.rs +++ b/core/lib/zksync_core/src/eth_watch/mod.rs @@ -8,7 +8,7 @@ use std::{sync::Arc, time::Duration}; use tokio::{sync::watch, task::JoinHandle}; use zksync_config::ETHWatchConfig; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_eth_client::EthInterface; use zksync_system_constants::PRIORITY_EXPIRATION; use zksync_types::{ @@ -45,7 +45,7 @@ pub struct EthWatch { event_processors: Vec>, last_processed_ethereum_block: u64, - pool: ConnectionPool, + pool: ConnectionPool, } impl EthWatch { @@ -53,7 +53,7 @@ impl EthWatch { diamond_proxy_address: Address, governance_contract: Option, mut client: Box, - pool: ConnectionPool, + pool: ConnectionPool, poll_interval: Duration, ) -> Self { let mut storage = pool.access_storage_tagged("eth_watch").await.unwrap(); @@ -98,7 +98,7 @@ impl EthWatch { async fn initialize_state( client: &dyn EthClient, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, ) -> EthWatchState { let next_expected_priority_id: PriorityOpId = storage .transactions_dal() @@ -162,7 +162,10 @@ impl EthWatch { } #[tracing::instrument(skip(self, storage))] - async fn loop_iteration(&mut self, storage: &mut StorageProcessor<'_>) -> Result<(), Error> { + async fn loop_iteration( + &mut self, + storage: &mut StorageProcessor<'_, Server>, + ) -> Result<(), Error> { let stage_latency = METRICS.poll_eth_node[&PollStage::Request].start(); let to_block = self.client.finalized_block_number().await?; if to_block <= self.last_processed_ethereum_block { @@ -191,7 +194,7 @@ impl EthWatch { pub async fn start_eth_watch( config: ETHWatchConfig, - pool: ConnectionPool, + pool: ConnectionPool, eth_gateway: Arc, diamond_proxy_addr: Address, governance: (Contract, Address), diff --git a/core/lib/zksync_core/src/eth_watch/tests.rs b/core/lib/zksync_core/src/eth_watch/tests.rs index 8d9965ef6f23..0f08a4bd1939 100644 --- a/core/lib/zksync_core/src/eth_watch/tests.rs +++ b/core/lib/zksync_core/src/eth_watch/tests.rs @@ -2,11 +2,11 @@ use std::{collections::HashMap, convert::TryInto, sync::Arc}; use tokio::sync::RwLock; use zksync_contracts::{governance_contract, zksync_contract}; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_types::{ ethabi::{encode, Hash, Token}, l1::{L1Tx, OpProcessingType, PriorityQueueType}, - protocol_version::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}, + protocol_upgrade::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}, web3::types::{Address, BlockNumber, Log}, Execute, L1TxCommonData, PriorityOpId, ProtocolUpgrade, ProtocolVersion, ProtocolVersionId, Transaction, H256, U256, @@ -202,7 +202,7 @@ fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx #[tokio::test] async fn test_normal_operation_l1_txs() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; 
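In tests the database kind is now spelled with a turbofish, as the hunks around this point show; a minimal, self-contained sketch of that pattern (the test body itself is illustrative):

```rust
#[cfg(test)]
mod example_tests {
    use zksync_dal::{ConnectionPool, Server, ServerDals};

    #[tokio::test]
    async fn typed_test_pool() {
        // The test pool now names the database it is backed by.
        let pool = ConnectionPool::<Server>::test_pool().await;
        let mut storage = pool.access_storage().await.unwrap();

        // Server-side DAL accessors come from `ServerDals`; e.g. the mempool reset
        // used by the eth_watch tests above.
        storage.transactions_dal().reset_mempool().await.unwrap();
    }
}
```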
let mut client = FakeEthClient::new(); @@ -250,7 +250,7 @@ async fn test_normal_operation_l1_txs() { #[tokio::test] async fn test_normal_operation_upgrades() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; let mut client = FakeEthClient::new(); @@ -311,7 +311,7 @@ async fn test_normal_operation_upgrades() { #[tokio::test] async fn test_gap_in_upgrades() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; let mut client = FakeEthClient::new(); @@ -350,7 +350,7 @@ async fn test_gap_in_upgrades() { #[tokio::test] async fn test_normal_operation_governance_upgrades() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; let mut client = FakeEthClient::new(); @@ -412,7 +412,7 @@ async fn test_normal_operation_governance_upgrades() { #[tokio::test] #[should_panic] async fn test_gap_in_single_batch() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; let mut client = FakeEthClient::new(); @@ -442,7 +442,7 @@ async fn test_gap_in_single_batch() { #[tokio::test] #[should_panic] async fn test_gap_between_batches() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; let mut client = FakeEthClient::new(); @@ -477,7 +477,7 @@ async fn test_gap_between_batches() { #[tokio::test] async fn test_overlapping_batches() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; let mut client = FakeEthClient::new(); @@ -523,7 +523,7 @@ async fn test_overlapping_batches() { assert_eq!(tx.common_data.serial_id.0, 4); } -async fn get_all_db_txs(storage: &mut StorageProcessor<'_>) -> Vec { +async fn get_all_db_txs(storage: &mut StorageProcessor<'_, Server>) -> Vec { storage.transactions_dal().reset_mempool().await.unwrap(); storage .transactions_dal() @@ -761,7 +761,7 @@ fn upgrade_into_diamond_cut(upgrade: ProtocolUpgrade) -> Token { ]) } -async fn setup_db(connection_pool: &ConnectionPool) { +async fn setup_db(connection_pool: &ConnectionPool) { connection_pool .access_storage() .await diff --git a/core/lib/zksync_core/src/fee_model.rs b/core/lib/zksync_core/src/fee_model.rs index 643ae175754d..cfeeea167d9e 100644 --- a/core/lib/zksync_core/src/fee_model.rs +++ b/core/lib/zksync_core/src/fee_model.rs @@ -1,6 +1,6 @@ use std::{fmt, sync::Arc}; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use zksync_types::{ fee_model::{ BatchFeeInput, FeeModelConfig, FeeModelConfigV2, FeeParams, FeeParamsV1, FeeParamsV2, @@ -84,13 +84,13 @@ impl MainNodeFeeInputProvider { #[derive(Debug)] pub(crate) struct ApiFeeInputProvider { inner: Arc, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, } impl ApiFeeInputProvider { pub fn new( inner: Arc, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, ) -> Self { Self { inner, diff --git a/core/lib/zksync_core/src/genesis.rs b/core/lib/zksync_core/src/genesis.rs index cfbdee8ed3fa..d27c9540f518 100644 --- a/core/lib/zksync_core/src/genesis.rs +++ b/core/lib/zksync_core/src/genesis.rs @@ -9,7 +9,7 @@ 
use multivm::{ zk_evm_latest::aux_structures::{LogQuery as MultiVmLogQuery, Timestamp as MultiVMTimestamp}, }; use zksync_contracts::{BaseSystemContracts, SET_CHAIN_ID_EVENT}; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_eth_client::{clients::QueryClient, EthInterface}; use zksync_merkle_tree::domain::ZkSyncTree; use zksync_system_constants::PRIORITY_EXPIRATION; @@ -21,7 +21,8 @@ use zksync_types::{ commitment::{CommitmentInput, L1BatchCommitment}, fee_model::BatchFeeInput, get_code_key, get_system_context_init_logs, - protocol_version::{decode_set_chain_id_event, L1VerifierConfig, ProtocolVersion}, + protocol_upgrade::{decode_set_chain_id_event, ProtocolVersion}, + protocol_version::L1VerifierConfig, tokens::{TokenInfo, TokenMetadata, ETHEREUM_ADDRESS}, web3::types::{BlockNumber, FilterBuilder}, zk_evm_types::{LogQuery, Timestamp}, @@ -57,7 +58,7 @@ impl GenesisParams { } pub async fn ensure_genesis_state( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, zksync_chain_id: L2ChainId, genesis_params: &GenesisParams, ) -> anyhow::Result { @@ -153,7 +154,7 @@ pub async fn ensure_genesis_state( // The code of the bootloader should not be deployed anywhere anywhere in the kernel space (i.e. addresses below 2^16) // because in this case we will have to worry about protecting it. async fn insert_base_system_contracts_to_factory_deps( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, contracts: &BaseSystemContracts, ) -> anyhow::Result<()> { let factory_deps = [&contracts.bootloader, &contracts.default_aa] @@ -169,7 +170,7 @@ async fn insert_base_system_contracts_to_factory_deps( } async fn insert_system_contracts( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, contracts: &[DeployedContract], chain_id: L2ChainId, ) -> anyhow::Result<()> { @@ -284,7 +285,7 @@ async fn insert_system_contracts( #[allow(clippy::too_many_arguments)] pub(crate) async fn create_genesis_l1_batch( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, first_validator_address: Address, chain_id: L2ChainId, protocol_version: ProtocolVersionId, @@ -361,7 +362,7 @@ pub(crate) async fn create_genesis_l1_batch( Ok(()) } -async fn add_eth_token(transaction: &mut StorageProcessor<'_>) -> anyhow::Result<()> { +async fn add_eth_token(transaction: &mut StorageProcessor<'_, Server>) -> anyhow::Result<()> { assert!(transaction.in_transaction()); // sanity check let eth_token = TokenInfo { l1_address: ETHEREUM_ADDRESS, @@ -387,7 +388,7 @@ async fn add_eth_token(transaction: &mut StorageProcessor<'_>) -> anyhow::Result } async fn save_genesis_l1_batch_metadata( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, commitment: L1BatchCommitment, genesis_root_hash: H256, rollup_last_leaf_index: u64, @@ -422,7 +423,7 @@ pub(crate) async fn save_set_chain_id_tx( eth_client_url: &str, diamond_proxy_address: Address, state_transition_manager_address: Address, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, ) -> anyhow::Result<()> { let eth_client = QueryClient::new(eth_client_url)?; let to = eth_client.block_number("fetch_chain_id_tx").await?.as_u64(); @@ -455,14 +456,14 @@ pub(crate) async fn save_set_chain_id_tx( #[cfg(test)] mod tests { - use zksync_dal::ConnectionPool; + use zksync_dal::{ConnectionPool, Server, ServerDals}; use zksync_types::system_contracts::get_system_smart_contracts; 
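The prover-side house-keeper jobs further down follow the mirror-image pattern with `ConnectionPool<Prover>`; a minimal sketch, assuming only the `prover_dal::Prover` marker and the pool API shown in those hunks (the job struct and its method are illustrative):

```rust
use prover_dal::Prover;
use zksync_dal::ConnectionPool;

// Illustrative periodic job owning a prover-typed pool, as the retry managers
// and queue monitors below do.
#[derive(Debug)]
struct ExampleProverJob {
    pool: ConnectionPool<Prover>,
}

impl ExampleProverJob {
    async fn run_once(&self) -> anyhow::Result<()> {
        // The connection is a `StorageProcessor<'_, Prover>`: prover DAL accessors
        // (via the `ProverDals` trait) are available, server-only ones are not.
        let _storage = self.pool.access_storage().await?;
        Ok(())
    }
}
```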
use super::*; #[tokio::test] async fn running_genesis() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.blocks_dal().delete_genesis().await.unwrap(); @@ -494,7 +495,7 @@ mod tests { #[tokio::test] async fn running_genesis_with_big_chain_id() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.blocks_dal().delete_genesis().await.unwrap(); @@ -520,7 +521,7 @@ mod tests { #[tokio::test] async fn running_genesis_with_non_latest_protocol_version() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let params = GenesisParams { protocol_version: ProtocolVersionId::Version10, diff --git a/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs b/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs index 695c2008e134..75b10875e8ca 100644 --- a/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs +++ b/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs @@ -1,5 +1,5 @@ use async_trait::async_trait; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use zksync_utils::time::seconds_since_epoch; use crate::{ @@ -10,11 +10,11 @@ use crate::{ #[derive(Debug)] pub struct L1BatchMetricsReporter { reporting_interval_ms: u64, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, } impl L1BatchMetricsReporter { - pub fn new(reporting_interval_ms: u64, connection_pool: ConnectionPool) -> Self { + pub fn new(reporting_interval_ms: u64, connection_pool: ConnectionPool) -> Self { Self { reporting_interval_ms, connection_pool, diff --git a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_job_retry_manager.rs b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_job_retry_manager.rs index 7cf2c231b67a..7081e64a1ae2 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_job_retry_manager.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_job_retry_manager.rs @@ -1,13 +1,14 @@ use std::time::Duration; use async_trait::async_trait; +use prover_dal::{Prover, ProverDals}; use zksync_dal::ConnectionPool; use crate::house_keeper::periodic_job::PeriodicJob; #[derive(Debug)] pub struct FriProofCompressorJobRetryManager { - pool: ConnectionPool, + pool: ConnectionPool, max_attempts: u32, processing_timeout: Duration, retry_interval_ms: u64, @@ -18,7 +19,7 @@ impl FriProofCompressorJobRetryManager { max_attempts: u32, processing_timeout: Duration, retry_interval_ms: u64, - pool: ConnectionPool, + pool: ConnectionPool, ) -> Self { Self { max_attempts, diff --git a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs index 0039b32bc77c..7c90d490c08b 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs @@ -1,5 +1,7 @@ use async_trait::async_trait; -use zksync_dal::{fri_prover_dal::types::JobCountStatistics, ConnectionPool}; +use prover_dal::{Prover, ProverDals}; +use zksync_dal::ConnectionPool; +use zksync_types::prover_dal::JobCountStatistics; use crate::house_keeper::periodic_job::PeriodicJob; @@ -8,18 +10,18 @@ const PROOF_COMPRESSOR_SERVICE_NAME: &str = 
"proof_compressor"; #[derive(Debug)] pub struct FriProofCompressorStatsReporter { reporting_interval_ms: u64, - pool: ConnectionPool, + pool: ConnectionPool, } impl FriProofCompressorStatsReporter { - pub fn new(reporting_interval_ms: u64, pool: ConnectionPool) -> Self { + pub fn new(reporting_interval_ms: u64, pool: ConnectionPool) -> Self { Self { reporting_interval_ms, pool, } } - async fn get_job_statistics(pool: &ConnectionPool) -> JobCountStatistics { + async fn get_job_statistics(pool: &ConnectionPool) -> JobCountStatistics { pool.access_storage() .await .unwrap() diff --git a/core/lib/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs b/core/lib/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs index 8ff847a5ca92..1feec2cc74cf 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_prover_job_retry_manager.rs @@ -1,13 +1,14 @@ use std::time::Duration; use async_trait::async_trait; +use prover_dal::{Prover, ProverDals}; use zksync_dal::ConnectionPool; use crate::house_keeper::periodic_job::PeriodicJob; #[derive(Debug)] pub struct FriProverJobRetryManager { - pool: ConnectionPool, + pool: ConnectionPool, max_attempts: u32, processing_timeout: Duration, retry_interval_ms: u64, @@ -18,7 +19,7 @@ impl FriProverJobRetryManager { max_attempts: u32, processing_timeout: Duration, retry_interval_ms: u64, - pool: ConnectionPool, + pool: ConnectionPool, ) -> Self { Self { max_attempts, diff --git a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs index 90f90759b323..91a42c482a1b 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs @@ -1,22 +1,23 @@ use async_trait::async_trait; +use prover_dal::{Prover, ProverDals}; use zksync_config::configs::fri_prover_group::FriProverGroupConfig; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use crate::house_keeper::periodic_job::PeriodicJob; #[derive(Debug)] pub struct FriProverStatsReporter { reporting_interval_ms: u64, - prover_connection_pool: ConnectionPool, - db_connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, + db_connection_pool: ConnectionPool, config: FriProverGroupConfig, } impl FriProverStatsReporter { pub fn new( reporting_interval_ms: u64, - prover_connection_pool: ConnectionPool, - db_connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, + db_connection_pool: ConnectionPool, config: FriProverGroupConfig, ) -> Self { Self { diff --git a/core/lib/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs b/core/lib/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs index 70911339a8fd..3494d7e37de3 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs @@ -1,4 +1,5 @@ use async_trait::async_trait; +use prover_dal::{Prover, ProverDals}; use zksync_dal::ConnectionPool; use crate::house_keeper::periodic_job::PeriodicJob; @@ -6,11 +7,11 @@ use crate::house_keeper::periodic_job::PeriodicJob; #[derive(Debug)] pub struct SchedulerCircuitQueuer { queuing_interval_ms: u64, - pool: ConnectionPool, + pool: ConnectionPool, } impl SchedulerCircuitQueuer { - pub fn new(queuing_interval_ms: u64, pool: ConnectionPool) -> Self { + pub fn new(queuing_interval_ms: u64, pool: 
ConnectionPool) -> Self { Self { queuing_interval_ms, pool, diff --git a/core/lib/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs b/core/lib/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs index 3aa21bdd534d..9afadec6c323 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_witness_generator_jobs_retry_manager.rs @@ -1,13 +1,14 @@ use std::time::Duration; use async_trait::async_trait; +use prover_dal::{Prover, ProverDals}; use zksync_dal::ConnectionPool; use crate::house_keeper::periodic_job::PeriodicJob; #[derive(Debug)] pub struct FriWitnessGeneratorJobRetryManager { - pool: ConnectionPool, + pool: ConnectionPool, max_attempts: u32, processing_timeout: Duration, retry_interval_ms: u64, @@ -18,7 +19,7 @@ impl FriWitnessGeneratorJobRetryManager { max_attempts: u32, processing_timeout: Duration, retry_interval_ms: u64, - pool: ConnectionPool, + pool: ConnectionPool, ) -> Self { Self { max_attempts, diff --git a/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs index cf1cdc90314e..83dba44af527 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs @@ -1,8 +1,9 @@ use std::collections::HashMap; use async_trait::async_trait; -use zksync_dal::{fri_prover_dal::types::JobCountStatistics, ConnectionPool}; -use zksync_types::basic_fri_types::AggregationRound; +use prover_dal::{Prover, ProverDals}; +use zksync_dal::ConnectionPool; +use zksync_types::{basic_fri_types::AggregationRound, prover_dal::JobCountStatistics}; use crate::house_keeper::periodic_job::PeriodicJob; @@ -11,11 +12,11 @@ const FRI_WITNESS_GENERATOR_SERVICE_NAME: &str = "fri_witness_generator"; #[derive(Debug)] pub struct FriWitnessGeneratorStatsReporter { reporting_interval_ms: u64, - pool: ConnectionPool, + pool: ConnectionPool, } impl FriWitnessGeneratorStatsReporter { - pub fn new(pool: ConnectionPool, reporting_interval_ms: u64) -> Self { + pub fn new(pool: ConnectionPool, reporting_interval_ms: u64) -> Self { Self { reporting_interval_ms, pool, diff --git a/core/lib/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs b/core/lib/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs index df9208f1f451..eadcc59c0c02 100644 --- a/core/lib/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs +++ b/core/lib/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs @@ -1,4 +1,5 @@ use async_trait::async_trait; +use prover_dal::{Prover, ProverDals}; use zksync_dal::ConnectionPool; use crate::house_keeper::periodic_job::PeriodicJob; @@ -6,11 +7,11 @@ use crate::house_keeper::periodic_job::PeriodicJob; #[derive(Debug)] pub struct WaitingToQueuedFriWitnessJobMover { job_moving_interval_ms: u64, - pool: ConnectionPool, + pool: ConnectionPool, } impl WaitingToQueuedFriWitnessJobMover { - pub fn new(job_mover_interval_ms: u64, pool: ConnectionPool) -> Self { + pub fn new(job_mover_interval_ms: u64, pool: ConnectionPool) -> Self { Self { job_moving_interval_ms: job_mover_interval_ms, pool, diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index 1f056057481f..08e08e8b4e1f 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -12,6 +12,7 @@ use 
api_server::tx_sender::master_pool_sink::MasterPoolSink; use fee_model::{ApiFeeInputProvider, BatchFeeModelInputProvider, MainNodeFeeInputProvider}; use futures::channel::oneshot; use prometheus_exporter::PrometheusExporterConfig; +use prover_dal::Prover; use temp_config_store::{Secrets, TempConfigStore}; use tokio::{sync::watch, task::JoinHandle}; use zksync_circuit_breaker::{ @@ -32,7 +33,8 @@ use zksync_config::{ ApiConfig, ContractsConfig, DBConfig, ETHSenderConfig, PostgresConfig, }; use zksync_contracts::{governance_contract, BaseSystemContracts}; -use zksync_dal::{healthcheck::ConnectionPoolHealthCheck, ConnectionPool}; +use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Server, ServerDals}; +use zksync_db_connection::healthcheck::ConnectionPoolHealthCheck; use zksync_eth_client::{ clients::{PKSigningClient, QueryClient}, BoundEthInterface, CallFunctionArgs, EthInterface, @@ -115,7 +117,7 @@ pub async fn genesis_init( wait_for_set_chain_id: bool, ) -> anyhow::Result<()> { let db_url = postgres_config.master_url()?; - let pool = ConnectionPool::singleton(db_url) + let pool = ConnectionPool::::singleton(db_url) .build() .await .context("failed to build connection_pool")?; @@ -202,7 +204,7 @@ pub async fn genesis_init( pub async fn is_genesis_needed(postgres_config: &PostgresConfig) -> bool { let db_url = postgres_config.master_url().unwrap(); - let pool = ConnectionPool::singleton(db_url) + let pool = ConnectionPool::::singleton(db_url) .build() .await .expect("failed to build connection_pool"); @@ -313,22 +315,24 @@ pub async fn initialize_components( let db_config = configs.db_config.clone().context("db_config")?; let postgres_config = configs.postgres_config.clone().context("postgres_config")?; + if let Some(threshold) = postgres_config.slow_query_threshold() { - ConnectionPool::global_config().set_slow_query_threshold(threshold)?; + ConnectionPool::::global_config().set_slow_query_threshold(threshold)?; } if let Some(threshold) = postgres_config.long_connection_threshold() { - ConnectionPool::global_config().set_long_connection_threshold(threshold)?; + ConnectionPool::::global_config().set_long_connection_threshold(threshold)?; } let pool_size = postgres_config.max_connections()?; - let connection_pool = ConnectionPool::builder(postgres_config.master_url()?, pool_size) - .build() - .await - .context("failed to build connection_pool")?; + let connection_pool = + ConnectionPool::::builder(postgres_config.master_url()?, pool_size) + .build() + .await + .context("failed to build connection_pool")?; // We're most interested in setting acquire / statement timeouts for the API server, which puts the most load // on Postgres. let replica_connection_pool = - ConnectionPool::builder(postgres_config.replica_url()?, pool_size) + ConnectionPool::::builder(postgres_config.replica_url()?, pool_size) .set_acquire_timeout(postgres_config.acquire_timeout()) .set_statement_timeout(postgres_config.statement_timeout()) .build() @@ -627,7 +631,7 @@ pub async fn initialize_components( if components.contains(&Component::EthWatcher) { let started_at = Instant::now(); tracing::info!("initializing ETH-Watcher"); - let eth_watch_pool = ConnectionPool::singleton(postgres_config.master_url()?) + let eth_watch_pool = ConnectionPool::::singleton(postgres_config.master_url()?) 
.build() .await .context("failed to build eth_watch_pool")?; @@ -656,7 +660,7 @@ pub async fn initialize_components( if components.contains(&Component::EthTxAggregator) { let started_at = Instant::now(); tracing::info!("initializing ETH-TxAggregator"); - let eth_sender_pool = ConnectionPool::singleton(postgres_config.master_url()?) + let eth_sender_pool = ConnectionPool::::singleton(postgres_config.master_url()?) .build() .await .context("failed to build eth_sender_pool")?; @@ -703,7 +707,7 @@ pub async fn initialize_components( if components.contains(&Component::EthTxManager) { let started_at = Instant::now(); tracing::info!("initializing ETH-TxManager"); - let eth_manager_pool = ConnectionPool::singleton(postgres_config.master_url()?) + let eth_manager_pool = ConnectionPool::::singleton(postgres_config.master_url()?) .build() .await .context("failed to build eth_manager_pool")?; @@ -745,10 +749,11 @@ pub async fn initialize_components( .context("add_trees_to_task_futures()")?; if components.contains(&Component::BasicWitnessInputProducer) { - let singleton_connection_pool = ConnectionPool::singleton(postgres_config.master_url()?) - .build() - .await - .context("failed to build singleton connection_pool")?; + let singleton_connection_pool = + ConnectionPool::::singleton(postgres_config.master_url()?) + .build() + .await + .context("failed to build singleton connection_pool")?; let network_config = configs.network_config.clone().context("network_config")?; add_basic_witness_input_producer_to_task_futures( &mut task_futures, @@ -784,10 +789,11 @@ pub async fn initialize_components( } if components.contains(&Component::CommitmentGenerator) { - let commitment_generator_pool = ConnectionPool::singleton(postgres_config.master_url()?) - .build() - .await - .context("failed to build commitment_generator_pool")?; + let commitment_generator_pool = + ConnectionPool::::singleton(postgres_config.master_url()?) + .build() + .await + .context("failed to build commitment_generator_pool")?; let commitment_generator = CommitmentGenerator::new(commitment_generator_pool); app_health.insert_component(commitment_generator.health_check()); task_futures.push(tokio::spawn( @@ -820,7 +826,7 @@ async fn add_state_keeper_to_task_futures( object_store: Arc, stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { - let pool_builder = ConnectionPool::singleton(postgres_config.master_url()?); + let pool_builder = ConnectionPool::::singleton(postgres_config.master_url()?); let state_keeper_pool = pool_builder .build() .await @@ -968,7 +974,7 @@ async fn run_tree( let tree_health_check = metadata_calculator.tree_health_check(); app_health.insert_component(tree_health_check); - let pool = ConnectionPool::singleton(postgres_config.master_url()?) + let pool = ConnectionPool::::singleton(postgres_config.master_url()?) 
.build() .await .context("failed to build connection pool")?; @@ -983,7 +989,7 @@ async fn run_tree( async fn add_basic_witness_input_producer_to_task_futures( task_futures: &mut Vec>>, - connection_pool: &ConnectionPool, + connection_pool: &ConnectionPool, store_factory: &ObjectStoreFactory, l2_chain_id: L2ChainId, stop_receiver: watch::Receiver, @@ -1016,7 +1022,7 @@ async fn add_house_keeper_to_task_futures( .clone() .context("house_keeper_config")?; let postgres_config = configs.postgres_config.clone().context("postgres_config")?; - let connection_pool = ConnectionPool::builder( + let connection_pool = ConnectionPool::::builder( postgres_config.replica_url()?, postgres_config.max_connections()?, ) @@ -1026,9 +1032,7 @@ async fn add_house_keeper_to_task_futures( let pool_for_metrics = connection_pool.clone(); task_futures.push(tokio::spawn(async move { - pool_for_metrics - .run_postgres_metrics_scraping(Duration::from_secs(60)) - .await; + PostgresMetrics::run_scraping(pool_for_metrics, Duration::from_secs(60)).await; Ok(()) })); @@ -1037,7 +1041,7 @@ async fn add_house_keeper_to_task_futures( connection_pool.clone(), ); - let prover_connection_pool = ConnectionPool::builder( + let prover_connection_pool = ConnectionPool::::builder( postgres_config.prover_url()?, postgres_config.max_connections()?, ) @@ -1123,7 +1127,7 @@ async fn add_house_keeper_to_task_futures( fn build_storage_caches( configs: &TempConfigStore, - replica_connection_pool: &ConnectionPool, + replica_connection_pool: &ConnectionPool, task_futures: &mut Vec>>, stop_receiver: watch::Receiver, ) -> anyhow::Result { @@ -1149,8 +1153,8 @@ async fn build_tx_sender( tx_sender_config: &TxSenderConfig, web3_json_config: &Web3JsonRpcConfig, state_keeper_config: &StateKeeperConfig, - replica_pool: ConnectionPool, - master_pool: ConnectionPool, + replica_pool: ConnectionPool, + master_pool: ConnectionPool, batch_fee_model_input_provider: Arc, storage_caches: PostgresStorageCaches, ) -> (TxSender, VmConcurrencyBarrier) { @@ -1189,8 +1193,8 @@ async fn run_http_api( state_keeper_config: &StateKeeperConfig, internal_api: &InternalApiConfig, api_config: &ApiConfig, - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, + master_connection_pool: ConnectionPool, + replica_connection_pool: ConnectionPool, stop_receiver: watch::Receiver, batch_fee_model_input_provider: Arc, with_debug_namespace: bool, @@ -1213,7 +1217,7 @@ async fn run_http_api( } namespaces.push(Namespace::Snapshots); - let updaters_pool = ConnectionPool::builder(postgres_config.replica_url()?, 2) + let updaters_pool = ConnectionPool::::builder(postgres_config.replica_url()?, 2) .build() .await .context("failed to build last_miniblock_pool")?; @@ -1254,8 +1258,8 @@ async fn run_ws_api( internal_api: &InternalApiConfig, api_config: &ApiConfig, batch_fee_model_input_provider: Arc, - master_connection_pool: ConnectionPool, - replica_connection_pool: ConnectionPool, + master_connection_pool: ConnectionPool, + replica_connection_pool: ConnectionPool, stop_receiver: watch::Receiver, storage_caches: PostgresStorageCaches, ) -> anyhow::Result<()> { @@ -1269,7 +1273,7 @@ async fn run_ws_api( storage_caches, ) .await; - let last_miniblock_pool = ConnectionPool::singleton(postgres_config.replica_url()?) + let last_miniblock_pool = ConnectionPool::::singleton(postgres_config.replica_url()?) 
.build() .await .context("failed to build last_miniblock_pool")?; @@ -1321,7 +1325,7 @@ async fn circuit_breakers_for_components( .iter() .any(|c| matches!(c, Component::EthTxAggregator | Component::EthTxManager)) { - let pool = ConnectionPool::singleton(postgres_config.replica_url()?) + let pool = ConnectionPool::::singleton(postgres_config.replica_url()?) .build() .await .context("failed to build a connection pool")?; @@ -1334,7 +1338,7 @@ async fn circuit_breakers_for_components( Component::HttpApi | Component::WsApi | Component::ContractVerificationApi ) }) { - let pool = ConnectionPool::singleton(postgres_config.replica_url()?) + let pool = ConnectionPool::::singleton(postgres_config.replica_url()?) .build() .await?; circuit_breakers.push(Box::new(ReplicationLagChecker { diff --git a/core/lib/zksync_core/src/metadata_calculator/helpers.rs b/core/lib/zksync_core/src/metadata_calculator/helpers.rs index 6137f8e2f1f2..e07972e8247e 100644 --- a/core/lib/zksync_core/src/metadata_calculator/helpers.rs +++ b/core/lib/zksync_core/src/metadata_calculator/helpers.rs @@ -14,7 +14,7 @@ use serde::{Deserialize, Serialize}; use tokio::sync::mpsc; use tokio::sync::watch; use zksync_config::configs::database::MerkleTreeMode; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_health_check::{Health, HealthStatus}; use zksync_merkle_tree::{ domain::{TreeMetadata, ZkSyncTree, ZkSyncTreeReader}, @@ -399,7 +399,7 @@ pub(crate) struct L1BatchWithLogs { impl L1BatchWithLogs { pub async fn new( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, l1_batch_number: L1BatchNumber, ) -> Option { tracing::debug!("Loading storage logs data for L1 batch #{l1_batch_number}"); @@ -478,7 +478,7 @@ impl L1BatchWithLogs { #[cfg(test)] mod tests { use tempfile::TempDir; - use zksync_dal::ConnectionPool; + use zksync_dal::{ConnectionPool, Server}; use zksync_prover_interface::inputs::PrepareBasicCircuitsJob; use zksync_types::{L2ChainId, StorageKey, StorageLog}; @@ -491,7 +491,7 @@ mod tests { impl L1BatchWithLogs { /// Old, slower method of loading storage logs. We want to test its equivalence to the new implementation. 
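A minimal sketch of the two pool-builder paths used in the hunks above, with the marker types written out explicitly (`Server` for the core database, `Prover` for the prover database); this mirrors the calls in the diff and is illustrative only:

```rust
// Sketch only: mirrors the pool construction pattern from the hunks above.
use prover_dal::Prover;
use zksync_config::PostgresConfig;
use zksync_dal::{ConnectionPool, Server};

async fn build_pools(
    postgres_config: &PostgresConfig,
) -> anyhow::Result<(ConnectionPool<Server>, ConnectionPool<Prover>)> {
    let pool_size = postgres_config.max_connections()?;
    // Core (server) database pool, built against the master URL.
    let core_pool = ConnectionPool::<Server>::builder(postgres_config.master_url()?, pool_size)
        .build()
        .await?;
    // Prover database pool; only the prover-side DAL is reachable through it.
    let prover_pool = ConnectionPool::<Prover>::builder(postgres_config.prover_url()?, pool_size)
        .build()
        .await?;
    Ok((core_pool, prover_pool))
}
```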
async fn slow( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, l1_batch_number: L1BatchNumber, ) -> Option { let header = storage @@ -560,7 +560,7 @@ mod tests { #[tokio::test] async fn loaded_logs_equivalence_basics() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; ensure_genesis_state( &mut pool.access_storage().await.unwrap(), L2ChainId::from(270), @@ -585,7 +585,7 @@ mod tests { #[tokio::test] async fn loaded_logs_equivalence_with_zero_no_op_logs() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::from(270), &GenesisParams::mock()) .await @@ -621,7 +621,7 @@ mod tests { } async fn assert_log_equivalence( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, tree: &mut AsyncTree, l1_batch_number: L1BatchNumber, ) { @@ -676,7 +676,7 @@ mod tests { #[tokio::test] async fn loaded_logs_equivalence_with_non_zero_no_op_logs() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::from(270), &GenesisParams::mock()) .await @@ -723,7 +723,7 @@ mod tests { #[tokio::test] async fn loaded_logs_equivalence_with_protective_reads() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::from(270), &GenesisParams::mock()) .await diff --git a/core/lib/zksync_core/src/metadata_calculator/mod.rs b/core/lib/zksync_core/src/metadata_calculator/mod.rs index 7ea8e2078c25..a4bcb9191580 100644 --- a/core/lib/zksync_core/src/metadata_calculator/mod.rs +++ b/core/lib/zksync_core/src/metadata_calculator/mod.rs @@ -12,7 +12,7 @@ use zksync_config::configs::{ chain::OperationsManagerConfig, database::{MerkleTreeConfig, MerkleTreeMode}, }; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use zksync_health_check::{HealthUpdater, ReactiveHealthCheck}; use zksync_object_store::ObjectStore; @@ -143,7 +143,7 @@ impl MetadataCalculator { pub async fn run( self, - pool: ConnectionPool, + pool: ConnectionPool, stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { let tree = self.create_tree().await?; diff --git a/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs b/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs index 4ca9215f3e6a..cbe088509b80 100644 --- a/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs +++ b/core/lib/zksync_core/src/metadata_calculator/recovery/mod.rs @@ -34,7 +34,7 @@ use anyhow::Context as _; use async_trait::async_trait; use futures::future; use tokio::sync::{watch, Mutex, Semaphore}; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_health_check::HealthUpdater; use zksync_merkle_tree::TreeEntry; use zksync_types::{ @@ -120,7 +120,10 @@ impl SnapshotParameters { /// (i.e., not changed after a node restart). 
const DESIRED_CHUNK_SIZE: u64 = 200_000; - async fn new(pool: &ConnectionPool, recovery: &SnapshotRecoveryStatus) -> anyhow::Result { + async fn new( + pool: &ConnectionPool, + recovery: &SnapshotRecoveryStatus, + ) -> anyhow::Result { let miniblock = recovery.miniblock_number; let expected_root_hash = recovery.l1_batch_root_hash; @@ -156,7 +159,7 @@ impl GenericAsyncTree { /// if necessary. pub async fn ensure_ready( self, - pool: &ConnectionPool, + pool: &ConnectionPool, stop_receiver: &watch::Receiver, health_updater: &HealthUpdater, ) -> anyhow::Result> { @@ -207,7 +210,7 @@ impl AsyncTreeRecovery { mut self, snapshot: SnapshotParameters, mut options: RecoveryOptions<'_>, - pool: &ConnectionPool, + pool: &ConnectionPool, stop_receiver: &watch::Receiver, ) -> anyhow::Result> { let chunk_count = options.chunk_count; @@ -268,7 +271,7 @@ impl AsyncTreeRecovery { /// Filters out `key_chunks` for which recovery was successfully performed. async fn filter_chunks( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, snapshot_miniblock: MiniblockNumber, key_chunks: &[ops::RangeInclusive], ) -> anyhow::Result>> { @@ -315,7 +318,7 @@ impl AsyncTreeRecovery { tree: &Mutex, snapshot_miniblock: MiniblockNumber, key_chunk: ops::RangeInclusive, - pool: &ConnectionPool, + pool: &ConnectionPool, stop_receiver: &watch::Receiver, ) -> anyhow::Result<()> { let acquire_connection_latency = @@ -389,7 +392,7 @@ impl AsyncTreeRecovery { } async fn get_snapshot_recovery( - pool: &ConnectionPool, + pool: &ConnectionPool, ) -> anyhow::Result> { let mut storage = pool.access_storage_tagged("metadata_calculator").await?; Ok(storage diff --git a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs index af6acfad4c95..6a4118b3a86f 100644 --- a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs @@ -10,6 +10,7 @@ use zksync_config::configs::{ chain::OperationsManagerConfig, database::{MerkleTreeConfig, MerkleTreeMode}, }; +use zksync_dal::ServerDals; use zksync_health_check::{CheckHealth, HealthStatus, ReactiveHealthCheck}; use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction}; use zksync_types::{L1BatchNumber, L2ChainId, ProtocolVersionId, StorageLog}; @@ -59,7 +60,7 @@ async fn create_tree_recovery(path: PathBuf, l1_batch: L1BatchNumber) -> AsyncTr #[tokio::test] async fn basic_recovery_workflow() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let snapshot_recovery = prepare_recovery_snapshot_with_genesis(&pool, &temp_dir).await; let snapshot = SnapshotParameters::new(&pool, &snapshot_recovery) @@ -93,7 +94,7 @@ async fn basic_recovery_workflow() { } async fn prepare_recovery_snapshot_with_genesis( - pool: &ConnectionPool, + pool: &ConnectionPool, temp_dir: &TempDir, ) -> SnapshotRecoveryStatus { let mut storage = pool.access_storage().await.unwrap(); @@ -172,7 +173,7 @@ impl HandleRecoveryEvent for TestEventListener { #[test_casing(3, [5, 7, 8])] #[tokio::test] async fn recovery_fault_tolerance(chunk_count: u64) { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let snapshot_recovery = prepare_recovery_snapshot_with_genesis(&pool, &temp_dir).await; 
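The test changes above all follow the same pattern: the pool's database marker is now spelled out via turbofish when constructing a test pool. A minimal sketch:

```rust
use zksync_dal::{ConnectionPool, Server, ServerDals};

#[tokio::test]
async fn typed_test_pool_smoke() {
    // A throwaway test database; the `Server` marker selects the core schema.
    let pool = ConnectionPool::<Server>::test_pool().await;
    let mut storage = pool.access_storage().await.unwrap();
    // Server-side DAL accessors are reachable because `ServerDals` is in scope.
    assert!(storage.blocks_dal().is_genesis_needed().await.unwrap());
}
```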
@@ -238,7 +239,7 @@ impl RecoveryWorkflowCase { #[test_casing(2, RecoveryWorkflowCase::ALL)] #[tokio::test] async fn entire_recovery_workflow(case: RecoveryWorkflowCase) { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; // Emulate the recovered view of Postgres. Unlike with previous tests, we don't perform genesis. let snapshot_logs = gen_storage_logs(100..300, 1).pop().unwrap(); let mut storage = pool.access_storage().await.unwrap(); diff --git a/core/lib/zksync_core/src/metadata_calculator/tests.rs b/core/lib/zksync_core/src/metadata_calculator/tests.rs index df6b849a72c3..2040ef1c3118 100644 --- a/core/lib/zksync_core/src/metadata_calculator/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/tests.rs @@ -10,7 +10,7 @@ use zksync_config::configs::{ chain::OperationsManagerConfig, database::{MerkleTreeConfig, MerkleTreeMode}, }; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_health_check::{CheckHealth, HealthStatus}; use zksync_merkle_tree::domain::ZkSyncTree; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; @@ -42,7 +42,7 @@ where #[tokio::test] async fn genesis_creation() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let (calculator, _) = setup_calculator(temp_dir.path(), &pool).await; @@ -58,7 +58,7 @@ async fn genesis_creation() { #[tokio::test] async fn basic_workflow() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); @@ -85,7 +85,7 @@ async fn basic_workflow() { assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(2)); } -async fn expected_tree_hash(pool: &ConnectionPool) -> H256 { +async fn expected_tree_hash(pool: &ConnectionPool) -> H256 { let mut storage = pool.access_storage().await.unwrap(); let sealed_l1_batch_number = storage .blocks_dal() @@ -104,7 +104,7 @@ async fn expected_tree_hash(pool: &ConnectionPool) -> H256 { #[tokio::test] async fn status_receiver_has_correct_states() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let (mut calculator, _) = setup_calculator(temp_dir.path(), &pool).await; @@ -152,7 +152,7 @@ async fn status_receiver_has_correct_states() { #[tokio::test] async fn multi_l1_batch_workflow() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; // Collect all storage logs in a single L1 batch let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); @@ -188,7 +188,7 @@ async fn multi_l1_batch_workflow() { #[tokio::test] async fn running_metadata_calculator_with_additional_blocks() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let calculator = setup_lightweight_calculator(temp_dir.path(), &pool).await; @@ -238,7 +238,7 @@ async fn running_metadata_calculator_with_additional_blocks() { #[tokio::test] async fn shutting_down_calculator() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get 
temporary directory for RocksDB"); let (merkle_tree_config, mut operation_config) = create_config(temp_dir.path(), MerkleTreeMode::Lightweight); @@ -263,7 +263,7 @@ async fn test_postgres_backup_recovery( sleep_between_batches: bool, insert_batch_without_metadata: bool, ) { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let calculator = setup_lightweight_calculator(temp_dir.path(), &pool).await; reset_db_state(&pool, 5).await; @@ -354,7 +354,7 @@ async fn postgres_backup_recovery_with_excluded_metadata() { pub(crate) async fn setup_calculator( db_path: &Path, - pool: &ConnectionPool, + pool: &ConnectionPool, ) -> (MetadataCalculator, Arc) { let store_factory = ObjectStoreFactory::mock(); let store = store_factory.create_store().await; @@ -365,7 +365,10 @@ pub(crate) async fn setup_calculator( (calculator, store_factory.create_store().await) } -async fn setup_lightweight_calculator(db_path: &Path, pool: &ConnectionPool) -> MetadataCalculator { +async fn setup_lightweight_calculator( + db_path: &Path, + pool: &ConnectionPool, +) -> MetadataCalculator { let (db_config, operation_config) = create_config(db_path, MerkleTreeMode::Lightweight); setup_calculator_with_options(&db_config, &operation_config, pool, None).await } @@ -389,7 +392,7 @@ fn create_config( async fn setup_calculator_with_options( merkle_tree_config: &MerkleTreeConfig, operation_config: &OperationsManagerConfig, - pool: &ConnectionPool, + pool: &ConnectionPool, object_store: Option>, ) -> MetadataCalculator { let calculator_config = @@ -413,7 +416,7 @@ fn path_to_string(path: &Path) -> String { pub(crate) async fn run_calculator( mut calculator: MetadataCalculator, - pool: ConnectionPool, + pool: ConnectionPool, ) -> H256 { let (stop_sx, stop_rx) = watch::channel(false); let (delay_sx, mut delay_rx) = mpsc::unbounded_channel(); @@ -435,7 +438,7 @@ pub(crate) async fn run_calculator( delayer_handle.await.unwrap() } -pub(crate) async fn reset_db_state(pool: &ConnectionPool, num_batches: usize) { +pub(crate) async fn reset_db_state(pool: &ConnectionPool, num_batches: usize) { let mut storage = pool.access_storage().await.unwrap(); // Drops all L1 batches (except the L1 batch with number 0) and their storage logs. 
storage @@ -469,7 +472,7 @@ pub(crate) async fn reset_db_state(pool: &ConnectionPool, num_batches: usize) { } pub(super) async fn extend_db_state( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, new_logs: impl IntoIterator>, ) { let mut storage = storage.start_transaction().await.unwrap(); @@ -484,7 +487,7 @@ pub(super) async fn extend_db_state( } pub(super) async fn extend_db_state_from_l1_batch( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, next_l1_batch: L1BatchNumber, new_logs: impl IntoIterator>, ) { @@ -522,7 +525,7 @@ pub(super) async fn extend_db_state_from_l1_batch( } async fn insert_initial_writes_for_batch( - connection: &mut StorageProcessor<'_>, + connection: &mut StorageProcessor<'_, Server>, l1_batch_number: L1BatchNumber, ) { let written_non_zero_slots: Vec<_> = connection @@ -593,7 +596,7 @@ pub(crate) fn gen_storage_logs( } async fn remove_l1_batches( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, last_l1_batch_to_keep: L1BatchNumber, ) -> Vec { let sealed_l1_batch_number = storage @@ -629,7 +632,7 @@ async fn remove_l1_batches( #[tokio::test] async fn deduplication_works_as_expected() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::from(270), &GenesisParams::mock()) .await diff --git a/core/lib/zksync_core/src/metadata_calculator/updater.rs b/core/lib/zksync_core/src/metadata_calculator/updater.rs index a7b37533c66f..b8c422d95364 100644 --- a/core/lib/zksync_core/src/metadata_calculator/updater.rs +++ b/core/lib/zksync_core/src/metadata_calculator/updater.rs @@ -5,7 +5,7 @@ use std::{ops, sync::Arc, time::Instant}; use anyhow::Context as _; use futures::{future, FutureExt}; use tokio::sync::watch; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_health_check::HealthUpdater; use zksync_merkle_tree::domain::TreeMetadata; use zksync_object_store::ObjectStore; @@ -86,7 +86,7 @@ impl TreeUpdater { /// is slow for whatever reason. 
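The point of typing the pools is that a component wired for one database can no longer be handed a pool for the other; the mismatch becomes a compile error rather than a runtime surprise. A small type-level illustration (function names are hypothetical):

```rust
use prover_dal::Prover;
use zksync_dal::{ConnectionPool, Server};

fn needs_core_db(_pool: &ConnectionPool<Server>) {}
fn needs_prover_db(_pool: &ConnectionPool<Prover>) {}

fn wire_components(core: ConnectionPool<Server>, prover: ConnectionPool<Prover>) {
    needs_core_db(&core);
    needs_prover_db(&prover);
    // needs_core_db(&prover); // would not compile: the marker types differ
}
```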
async fn process_multiple_batches( &mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, l1_batch_numbers: ops::RangeInclusive, ) -> L1BatchNumber { let start = Instant::now(); @@ -167,7 +167,7 @@ impl TreeUpdater { async fn step( &mut self, - mut storage: StorageProcessor<'_>, + mut storage: StorageProcessor<'_, Server>, next_l1_batch_to_seal: &mut L1BatchNumber, ) { let Some(last_sealed_l1_batch) = storage @@ -199,7 +199,7 @@ impl TreeUpdater { pub async fn loop_updating_tree( mut self, delayer: Delayer, - pool: &ConnectionPool, + pool: &ConnectionPool, mut stop_receiver: watch::Receiver, health_updater: HealthUpdater, ) -> anyhow::Result<()> { @@ -312,7 +312,7 @@ impl TreeUpdater { } async fn check_initial_writes_consistency( - connection: &mut StorageProcessor<'_>, + connection: &mut StorageProcessor<'_, Server>, l1_batch_number: L1BatchNumber, tree_initial_writes: &[InitialStorageWrite], ) { diff --git a/core/lib/zksync_core/src/proof_data_handler/mod.rs b/core/lib/zksync_core/src/proof_data_handler/mod.rs index 9f009328ca05..c0f6af1fecf2 100644 --- a/core/lib/zksync_core/src/proof_data_handler/mod.rs +++ b/core/lib/zksync_core/src/proof_data_handler/mod.rs @@ -7,7 +7,7 @@ use zksync_config::{ configs::{proof_data_handler::ProtocolVersionLoadingMode, ProofDataHandlerConfig}, ContractsConfig, }; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use zksync_object_store::ObjectStore; use zksync_prover_interface::api::{ProofGenerationDataRequest, SubmitProofRequest}; use zksync_types::{ @@ -35,7 +35,7 @@ pub async fn run_server( config: ProofDataHandlerConfig, contracts_config: ContractsConfig, blob_store: Arc, - pool: ConnectionPool, + pool: ConnectionPool, mut stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port)); diff --git a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs index 0fc40ddfdf34..9a2b2e61f214 100644 --- a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs +++ b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs @@ -9,7 +9,7 @@ use axum::{ use zksync_config::configs::{ proof_data_handler::ProtocolVersionLoadingMode, ProofDataHandlerConfig, }; -use zksync_dal::{ConnectionPool, SqlxError}; +use zksync_dal::{ConnectionPool, Server, ServerDals, SqlxError}; use zksync_object_store::{ObjectStore, ObjectStoreError}; use zksync_prover_interface::api::{ ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, @@ -27,7 +27,7 @@ use zksync_utils::u256_to_h256; #[derive(Clone)] pub(crate) struct RequestProcessor { blob_store: Arc, - pool: ConnectionPool, + pool: ConnectionPool, config: ProofDataHandlerConfig, l1_verifier_config: Option, } @@ -67,7 +67,7 @@ impl IntoResponse for RequestProcessorError { impl RequestProcessor { pub(crate) fn new( blob_store: Arc, - pool: ConnectionPool, + pool: ConnectionPool, config: ProofDataHandlerConfig, l1_verifier_config: Option, ) -> Self { diff --git a/core/lib/zksync_core/src/reorg_detector/mod.rs b/core/lib/zksync_core/src/reorg_detector/mod.rs index b550fde30658..a943e6aab455 100644 --- a/core/lib/zksync_core/src/reorg_detector/mod.rs +++ b/core/lib/zksync_core/src/reorg_detector/mod.rs @@ -3,7 +3,7 @@ use std::{fmt, time::Duration}; use anyhow::Context as _; use async_trait::async_trait; use tokio::sync::watch; -use zksync_dal::ConnectionPool; +use 
zksync_dal::{ConnectionPool, Server, ServerDals}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_types::{L1BatchNumber, MiniblockNumber, H256}; use zksync_web3_decl::{ @@ -215,7 +215,7 @@ impl HandleReorgDetectorEvent for HealthUpdater { pub struct ReorgDetector { client: Box, event_handler: Box, - pool: ConnectionPool, + pool: ConnectionPool, sleep_interval: Duration, health_check: ReactiveHealthCheck, } @@ -223,7 +223,7 @@ pub struct ReorgDetector { impl ReorgDetector { const DEFAULT_SLEEP_INTERVAL: Duration = Duration::from_secs(5); - pub fn new(client: HttpClient, pool: ConnectionPool) -> Self { + pub fn new(client: HttpClient, pool: ConnectionPool) -> Self { let (health_check, health_updater) = ReactiveHealthCheck::new("reorg_detector"); Self { client: Box::new(client), diff --git a/core/lib/zksync_core/src/reorg_detector/tests.rs b/core/lib/zksync_core/src/reorg_detector/tests.rs index 3e306d134c4a..57fa3eff6b59 100644 --- a/core/lib/zksync_core/src/reorg_detector/tests.rs +++ b/core/lib/zksync_core/src/reorg_detector/tests.rs @@ -8,7 +8,7 @@ use std::{ use assert_matches::assert_matches; use test_casing::{test_casing, Product}; use tokio::sync::mpsc; -use zksync_dal::StorageProcessor; +use zksync_dal::{ServerDals, StorageProcessor}; use zksync_types::{ block::{MiniblockHasher, MiniblockHeader}, L2ChainId, ProtocolVersion, @@ -21,7 +21,7 @@ use crate::{ utils::testonly::{create_l1_batch, create_miniblock}, }; -async fn store_miniblock(storage: &mut StorageProcessor<'_>, number: u32, hash: H256) { +async fn store_miniblock(storage: &mut StorageProcessor<'_, Server>, number: u32, hash: H256) { let header = MiniblockHeader { hash, ..create_miniblock(number) @@ -33,7 +33,7 @@ async fn store_miniblock(storage: &mut StorageProcessor<'_>, number: u32, hash: .unwrap(); } -async fn seal_l1_batch(storage: &mut StorageProcessor<'_>, number: u32, hash: H256) { +async fn seal_l1_batch(storage: &mut StorageProcessor<'_, Server>, number: u32, hash: H256) { let header = create_l1_batch(number); storage .blocks_dal() @@ -152,7 +152,7 @@ impl HandleReorgDetectorEvent for mpsc::UnboundedSender<(MiniblockNumber, L1Batc } } -fn create_mock_detector(client: MockMainNodeClient, pool: ConnectionPool) -> ReorgDetector { +fn create_mock_detector(client: MockMainNodeClient, pool: ConnectionPool) -> ReorgDetector { let (health_check, health_updater) = ReactiveHealthCheck::new("reorg_detector"); ReorgDetector { client: Box::new(client), @@ -166,7 +166,7 @@ fn create_mock_detector(client: MockMainNodeClient, pool: ConnectionPool) -> Reo #[test_casing(4, Product(([false, true], [false, true])))] #[tokio::test] async fn normal_reorg_function(snapshot_recovery: bool, with_transient_errors: bool) { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let mut client = MockMainNodeClient::default(); if snapshot_recovery { @@ -250,7 +250,7 @@ async fn normal_reorg_function(snapshot_recovery: bool, with_transient_errors: b #[tokio::test] async fn detector_stops_on_fatal_rpc_error() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await @@ -267,7 +267,7 @@ async fn detector_stops_on_fatal_rpc_error() { #[tokio::test] async fn reorg_is_detected_on_batch_hash_mismatch() { - let pool = 
ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let genesis_root_hash = ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) @@ -313,7 +313,7 @@ async fn reorg_is_detected_on_batch_hash_mismatch() { #[tokio::test] async fn reorg_is_detected_on_miniblock_hash_mismatch() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let mut client = MockMainNodeClient::default(); let genesis_root_hash = @@ -384,7 +384,7 @@ async fn reorg_is_detected_on_historic_batch_hash_mismatch( (1_u32..=10, last_correct_batch) }; - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let earliest_l1_batch_number = l1_batch_numbers.start() - 1; { let mut storage = pool.access_storage().await.unwrap(); @@ -463,7 +463,7 @@ async fn reorg_is_detected_on_historic_batch_hash_mismatch( #[tokio::test] async fn stopping_reorg_detector_while_waiting_for_l1_batch() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); assert!(storage.blocks_dal().is_genesis_needed().await.unwrap()); drop(storage); @@ -478,7 +478,7 @@ async fn stopping_reorg_detector_while_waiting_for_l1_batch() { #[tokio::test] async fn detector_errors_on_earliest_batch_hash_mismatch() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let genesis_root_hash = ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) @@ -503,7 +503,7 @@ async fn detector_errors_on_earliest_batch_hash_mismatch() { #[tokio::test] async fn detector_errors_on_earliest_batch_hash_mismatch_with_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut client = MockMainNodeClient::default(); client .l1_batch_root_hashes @@ -532,7 +532,7 @@ async fn detector_errors_on_earliest_batch_hash_mismatch_with_snapshot_recovery( #[tokio::test] async fn reorg_is_detected_without_waiting_for_main_node_to_catch_up() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let genesis_root_hash = ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/main_executor.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/main_executor.rs index 3a01c1f6c2d4..5850321e57c2 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/main_executor.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/main_executor.rs @@ -12,7 +12,7 @@ use multivm::{ }; use once_cell::sync::OnceCell; use tokio::sync::{mpsc, watch}; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use zksync_state::{RocksdbStorage, StorageView, WriteStorage}; use zksync_types::{vm_trace::Call, Transaction, U256}; use zksync_utils::bytecode::CompressedBytecodeInfo; @@ -31,7 +31,7 @@ use crate::{ #[derive(Debug, Clone)] pub struct MainBatchExecutor { state_keeper_db_path: String, - pool: ConnectionPool, + pool: ConnectionPool, save_call_traces: bool, max_allowed_tx_gas_limit: U256, upload_witness_inputs_to_gcs: bool, @@ -42,7 +42,7 @@ pub struct 
MainBatchExecutor { impl MainBatchExecutor { pub fn new( state_keeper_db_path: String, - pool: ConnectionPool, + pool: ConnectionPool, max_allowed_tx_gas_limit: U256, save_call_traces: bool, upload_witness_inputs_to_gcs: bool, diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs index 236435a32e73..6e5adb658945 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs @@ -1,6 +1,6 @@ use assert_matches::assert_matches; use test_casing::test_casing; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use zksync_test_account::Account; use zksync_types::{get_nonce_key, utils::storage_key_for_eth_balance, PriorityOpId}; @@ -32,7 +32,7 @@ fn assert_reverted(execution_result: &TxExecutionResult) { /// Checks that we can successfully execute a single L2 tx in batch executor. #[tokio::test] async fn execute_l2_tx() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); tester.genesis().await; @@ -75,7 +75,7 @@ impl SnapshotRecoveryMutation { #[tokio::test] async fn execute_l2_tx_after_snapshot_recovery(mutation: Option) { let mut alice = Account::random(); - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut storage_snapshot = StorageSnapshot::new(&connection_pool, &mut alice, 10).await; assert!(storage_snapshot.storage_logs.len() > 10); // sanity check @@ -99,7 +99,7 @@ async fn execute_l2_tx_after_snapshot_recovery(mutation: Option::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -116,7 +116,7 @@ async fn execute_l1_tx() { /// Checks that we can successfully execute a single L2 tx and a single L1 tx in batch executor. #[tokio::test] async fn execute_l2_and_l1_txs() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -136,7 +136,7 @@ async fn execute_l2_and_l1_txs() { /// Checks that we can successfully rollback the transaction and execute it once again. #[tokio::test] async fn rollback() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -180,7 +180,7 @@ async fn rollback() { /// Checks that incorrect transactions are marked as rejected. #[tokio::test] async fn reject_tx() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -196,7 +196,7 @@ async fn reject_tx() { /// Checks that tx with too big gas limit is correctly rejected. 
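These batch-executor tests use `constrained_test_pool(1)` rather than `test_pool()`; judging by the comments elsewhere in this patch, the argument caps the pool at a fixed number of connections so a test exercises the single-connection setup. A hedged sketch of the typed call:

```rust
use zksync_dal::{ConnectionPool, Server};

#[tokio::test]
async fn single_connection_pool_example() {
    // One connection only, mirroring the constrained setups used by these tests.
    let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await;
    let storage = connection_pool.access_storage().await.unwrap();
    drop(storage); // release the only connection before acquiring it again
    let _storage_again = connection_pool.access_storage().await.unwrap();
}
```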
#[tokio::test] async fn too_big_gas_limit() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -243,7 +243,7 @@ async fn too_big_gas_limit() { /// Checks that we can't execute the same transaction twice. #[tokio::test] async fn tx_cant_be_reexecuted() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -263,7 +263,7 @@ async fn tx_cant_be_reexecuted() { /// Checks that we can deploy and call the loadnext contract. #[tokio::test] async fn deploy_and_call_loadtest() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -289,7 +289,7 @@ async fn deploy_and_call_loadtest() { /// Checks that a tx that is reverted by the VM still can be included into a batch. #[tokio::test] async fn execute_reverted_tx() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::new(connection_pool); @@ -316,7 +316,7 @@ async fn execute_reverted_tx() { /// a batch with different operations, both successful and not. #[tokio::test] async fn execute_realistic_scenario() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); let mut bob = Account::random(); @@ -366,7 +366,7 @@ async fn execute_realistic_scenario() { /// Checks that we handle the bootloader out of gas error on execution phase. #[tokio::test] async fn bootloader_out_of_gas_for_any_tx() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); let tester = Tester::with_config( @@ -392,7 +392,7 @@ async fn bootloader_out_of_gas_for_any_tx() { #[tokio::test] #[ignore] // This test fails. 
async fn bootloader_tip_out_of_gas() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; let mut alice = Account::random(); let mut tester = Tester::new(connection_pool); diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs index cfcd1caad8b5..9faffa960a26 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs @@ -11,7 +11,7 @@ use tempfile::TempDir; use tokio::sync::watch; use zksync_config::configs::chain::StateKeeperConfig; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use zksync_test_account::{Account, DeployContractsTx, TxType}; use zksync_types::{ block::MiniblockHasher, ethabi::Token, fee::Fee, snapshots::SnapshotRecoveryStatus, @@ -67,16 +67,16 @@ impl TestConfig { pub(super) struct Tester { fee_account: Address, db_dir: TempDir, - pool: ConnectionPool, + pool: ConnectionPool, config: TestConfig, } impl Tester { - pub(super) fn new(pool: ConnectionPool) -> Self { + pub(super) fn new(pool: ConnectionPool) -> Self { Self::with_config(pool, TestConfig::new()) } - pub(super) fn with_config(pool: ConnectionPool, config: TestConfig) -> Self { + pub(super) fn with_config(pool: ConnectionPool, config: TestConfig) -> Self { Self { fee_account: Address::repeat_byte(0x01), db_dir: TempDir::new().unwrap(), @@ -376,7 +376,7 @@ pub(super) struct StorageSnapshot { impl StorageSnapshot { /// Generates a new snapshot by executing the specified number of transactions, each in a separate miniblock. pub async fn new( - connection_pool: &ConnectionPool, + connection_pool: &ConnectionPool, alice: &mut Account, transaction_count: u32, ) -> Self { @@ -469,7 +469,7 @@ impl StorageSnapshot { } /// Recovers storage from this snapshot. - pub async fn recover(self, connection_pool: &ConnectionPool) -> SnapshotRecoveryStatus { + pub async fn recover(self, connection_pool: &ConnectionPool) -> SnapshotRecoveryStatus { let snapshot_logs: Vec<_> = self .storage_logs .into_iter() diff --git a/core/lib/zksync_core/src/state_keeper/io/common/mod.rs b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs index 64001d7d502d..5303af221e5c 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs @@ -2,7 +2,7 @@ use std::time::Duration; use anyhow::Context; use multivm::interface::{L1BatchEnv, SystemEnv}; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_types::{L1BatchNumber, MiniblockNumber, H256}; use super::PendingBatchData; @@ -30,7 +30,7 @@ pub(crate) struct IoCursor { impl IoCursor { /// Loads the cursor from Postgres. - pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result { + pub async fn new(storage: &mut StorageProcessor<'_, Server>) -> anyhow::Result { let last_sealed_l1_batch_number = storage .blocks_dal() .get_sealed_l1_batch_number() @@ -88,7 +88,7 @@ impl IoCursor { /// /// Propagates DB errors. Also returns an error if environment doesn't correspond to a pending L1 batch. 
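Helpers that previously took an untyped `StorageProcessor<'_>` now pin the database kind in their signature, so connections handed down from a `ConnectionPool<Server>` stay on the server side end to end. A minimal sketch under that assumption (the connection tag is purely illustrative):

```rust
use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor};

// Helpers now accept only server-typed connections.
async fn ensure_not_empty(storage: &mut StorageProcessor<'_, Server>) -> anyhow::Result<()> {
    let genesis_needed = storage.blocks_dal().is_genesis_needed().await?;
    anyhow::ensure!(!genesis_needed, "genesis has not been performed yet");
    Ok(())
}

async fn run(pool: &ConnectionPool<Server>) -> anyhow::Result<()> {
    // Tagged access mirrors `access_storage_tagged("metadata_calculator")` above;
    // the "example" tag is a placeholder.
    let mut storage = pool.access_storage_tagged("example").await?;
    ensure_not_empty(&mut storage).await
}
```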
pub(crate) async fn load_pending_batch( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, system_env: SystemEnv, l1_batch_env: L1BatchEnv, ) -> anyhow::Result { diff --git a/core/lib/zksync_core/src/state_keeper/io/common/tests.rs b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs index a59f711a576f..246a70da7756 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common/tests.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs @@ -8,7 +8,7 @@ use std::{collections::HashMap, ops}; use futures::FutureExt; use vm_utils::storage::L1BatchParamsProvider; use zksync_contracts::BaseSystemContractsHashes; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use zksync_types::{ block::MiniblockHasher, fee::TransactionExecutionMetrics, L2ChainId, ProtocolVersion, ProtocolVersionId, @@ -35,7 +35,7 @@ fn test_poll_iters() { #[tokio::test] async fn creating_io_cursor_with_genesis() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await @@ -66,7 +66,7 @@ async fn creating_io_cursor_with_genesis() { #[tokio::test] async fn creating_io_cursor_with_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; @@ -100,7 +100,7 @@ async fn creating_io_cursor_with_snapshot_recovery() { #[tokio::test] async fn waiting_for_l1_batch_params_with_genesis() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let genesis_root_hash = ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) @@ -141,7 +141,7 @@ async fn waiting_for_l1_batch_params_with_genesis() { #[tokio::test] async fn waiting_for_l1_batch_params_after_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; @@ -188,7 +188,7 @@ async fn waiting_for_l1_batch_params_after_snapshot_recovery() { #[tokio::test] async fn getting_first_miniblock_in_batch_with_genesis() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await @@ -236,7 +236,7 @@ async fn getting_first_miniblock_in_batch_with_genesis() { async fn assert_first_miniblock_numbers( provider: &L1BatchParamsProvider, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, batches_and_miniblocks: &HashMap, ()>>, ) { for (&batch, &expected_miniblock) in batches_and_miniblocks { @@ -260,7 +260,7 @@ async fn assert_first_miniblock_numbers( #[tokio::test] async fn getting_first_miniblock_in_batch_after_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 
L1BatchNumber(23), MiniblockNumber(42), &[]).await; @@ -308,7 +308,7 @@ async fn getting_first_miniblock_in_batch_after_snapshot_recovery() { #[tokio::test] async fn loading_pending_batch_with_genesis() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let genesis_params = GenesisParams::mock(); ensure_genesis_state(&mut storage, L2ChainId::default(), &genesis_params) @@ -354,7 +354,7 @@ async fn loading_pending_batch_with_genesis() { } async fn store_pending_miniblocks( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, numbers: ops::RangeInclusive, contract_hashes: BaseSystemContractsHashes, ) { @@ -381,7 +381,7 @@ async fn store_pending_miniblocks( #[tokio::test] async fn loading_pending_batch_after_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; @@ -441,7 +441,7 @@ async fn loading_pending_batch_after_snapshot_recovery() { #[tokio::test] async fn getting_batch_version_with_genesis() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let mut genesis_params = GenesisParams::mock(); genesis_params.protocol_version = ProtocolVersionId::Version5; @@ -482,7 +482,7 @@ async fn getting_batch_version_with_genesis() { #[tokio::test] async fn getting_batch_version_after_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; diff --git a/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs b/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs index 84fdc8e44cff..9708c7cc1311 100644 --- a/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs +++ b/core/lib/zksync_core/src/state_keeper/io/fee_address_migration.rs @@ -6,12 +6,12 @@ use std::time::{Duration, Instant}; use anyhow::Context as _; use tokio::sync::watch; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_types::MiniblockNumber; /// Runs the migration for pending miniblocks. pub(crate) async fn migrate_pending_miniblocks( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, ) -> anyhow::Result<()> { let started_at = Instant::now(); tracing::info!("Started migrating `fee_account_address` for pending miniblocks"); @@ -40,7 +40,7 @@ pub(crate) async fn migrate_pending_miniblocks( /// Runs the migration for non-pending miniblocks. Should be run as a background task. pub(crate) async fn migrate_miniblocks( - pool: ConnectionPool, + pool: ConnectionPool, last_miniblock: MiniblockNumber, stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { @@ -82,7 +82,7 @@ struct MigrationOutput { /// It's important for the `chunk_size` to be a constant; this ensures that each chunk is migrated atomically. 
async fn migrate_miniblocks_inner( - pool: ConnectionPool, + pool: ConnectionPool, last_miniblock: MiniblockNumber, chunk_size: u32, sleep_interval: Duration, @@ -153,7 +153,7 @@ async fn migrate_miniblocks_inner( #[allow(deprecated)] async fn is_fee_address_migrated( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, miniblock: MiniblockNumber, ) -> anyhow::Result { storage @@ -175,7 +175,7 @@ mod tests { use super::*; use crate::utils::testonly::create_miniblock; - async fn prepare_storage(storage: &mut StorageProcessor<'_>) { + async fn prepare_storage(storage: &mut StorageProcessor<'_, Server>) { storage .protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -216,7 +216,7 @@ mod tests { } } - async fn assert_migration(storage: &mut StorageProcessor<'_>) { + async fn assert_migration(storage: &mut StorageProcessor<'_, Server>) { for number in 0..5 { assert!(is_fee_address_migrated(storage, MiniblockNumber(number)) .await @@ -237,7 +237,7 @@ mod tests { #[tokio::test] async fn migration_basics(chunk_size: u32) { // Replicate providing a pool with a single connection. - let pool = ConnectionPool::constrained_test_pool(1).await; + let pool = ConnectionPool::::constrained_test_pool(1).await; let mut storage = pool.access_storage().await.unwrap(); prepare_storage(&mut storage).await; drop(storage); @@ -277,7 +277,7 @@ mod tests { #[test_casing(3, [1, 2, 3])] #[tokio::test] async fn stopping_and_resuming_migration(chunk_size: u32) { - let pool = ConnectionPool::constrained_test_pool(1).await; + let pool = ConnectionPool::::constrained_test_pool(1).await; let mut storage = pool.access_storage().await.unwrap(); prepare_storage(&mut storage).await; drop(storage); @@ -316,7 +316,7 @@ mod tests { #[test_casing(3, [1, 2, 3])] #[tokio::test] async fn new_blocks_added_during_migration(chunk_size: u32) { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); prepare_storage(&mut storage).await; diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index ee635f3728b3..5a04dcaac042 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -13,11 +13,11 @@ use multivm::{ }; use vm_utils::storage::{l1_batch_params, L1BatchParamsProvider}; use zksync_config::configs::chain::StateKeeperConfig; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use zksync_mempool::L2TxFilter; use zksync_object_store::ObjectStore; use zksync_types::{ - protocol_version::ProtocolUpgradeTx, witness_block_state::WitnessBlockState, Address, + protocol_upgrade::ProtocolUpgradeTx, witness_block_state::WitnessBlockState, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, H256, }; // TODO (SMA-1206): use seconds instead of milliseconds. 
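Alongside the pool typing, the hunks here re-point `ProtocolUpgradeTx` imports from `zksync_types::protocol_version` to `zksync_types::protocol_upgrade`; only the module path changes. A sketch of the new import (the helper function is hypothetical, just to exercise it):

```rust
// Only the module path changed; the type itself is untouched.
use zksync_types::protocol_upgrade::ProtocolUpgradeTx;

// Hypothetical helper: checks whether an upgrade transaction is present.
fn has_upgrade_tx(tx: Option<&ProtocolUpgradeTx>) -> bool {
    tx.is_some()
}
```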
@@ -47,7 +47,7 @@ use crate::{ #[derive(Debug)] pub struct MempoolIO { mempool: MempoolGuard, - pool: ConnectionPool, + pool: ConnectionPool, object_store: Arc, timeout_sealer: TimeoutSealer, filter: L2TxFilter, @@ -426,7 +426,7 @@ impl MempoolIO { object_store: Arc, miniblock_sealer_handle: MiniblockSealerHandle, batch_fee_input_provider: Arc, - pool: ConnectionPool, + pool: ConnectionPool, config: &StateKeeperConfig, delay_interval: Duration, l2_erc20_bridge_addr: Address, diff --git a/core/lib/zksync_core/src/state_keeper/io/mod.rs b/core/lib/zksync_core/src/state_keeper/io/mod.rs index 482590a67ee0..b57a33b1f428 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mod.rs @@ -6,9 +6,9 @@ use std::{ use async_trait::async_trait; use multivm::interface::{FinishedL1Batch, L1BatchEnv, SystemEnv}; use tokio::sync::{mpsc, oneshot}; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use zksync_types::{ - block::MiniblockExecutionData, protocol_version::ProtocolUpgradeTx, + block::MiniblockExecutionData, protocol_upgrade::ProtocolUpgradeTx, witness_block_state::WitnessBlockState, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction, }; @@ -215,7 +215,7 @@ impl MiniblockSealerHandle { /// Component responsible for sealing miniblocks (i.e., storing their data to Postgres). #[derive(Debug)] pub struct MiniblockSealer { - pool: ConnectionPool, + pool: ConnectionPool, is_sync: bool, // Weak sender handle to get queue capacity stats. commands_sender: mpsc::WeakSender>, @@ -225,7 +225,10 @@ pub struct MiniblockSealer { impl MiniblockSealer { /// Creates a sealer that will use the provided Postgres connection and will have the specified /// `command_capacity` for unprocessed sealing commands. 
- pub fn new(pool: ConnectionPool, mut command_capacity: usize) -> (Self, MiniblockSealerHandle) { + pub fn new( + pool: ConnectionPool, + mut command_capacity: usize, + ) -> (Self, MiniblockSealerHandle) { let is_sync = command_capacity == 0; command_capacity = command_capacity.max(1); diff --git a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs index e4cf2abfbe90..01034bb947bb 100644 --- a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs +++ b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs @@ -8,7 +8,7 @@ use multivm::{ interface::{FinishedL1Batch, L1BatchEnv}, utils::get_max_gas_per_pubdata_byte, }; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_types::{ block::{unpack_block_info, L1BatchHeader, MiniblockHeader}, event::{extract_added_tokens, extract_long_l2_to_l1_messages}, @@ -16,7 +16,7 @@ use zksync_types::{ l1::L1Tx, l2::L2Tx, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, - protocol_version::ProtocolUpgradeTx, + protocol_upgrade::ProtocolUpgradeTx, storage_writes_deduplicator::{ModifiedSlot, StorageWritesDeduplicator}, tx::{ tx_execution_info::DeduplicatedWritesMetrics, IncludedTxLocation, @@ -48,7 +48,7 @@ impl UpdatesManager { #[must_use = "fictive miniblock must be used to update I/O params"] pub(crate) async fn seal_l1_batch( mut self, - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, current_miniblock_number: MiniblockNumber, l1_batch_env: &L1BatchEnv, finished_batch: FinishedL1Batch, @@ -268,11 +268,11 @@ impl UpdatesManager { } impl MiniblockSealCommand { - pub async fn seal(&self, storage: &mut StorageProcessor<'_>) { + pub async fn seal(&self, storage: &mut StorageProcessor<'_, Server>) { self.seal_inner(storage, false).await; } - async fn insert_transactions(&self, transaction: &mut StorageProcessor<'_>) { + async fn insert_transactions(&self, transaction: &mut StorageProcessor<'_, Server>) { for tx_result in &self.miniblock.executed_transactions { let tx = tx_result.transaction.clone(); match &tx.common_data { @@ -315,7 +315,7 @@ impl MiniblockSealCommand { /// one for sending fees to the operator). /// /// `l2_erc20_bridge_addr` is required to extract the information on newly added tokens. - async fn seal_inner(&self, storage: &mut StorageProcessor<'_>, is_fictive: bool) { + async fn seal_inner(&self, storage: &mut StorageProcessor<'_, Server>, is_fictive: bool) { self.assert_valid_miniblock(is_fictive); let mut transaction = storage.start_transaction().await.unwrap(); diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs index 725a635e186a..0508203d428b 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs @@ -3,7 +3,7 @@ use std::time::Duration; use futures::FutureExt; use multivm::utils::derive_base_fee_and_gas_per_pubdata; use zksync_contracts::BaseSystemContractsHashes; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use zksync_mempool::L2TxFilter; use zksync_types::{ block::{BlockGasCount, MiniblockHasher}, @@ -34,7 +34,7 @@ mod tester; /// Ensure that MempoolIO.filter is correctly initialized right after mempool initialization. 
#[tokio::test] async fn test_filter_initialization() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; let tester = Tester::new(); // Genesis is needed for proper mempool initialization. @@ -48,7 +48,7 @@ async fn test_filter_initialization() { /// Ensure that MempoolIO.filter is modified correctly if there is a pending batch upon mempool initialization. #[tokio::test] async fn test_filter_with_pending_batch() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; let mut tester = Tester::new(); tester.genesis(&connection_pool).await; @@ -93,7 +93,7 @@ async fn test_filter_with_pending_batch() { /// Ensure that `MempoolIO.filter` is modified correctly if there is no pending batch. #[tokio::test] async fn test_filter_with_no_pending_batch() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; let tester = Tester::new(); tester.genesis(&connection_pool).await; @@ -134,7 +134,7 @@ async fn test_filter_with_no_pending_batch() { } async fn test_timestamps_are_distinct( - connection_pool: ConnectionPool, + connection_pool: ConnectionPool<Server>, prev_miniblock_timestamp: u64, delay_prev_miniblock_compared_to_batch: bool, ) { @@ -171,35 +171,35 @@ async fn test_timestamps_are_distinct( #[tokio::test] async fn l1_batch_timestamp_basics() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; let current_timestamp = seconds_since_epoch(); test_timestamps_are_distinct(connection_pool, current_timestamp, false).await; } #[tokio::test] async fn l1_batch_timestamp_with_clock_skew() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; let current_timestamp = seconds_since_epoch(); test_timestamps_are_distinct(connection_pool, current_timestamp + 2, false).await; } #[tokio::test] async fn l1_batch_timestamp_respects_prev_miniblock() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; let current_timestamp = seconds_since_epoch(); test_timestamps_are_distinct(connection_pool, current_timestamp, true).await; } #[tokio::test] async fn l1_batch_timestamp_respects_prev_miniblock_with_clock_skew() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; let current_timestamp = seconds_since_epoch(); test_timestamps_are_distinct(connection_pool, current_timestamp + 2, true).await; } #[tokio::test] async fn processing_storage_logs_when_sealing_miniblock() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; let mut miniblock = MiniblockUpdates::new(0, 1, H256::zero(), 1, ProtocolVersionId::latest()); let tx = create_transaction(10, 100); @@ -296,7 +296,7 @@ async fn processing_storage_logs_when_sealing_miniblock() { #[tokio::test] async fn processing_events_when_sealing_miniblock() { - let pool = ConnectionPool::constrained_test_pool(1).await; + let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; let l1_batch_number = L1BatchNumber(2); let mut miniblock = 
MiniblockUpdates::new(0, 1, H256::zero(), 1, ProtocolVersionId::latest()); @@ -359,7 +359,7 @@ async fn processing_events_when_sealing_miniblock() { } async fn test_miniblock_and_l1_batch_processing( - pool: ConnectionPool, + pool: ConnectionPool<Server>, miniblock_sealer_capacity: usize, ) { let tester = Tester::new(); @@ -423,19 +423,19 @@ async fn test_miniblock_and_l1_batch_processing( #[tokio::test] async fn miniblock_and_l1_batch_processing() { - let pool = ConnectionPool::constrained_test_pool(1).await; + let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; test_miniblock_and_l1_batch_processing(pool, 1).await; } #[tokio::test] async fn miniblock_and_l1_batch_processing_with_sync_sealer() { - let pool = ConnectionPool::constrained_test_pool(1).await; + let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; test_miniblock_and_l1_batch_processing(pool, 0).await; } #[tokio::test] async fn miniblock_processing_after_snapshot_recovery() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::<Server>::test_pool().await; let mut storage = connection_pool.access_storage().await.unwrap(); let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; @@ -575,7 +575,7 @@ async fn miniblock_processing_after_snapshot_recovery() { #[tokio::test] async fn miniblock_sealer_handle_blocking() { - let pool = ConnectionPool::constrained_test_pool(1).await; + let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; let (mut sealer, mut sealer_handle) = MiniblockSealer::new(pool, 1); // The first command should be successfully submitted immediately. @@ -632,7 +632,7 @@ async fn miniblock_sealer_handle_blocking() { #[tokio::test] async fn miniblock_sealer_handle_parallel_processing() { - let pool = ConnectionPool::constrained_test_pool(1).await; + let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; let (mut sealer, mut sealer_handle) = MiniblockSealer::new(pool, 5); // 5 miniblock sealing commands can be submitted without blocking. @@ -659,7 +659,7 @@ async fn miniblock_sealer_handle_parallel_processing() { /// Ensure that subsequent miniblocks that belong to the same L1 batch have different timestamps #[tokio::test] async fn different_timestamp_for_miniblocks_in_same_batch() { - let connection_pool = ConnectionPool::constrained_test_pool(1).await; + let connection_pool = ConnectionPool::<Server>::constrained_test_pool(1).await; let tester = Tester::new(); // Genesis is needed for proper mempool initialization. 
diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs index db1007475ecf..3ce1cb284a16 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs @@ -8,7 +8,7 @@ use zksync_config::{ GasAdjusterConfig, }; use zksync_contracts::BaseSystemContracts; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use zksync_eth_client::clients::MockEthereum; use zksync_object_store::ObjectStoreFactory; use zksync_types::{ @@ -91,7 +91,7 @@ impl Tester { pub(super) async fn create_test_mempool_io( &self, - pool: ConnectionPool, + pool: ConnectionPool, miniblock_sealer_capacity: usize, ) -> (MempoolIO, MempoolGuard) { let gas_adjuster = Arc::new(self.create_gas_adjuster().await); @@ -138,7 +138,7 @@ impl Tester { self.current_timestamp = timestamp; } - pub(super) async fn genesis(&self, pool: &ConnectionPool) { + pub(super) async fn genesis(&self, pool: &ConnectionPool) { let mut storage = pool.access_storage_tagged("state_keeper").await.unwrap(); if storage.blocks_dal().is_genesis_needed().await.unwrap() { create_genesis_l1_batch( @@ -157,7 +157,7 @@ impl Tester { pub(super) async fn insert_miniblock( &self, - pool: &ConnectionPool, + pool: &ConnectionPool, number: u32, base_fee_per_gas: u64, fee_input: BatchFeeInput, @@ -193,7 +193,7 @@ impl Tester { pub(super) async fn insert_sealed_batch( &self, - pool: &ConnectionPool, + pool: &ConnectionPool, number: u32, tx_results: &[TransactionExecutionResult], ) { diff --git a/core/lib/zksync_core/src/state_keeper/keeper.rs b/core/lib/zksync_core/src/state_keeper/keeper.rs index 5635e47cf685..84f8291299e9 100644 --- a/core/lib/zksync_core/src/state_keeper/keeper.rs +++ b/core/lib/zksync_core/src/state_keeper/keeper.rs @@ -8,12 +8,10 @@ use std::{ use anyhow::Context as _; use multivm::interface::{Halt, L1BatchEnv, SystemEnv}; use tokio::sync::watch; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use zksync_types::{ - block::MiniblockExecutionData, - l2::TransactionType, - protocol_version::{ProtocolUpgradeTx, ProtocolVersionId}, - storage_writes_deduplicator::StorageWritesDeduplicator, + block::MiniblockExecutionData, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTx, + protocol_version::ProtocolVersionId, storage_writes_deduplicator::StorageWritesDeduplicator, L1BatchNumber, Transaction, }; @@ -88,7 +86,7 @@ impl ZkSyncStateKeeper { /// Temporary method to migrate fee addresses from L1 batches to miniblocks. 
pub fn run_fee_address_migration( &self, - pool: ConnectionPool, + pool: ConnectionPool<Server>, ) -> impl Future<Output = anyhow::Result<()>> { let last_miniblock = self.io.current_miniblock_number() - 1; let stop_receiver = self.stop_receiver.clone(); diff --git a/core/lib/zksync_core/src/state_keeper/mempool_actor.rs b/core/lib/zksync_core/src/state_keeper/mempool_actor.rs index 1e864b063475..813f4c18e266 100644 --- a/core/lib/zksync_core/src/state_keeper/mempool_actor.rs +++ b/core/lib/zksync_core/src/state_keeper/mempool_actor.rs @@ -6,7 +6,7 @@ use multivm::utils::derive_base_fee_and_gas_per_pubdata; use tokio::sync::mpsc; use tokio::sync::watch; use zksync_config::configs::chain::MempoolConfig; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_mempool::L2TxFilter; #[cfg(test)] use zksync_types::H256; @@ -35,7 +35,7 @@ pub async fn l2_tx_filter( #[derive(Debug)] pub struct MempoolFetcher { mempool: MempoolGuard, - pool: ConnectionPool, + pool: ConnectionPool<Server>, batch_fee_input_provider: Arc, sync_interval: Duration, sync_batch_size: usize, @@ -49,7 +49,7 @@ impl MempoolFetcher { mempool: MempoolGuard, batch_fee_input_provider: Arc, config: &MempoolConfig, - pool: ConnectionPool, + pool: ConnectionPool<Server>, ) -> Self { Self { mempool, @@ -131,7 +131,7 @@ impl MempoolFetcher { /// Loads nonces for all distinct `transactions` initiators from the storage. async fn get_transaction_nonces( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, transactions: &[Transaction], ) -> anyhow::Result> { let (nonce_keys, address_by_nonce_key): (Vec<_>, HashMap<_, _>) = transactions @@ -183,7 +183,7 @@ mod tests { #[tokio::test] async fn getting_transaction_nonces() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::<Server>::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let transaction = create_l2_transaction(10, 100); @@ -217,7 +217,7 @@ mod tests { #[tokio::test] async fn syncing_mempool_basics() { - let pool = ConnectionPool::constrained_test_pool(1).await; + let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await @@ -274,7 +274,7 @@ mod tests { #[tokio::test] async fn ignoring_transaction_with_insufficient_fee() { - let pool = ConnectionPool::constrained_test_pool(1).await; + let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await @@ -314,7 +314,7 @@ mod tests { #[tokio::test] async fn ignoring_transaction_with_old_nonce() { - let pool = ConnectionPool::constrained_test_pool(1).await; + let pool = ConnectionPool::<Server>::constrained_test_pool(1).await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await diff --git a/core/lib/zksync_core/src/state_keeper/mod.rs b/core/lib/zksync_core/src/state_keeper/mod.rs index 49c89a9d53bb..1e45fd78305a 100644 --- a/core/lib/zksync_core/src/state_keeper/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/mod.rs @@ -5,7 +5,7 @@ use zksync_config::{ configs::chain::{MempoolConfig, NetworkConfig, StateKeeperConfig}, ContractsConfig, DBConfig, }; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use zksync_object_store::ObjectStore; pub use 
self::{ @@ -37,7 +37,7 @@ pub(crate) async fn create_state_keeper( db_config: &DBConfig, network_config: &NetworkConfig, mempool_config: &MempoolConfig, - pool: ConnectionPool, + pool: ConnectionPool, mempool: MempoolGuard, batch_fee_input_provider: Arc, miniblock_sealer_handle: MiniblockSealerHandle, diff --git a/core/lib/zksync_core/src/state_keeper/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/tests/tester.rs index 0c3e82ec5282..eaf041bcc66b 100644 --- a/core/lib/zksync_core/src/state_keeper/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/tests/tester.rs @@ -16,7 +16,7 @@ use multivm::{ }; use tokio::sync::{mpsc, watch}; use zksync_types::{ - block::MiniblockExecutionData, fee_model::BatchFeeInput, protocol_version::ProtocolUpgradeTx, + block::MiniblockExecutionData, fee_model::BatchFeeInput, protocol_upgrade::ProtocolUpgradeTx, witness_block_state::WitnessBlockState, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, H256, }; diff --git a/core/lib/zksync_core/src/state_keeper/types.rs b/core/lib/zksync_core/src/state_keeper/types.rs index 34ed66895cb0..73120bd4154d 100644 --- a/core/lib/zksync_core/src/state_keeper/types.rs +++ b/core/lib/zksync_core/src/state_keeper/types.rs @@ -4,7 +4,7 @@ use std::{ }; use multivm::interface::VmExecutionResultAndLogs; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_mempool::{L2TxFilter, MempoolInfo, MempoolStore}; use zksync_types::{ block::BlockGasCount, tx::ExecutionMetrics, Address, Nonce, PriorityOpId, Transaction, @@ -17,7 +17,10 @@ use crate::gas_tracker::{gas_count_from_metrics, gas_count_from_tx_and_metrics}; pub struct MempoolGuard(Arc>); impl MempoolGuard { - pub async fn from_storage(storage_processor: &mut StorageProcessor<'_>, capacity: u64) -> Self { + pub async fn from_storage( + storage_processor: &mut StorageProcessor<'_, Server>, + capacity: u64, + ) -> Self { let next_priority_id = storage_processor .transactions_dal() .next_priority_id() diff --git a/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs index a6a1835a9f18..adbfcb52f64b 100644 --- a/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs +++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater/mod.rs @@ -9,7 +9,7 @@ use serde::Serialize; #[cfg(test)] use tokio::sync::mpsc; use tokio::sync::watch; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_types::{ aggregated_operations::AggregatedActionType, api, L1BatchNumber, MiniblockNumber, H256, @@ -126,7 +126,7 @@ struct UpdaterCursor { } impl UpdaterCursor { - async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result { + async fn new(storage: &mut StorageProcessor<'_, Server>) -> anyhow::Result { let first_l1_batch_number = projected_first_l1_batch(storage).await?; // Use the snapshot L1 batch, or the genesis batch if we are not using a snapshot. 
Technically, the snapshot L1 batch // is not necessarily proven / executed yet, but since it and earlier batches are not stored, it serves @@ -243,7 +243,7 @@ impl UpdaterCursor { #[derive(Debug)] pub struct BatchStatusUpdater { client: Box, - pool: ConnectionPool, + pool: ConnectionPool, health_updater: HealthUpdater, sleep_interval: Duration, /// Test-only sender of status changes each time they are produced and applied to the storage. @@ -254,13 +254,13 @@ pub struct BatchStatusUpdater { impl BatchStatusUpdater { const DEFAULT_SLEEP_INTERVAL: Duration = Duration::from_secs(5); - pub fn new(client: HttpClient, pool: ConnectionPool) -> Self { + pub fn new(client: HttpClient, pool: ConnectionPool) -> Self { Self::from_parts(Box::new(client), pool, Self::DEFAULT_SLEEP_INTERVAL) } fn from_parts( client: Box, - pool: ConnectionPool, + pool: ConnectionPool, sleep_interval: Duration, ) -> Self { Self { diff --git a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs index 9922055f42be..50aa78898b75 100644 --- a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs @@ -6,6 +6,7 @@ use chrono::TimeZone; use test_casing::{test_casing, Product}; use tokio::sync::{watch, Mutex}; use zksync_contracts::BaseSystemContractsHashes; +use zksync_dal::StorageProcessor; use zksync_types::{Address, L2ChainId, ProtocolVersionId}; use super::*; @@ -15,7 +16,7 @@ use crate::{ utils::testonly::{create_l1_batch, create_miniblock, prepare_recovery_snapshot}, }; -async fn seal_l1_batch(storage: &mut StorageProcessor<'_>, number: L1BatchNumber) { +async fn seal_l1_batch(storage: &mut StorageProcessor<'_, Server>, number: L1BatchNumber) { let mut storage = storage.start_transaction().await.unwrap(); // Insert a mock miniblock so that `get_block_details()` will return values. 
let miniblock = create_miniblock(number.0); @@ -104,7 +105,7 @@ impl L1BatchStagesMap { } } - async fn assert_storage(&self, storage: &mut StorageProcessor<'_>) { + async fn assert_storage(&self, storage: &mut StorageProcessor<'_, Server>) { for (number, stage) in self.iter() { let local_details = storage .blocks_web3_dal() @@ -213,7 +214,7 @@ fn mock_change(number: L1BatchNumber) -> BatchStatusChange { fn mock_updater( client: MockMainNodeClient, - pool: ConnectionPool, + pool: ConnectionPool, ) -> (BatchStatusUpdater, mpsc::UnboundedReceiver) { let (changes_sender, changes_receiver) = mpsc::unbounded_channel(); let mut updater = @@ -224,7 +225,7 @@ fn mock_updater( #[tokio::test] async fn updater_cursor_for_storage_with_genesis_block() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) .await @@ -259,7 +260,7 @@ async fn updater_cursor_for_storage_with_genesis_block() { #[tokio::test] async fn updater_cursor_after_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; @@ -272,7 +273,7 @@ async fn updater_cursor_after_snapshot_recovery() { #[test_casing(4, Product(([false, true], [false, true])))] #[tokio::test] async fn normal_updater_operation(snapshot_recovery: bool, async_batches: bool) { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let first_batch_number = if snapshot_recovery { prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; @@ -344,7 +345,7 @@ async fn normal_updater_operation(snapshot_recovery: bool, async_batches: bool) #[test_casing(2, [false, true])] #[tokio::test] async fn updater_with_gradual_main_node_updates(snapshot_recovery: bool) { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let first_batch_number = if snapshot_recovery { prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; @@ -415,7 +416,10 @@ async fn updater_with_gradual_main_node_updates(snapshot_recovery: bool) { test_resuming_updater(pool, target_batch_stages).await; } -async fn test_resuming_updater(pool: ConnectionPool, initial_batch_stages: L1BatchStagesMap) { +async fn test_resuming_updater( + pool: ConnectionPool, + initial_batch_stages: L1BatchStagesMap, +) { let target_batch_stages = L1BatchStagesMap::new( initial_batch_stages.first_batch_number, vec![L1BatchStage::Executed; 6], diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index 5152e7361d78..fa4e492e7692 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -5,9 +5,9 @@ use async_trait::async_trait; use multivm::interface::{FinishedL1Batch, L1BatchEnv, SystemEnv}; use vm_utils::storage::{l1_batch_params, L1BatchParamsProvider}; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server, ServerDals}; use zksync_types::{ - ethabi::Address, 
fee_model::BatchFeeInput, protocol_version::ProtocolUpgradeTx, + ethabi::Address, fee_model::BatchFeeInput, protocol_upgrade::ProtocolUpgradeTx, witness_block_state::WitnessBlockState, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, H256, }; @@ -44,7 +44,7 @@ const POLL_INTERVAL: Duration = Duration::from_millis(100); #[derive(Debug)] pub struct ExternalIO { miniblock_sealer_handle: MiniblockSealerHandle, - pool: ConnectionPool, + pool: ConnectionPool, current_l1_batch_number: L1BatchNumber, current_miniblock_number: MiniblockNumber, @@ -65,7 +65,7 @@ impl ExternalIO { #[allow(clippy::too_many_arguments)] pub async fn new( miniblock_sealer_handle: MiniblockSealerHandle, - pool: ConnectionPool, + pool: ConnectionPool, actions: ActionQueue, sync_state: SyncState, main_node_client: Box, diff --git a/core/lib/zksync_core/src/sync_layer/fetcher.rs b/core/lib/zksync_core/src/sync_layer/fetcher.rs index 87973ac363b8..e8fe51968240 100644 --- a/core/lib/zksync_core/src/sync_layer/fetcher.rs +++ b/core/lib/zksync_core/src/sync_layer/fetcher.rs @@ -1,5 +1,5 @@ use anyhow::Context as _; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_types::{ api::en::SyncBlock, block::MiniblockHasher, helpers::unix_timestamp_ms, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, @@ -102,7 +102,9 @@ impl TryFrom for FetchedBlock { impl IoCursor { /// Loads this cursor from storage and modifies it to account for the pending L1 batch if necessary. - pub(crate) async fn for_fetcher(storage: &mut StorageProcessor<'_>) -> anyhow::Result { + pub(crate) async fn for_fetcher( + storage: &mut StorageProcessor<'_, Server>, + ) -> anyhow::Result { let mut this = Self::new(storage).await?; // It's important to know whether we have opened a new batch already or just sealed the previous one. // Depending on it, we must either insert `OpenBatch` item into the queue, or not. 
diff --git a/core/lib/zksync_core/src/sync_layer/genesis.rs b/core/lib/zksync_core/src/sync_layer/genesis.rs index c4a0897c36b8..d2db0c6b5940 100644 --- a/core/lib/zksync_core/src/sync_layer/genesis.rs +++ b/core/lib/zksync_core/src/sync_layer/genesis.rs @@ -1,6 +1,6 @@ use anyhow::Context as _; use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SystemContractCode}; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_types::{ block::DeployedContract, protocol_version::L1VerifierConfig, system_contracts::get_system_smart_contracts, AccountTreeId, L1BatchNumber, L2ChainId, H256, @@ -10,7 +10,7 @@ use super::client::MainNodeClient; use crate::genesis::{ensure_genesis_state, GenesisParams}; pub async fn perform_genesis_if_needed( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, zksync_chain_id: L2ChainId, client: &dyn MainNodeClient, ) -> anyhow::Result<()> { diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 7f514d163dc1..5e03e3283374 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -9,7 +9,7 @@ use std::{ use test_casing::test_casing; use tokio::{sync::watch, task::JoinHandle}; use zksync_contracts::BaseSystemContractsHashes; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_types::{ api, block::MiniblockHasher, @@ -56,7 +56,7 @@ pub(super) struct StateKeeperHandles { impl StateKeeperHandles { /// `tx_hashes` are grouped by the L1 batch. pub async fn new( - pool: ConnectionPool, + pool: ConnectionPool, main_node_client: MockMainNodeClient, actions: ActionQueue, tx_hashes: &[&[H256]], @@ -125,7 +125,7 @@ impl StateKeeperHandles { } } -async fn ensure_genesis(storage: &mut StorageProcessor<'_>) { +async fn ensure_genesis(storage: &mut StorageProcessor<'_, Server>) { if storage.blocks_dal().is_genesis_needed().await.unwrap() { ensure_genesis_state(storage, L2ChainId::default(), &GenesisParams::mock()) .await @@ -163,7 +163,7 @@ fn genesis_snapshot_recovery_status() -> SnapshotRecoveryStatus { #[test_casing(2, [false, true])] #[tokio::test] async fn external_io_basics(snapshot_recovery: bool) { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let snapshot = if snapshot_recovery { prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await @@ -235,7 +235,7 @@ async fn external_io_basics(snapshot_recovery: bool) { #[test_casing(2, [false, true])] #[tokio::test] async fn external_io_works_without_local_protocol_version(snapshot_recovery: bool) { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let snapshot = if snapshot_recovery { prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await @@ -314,7 +314,7 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo } pub(super) async fn run_state_keeper_with_multiple_miniblocks( - pool: ConnectionPool, + pool: ConnectionPool, snapshot_recovery: bool, ) -> (SnapshotRecoveryStatus, Vec) { let mut storage = pool.access_storage().await.unwrap(); @@ -374,7 +374,7 @@ pub(super) async fn run_state_keeper_with_multiple_miniblocks( #[test_casing(2, [false, 
true])] #[tokio::test] async fn external_io_with_multiple_miniblocks(snapshot_recovery: bool) { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let (snapshot, tx_hashes) = run_state_keeper_with_multiple_miniblocks(pool.clone(), snapshot_recovery).await; assert_eq!(tx_hashes.len(), 8); @@ -413,7 +413,7 @@ async fn external_io_with_multiple_miniblocks(snapshot_recovery: bool) { } async fn test_external_io_recovery( - pool: ConnectionPool, + pool: ConnectionPool, snapshot: &SnapshotRecoveryStatus, mut tx_hashes: Vec, ) { @@ -459,7 +459,7 @@ async fn test_external_io_recovery( assert_eq!(miniblock.timestamp, snapshot.miniblock_timestamp + 3); } -pub(super) async fn mock_l1_batch_hash_computation(pool: ConnectionPool, number: u32) { +pub(super) async fn mock_l1_batch_hash_computation(pool: ConnectionPool, number: u32) { loop { let mut storage = pool.access_storage().await.unwrap(); let last_l1_batch_number = storage @@ -484,7 +484,7 @@ pub(super) async fn mock_l1_batch_hash_computation(pool: ConnectionPool, number: /// Returns tx hashes of all generated transactions, grouped by the L1 batch. pub(super) async fn run_state_keeper_with_multiple_l1_batches( - pool: ConnectionPool, + pool: ConnectionPool, snapshot_recovery: bool, ) -> (SnapshotRecoveryStatus, Vec>) { let mut storage = pool.access_storage().await.unwrap(); @@ -551,7 +551,7 @@ pub(super) async fn run_state_keeper_with_multiple_l1_batches( #[tokio::test] async fn external_io_with_multiple_l1_batches() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; run_state_keeper_with_multiple_l1_batches(pool.clone(), false).await; let mut storage = pool.access_storage().await.unwrap(); diff --git a/core/lib/zksync_core/src/utils/mod.rs b/core/lib/zksync_core/src/utils/mod.rs index ad2e4bd05412..7e667d7a02b2 100644 --- a/core/lib/zksync_core/src/utils/mod.rs +++ b/core/lib/zksync_core/src/utils/mod.rs @@ -9,7 +9,7 @@ use std::{ use anyhow::Context as _; use async_trait::async_trait; use tokio::sync::watch; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerDals, StorageProcessor}; use zksync_types::{L1BatchNumber, ProtocolVersionId}; #[cfg(test)] @@ -58,7 +58,7 @@ pub(crate) async fn binary_search_with( /// /// Returns the number of the *earliest* L1 batch, or `None` if the stop signal is received. pub(crate) async fn wait_for_l1_batch( - pool: &ConnectionPool, + pool: &ConnectionPool, poll_interval: Duration, stop_receiver: &mut watch::Receiver, ) -> anyhow::Result> { @@ -89,7 +89,7 @@ pub(crate) async fn wait_for_l1_batch( /// /// Returns the number of the *earliest* L1 batch with metadata, or `None` if the stop signal is received. pub(crate) async fn wait_for_l1_batch_with_metadata( - pool: &ConnectionPool, + pool: &ConnectionPool, poll_interval: Duration, stop_receiver: &mut watch::Receiver, ) -> anyhow::Result> { @@ -120,7 +120,7 @@ pub(crate) async fn wait_for_l1_batch_with_metadata( /// Returns the projected number of the first locally available L1 batch. The L1 batch is **not** /// guaranteed to be present in the storage! pub(crate) async fn projected_first_l1_batch( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, ) -> anyhow::Result { let snapshot_recovery = storage .snapshot_recovery_dal() @@ -133,7 +133,7 @@ pub(crate) async fn projected_first_l1_batch( /// Obtains a protocol version projected to be applied for the next miniblock. 
This is either the version used by the last /// sealed miniblock, or (if there are no miniblocks), one referenced in the snapshot recovery record. pub(crate) async fn pending_protocol_version( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, ) -> anyhow::Result { static WARNED_ABOUT_NO_VERSION: AtomicBool = AtomicBool::new(false); @@ -181,7 +181,7 @@ mod tests { #[tokio::test] async fn waiting_for_l1_batch_success() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let (_stop_sender, mut stop_receiver) = watch::channel(false); let pool_copy = pool.clone(); @@ -201,7 +201,7 @@ mod tests { #[tokio::test] async fn waiting_for_l1_batch_cancellation() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let (stop_sender, mut stop_receiver) = watch::channel(false); tokio::spawn(async move { diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs index 35f3dbdfbe55..452bb4c9783c 100644 --- a/core/lib/zksync_core/src/utils/testonly.rs +++ b/core/lib/zksync_core/src/utils/testonly.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use multivm::utils::get_max_gas_per_pubdata_byte; use zksync_contracts::BaseSystemContractsHashes; -use zksync_dal::StorageProcessor; +use zksync_dal::{Server, ServerDals, StorageProcessor}; use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction}; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{ @@ -201,7 +201,7 @@ impl Snapshot { /// Prepares a recovery snapshot without performing genesis. pub(crate) async fn prepare_recovery_snapshot( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, l1_batch: L1BatchNumber, miniblock: MiniblockNumber, storage_logs: &[StorageLog], @@ -210,7 +210,7 @@ pub(crate) async fn prepare_recovery_snapshot( } /// Takes a storage snapshot at the last sealed L1 batch. -pub(crate) async fn snapshot(storage: &mut StorageProcessor<'_>) -> Snapshot { +pub(crate) async fn snapshot(storage: &mut StorageProcessor<'_, Server>) -> Snapshot { let l1_batch = storage .blocks_dal() .get_sealed_l1_batch_number() @@ -259,7 +259,7 @@ pub(crate) async fn snapshot(storage: &mut StorageProcessor<'_>) -> Snapshot { /// Recovers storage from a snapshot. /// Miniblock and L1 batch are intentionally **not** inserted into the storage. 
pub(crate) async fn recover( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Server>, snapshot: Snapshot, ) -> SnapshotRecoveryStatus { let mut storage = storage.start_transaction().await.unwrap(); diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 9a8bf4465cd3..147a8ebc2f3a 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -14,6 +14,8 @@ prometheus_exporter = { path = "../../lib/prometheus_exporter" } zksync_types = { path = "../../lib/types" } zksync_health_check = { path = "../../lib/health_check" } zksync_dal = { path = "../../lib/dal" } +prover_dal = { path = "../../../prover/prover_dal" } +zksync_db_connection = { path = "../../lib/db_connection" } zksync_config = { path = "../../lib/config" } zksync_state = { path = "../../lib/state" } zksync_object_store = { path = "../../lib/object_store" } diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs index 40d2776defd6..105e2c424da3 100644 --- a/core/node/node_framework/src/implementations/layers/eth_watch.rs +++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs @@ -3,7 +3,7 @@ use std::time::Duration; use zksync_config::{ContractsConfig, ETHWatchConfig}; use zksync_contracts::governance_contract; use zksync_core::eth_watch::{client::EthHttpQueryClient, EthWatch}; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use zksync_types::{ethabi::Contract, Address}; use crate::{ @@ -60,7 +60,7 @@ impl WiringLayer for EthWatchLayer { #[derive(Debug)] struct EthWatchTask { - main_pool: ConnectionPool, + main_pool: ConnectionPool, client: EthHttpQueryClient, governance_contract: Option, diamond_proxy_address: Address, diff --git a/core/node/node_framework/src/implementations/layers/house_keeper.rs b/core/node/node_framework/src/implementations/layers/house_keeper.rs index 4b0fa0351b92..16b8029c7338 100644 --- a/core/node/node_framework/src/implementations/layers/house_keeper.rs +++ b/core/node/node_framework/src/implementations/layers/house_keeper.rs @@ -16,7 +16,7 @@ use zksync_core::house_keeper::{ periodic_job::PeriodicJob, waiting_to_queued_fri_witness_job_mover::WaitingToQueuedFriWitnessJobMover, }; -use zksync_dal::ConnectionPool; +use zksync_dal::{metrics::PostgresMetrics, ConnectionPool, Server}; use crate::{ implementations::resources::pools::{ProverPoolResource, ReplicaPoolResource}, @@ -167,7 +167,7 @@ impl WiringLayer for HouseKeeperLayer { #[derive(Debug)] struct PoolForMetricsTask { - pool_for_metrics: ConnectionPool, + pool_for_metrics: ConnectionPool, } #[async_trait::async_trait] @@ -177,10 +177,7 @@ impl Task for PoolForMetricsTask { } async fn run(self: Box, _stop_receiver: StopReceiver) -> anyhow::Result<()> { - self.pool_for_metrics - .run_postgres_metrics_scraping(SCRAPE_INTERVAL) - .await; - + PostgresMetrics::run_scraping(self.pool_for_metrics, SCRAPE_INTERVAL).await; Ok(()) } } diff --git a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs index 5f9ea646304f..7a7e4d8d9768 100644 --- a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs +++ b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs @@ -1,5 +1,5 @@ use zksync_core::metadata_calculator::{MetadataCalculator, MetadataCalculatorConfig}; -use zksync_dal::ConnectionPool; +use 
zksync_dal::{ConnectionPool, Server}; use zksync_storage::RocksDB; use crate::{ @@ -26,7 +26,7 @@ pub struct MetadataCalculatorLayer(pub MetadataCalculatorConfig); #[derive(Debug)] pub struct MetadataCalculatorTask { metadata_calculator: MetadataCalculator, - main_pool: ConnectionPool, + main_pool: ConnectionPool<Server>, } #[async_trait::async_trait] diff --git a/core/node/node_framework/src/implementations/layers/pools_layer.rs b/core/node/node_framework/src/implementations/layers/pools_layer.rs index 111497e67a4d..5c2c7706bc06 100644 --- a/core/node/node_framework/src/implementations/layers/pools_layer.rs +++ b/core/node/node_framework/src/implementations/layers/pools_layer.rs @@ -1,5 +1,6 @@ +use prover_dal::Prover; use zksync_config::configs::PostgresConfig; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use crate::{ implementations::resources::pools::{ @@ -74,22 +75,28 @@ impl WiringLayer for PoolsLayer { } if self.with_master { - let mut master_pool = - ConnectionPool::builder(self.config.master_url()?, self.config.max_connections()?); + let mut master_pool = ConnectionPool::<Server>::builder( + self.config.master_url()?, + self.config.max_connections()?, + ); master_pool.set_statement_timeout(self.config.statement_timeout()); context.insert_resource(MasterPoolResource::new(master_pool))?; } if self.with_replica { - let mut replica_pool = - ConnectionPool::builder(self.config.replica_url()?, self.config.max_connections()?); + let mut replica_pool = ConnectionPool::<Server>::builder( + self.config.replica_url()?, + self.config.max_connections()?, + ); replica_pool.set_statement_timeout(self.config.statement_timeout()); context.insert_resource(ReplicaPoolResource::new(replica_pool))?; } if self.with_prover { - let mut prover_pool = - ConnectionPool::builder(self.config.prover_url()?, self.config.max_connections()?); + let mut prover_pool = ConnectionPool::<Prover>::builder( + self.config.prover_url()?, + self.config.max_connections()?, + ); prover_pool.set_statement_timeout(self.config.statement_timeout()); context.insert_resource(ProverPoolResource::new(prover_pool))?; } diff --git a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs index d9e3d3da19f8..493a3dcc1bed 100644 --- a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs +++ b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use zksync_config::{configs::ProofDataHandlerConfig, ContractsConfig}; use zksync_core::proof_data_handler; -use zksync_dal::ConnectionPool; +use zksync_dal::{ConnectionPool, Server}; use zksync_object_store::ObjectStore; use crate::{ @@ -65,7 +65,7 @@ struct ProofDataHandlerTask { proof_data_handler_config: ProofDataHandlerConfig, contracts_config: ContractsConfig, blob_store: Arc, - main_pool: ConnectionPool, + main_pool: ConnectionPool<Server>, } #[async_trait::async_trait] diff --git a/core/node/node_framework/src/implementations/resources/pools.rs b/core/node/node_framework/src/implementations/resources/pools.rs index 21ca7fe50d58..398667237832 100644 --- a/core/node/node_framework/src/implementations/resources/pools.rs +++ b/core/node/node_framework/src/implementations/resources/pools.rs @@ -3,7 +3,9 @@ use std::sync::{ Arc, }; -use zksync_dal::{connection::ConnectionPoolBuilder, ConnectionPool}; +use prover_dal::Prover; +use zksync_dal::{ConnectionPool, Server}; +use zksync_db_connection::connection::ConnectionPoolBuilder; use 
crate::resource::Resource; @@ -11,7 +13,7 @@ use crate::resource::Resource; #[derive(Debug, Clone)] pub struct MasterPoolResource { connections_count: Arc<AtomicU32>, - builder: ConnectionPoolBuilder, + builder: ConnectionPoolBuilder<Server>, } impl Resource for MasterPoolResource { @@ -21,14 +23,14 @@ impl Resource for MasterPoolResource { } impl MasterPoolResource { - pub fn new(builder: ConnectionPoolBuilder) -> Self { + pub fn new(builder: ConnectionPoolBuilder<Server>) -> Self { Self { connections_count: Arc::new(AtomicU32::new(0)), builder, } } - pub async fn get(&self) -> anyhow::Result<ConnectionPool> { + pub async fn get(&self) -> anyhow::Result<ConnectionPool<Server>> { let result = self.builder.build().await; if result.is_ok() { @@ -43,11 +45,11 @@ impl MasterPoolResource { result } - pub async fn get_singleton(&self) -> anyhow::Result<ConnectionPool> { + pub async fn get_singleton(&self) -> anyhow::Result<ConnectionPool<Server>> { self.get_custom(1).await } - pub async fn get_custom(&self, size: u32) -> anyhow::Result<ConnectionPool> { + pub async fn get_custom(&self, size: u32) -> anyhow::Result<ConnectionPool<Server>> { let result = self.builder.clone().set_max_size(size).build().await; if result.is_ok() { @@ -66,7 +68,7 @@ impl MasterPoolResource { #[derive(Debug, Clone)] pub struct ReplicaPoolResource { connections_count: Arc<AtomicU32>, - builder: ConnectionPoolBuilder, + builder: ConnectionPoolBuilder<Server>, } impl Resource for ReplicaPoolResource { @@ -76,14 +78,14 @@ impl Resource for ReplicaPoolResource { } impl ReplicaPoolResource { - pub fn new(builder: ConnectionPoolBuilder) -> Self { + pub fn new(builder: ConnectionPoolBuilder<Server>) -> Self { Self { connections_count: Arc::new(AtomicU32::new(0)), builder, } } - pub async fn get(&self) -> anyhow::Result<ConnectionPool> { + pub async fn get(&self) -> anyhow::Result<ConnectionPool<Server>> { let result = self.builder.build().await; if result.is_ok() { @@ -98,11 +100,11 @@ impl ReplicaPoolResource { result } - pub async fn get_singleton(&self) -> anyhow::Result<ConnectionPool> { + pub async fn get_singleton(&self) -> anyhow::Result<ConnectionPool<Server>> { self.get_custom(1).await } - pub async fn get_custom(&self, size: u32) -> anyhow::Result<ConnectionPool> { + pub async fn get_custom(&self, size: u32) -> anyhow::Result<ConnectionPool<Server>> { let result = self.builder.clone().set_max_size(size).build().await; if result.is_ok() { @@ -121,7 +123,7 @@ impl ReplicaPoolResource { #[derive(Debug, Clone)] pub struct ProverPoolResource { connections_count: Arc<AtomicU32>, - builder: ConnectionPoolBuilder, + builder: ConnectionPoolBuilder<Prover>, } impl Resource for ProverPoolResource { @@ -131,14 +133,14 @@ impl Resource for ProverPoolResource { } impl ProverPoolResource { - pub fn new(builder: ConnectionPoolBuilder) -> Self { + pub fn new(builder: ConnectionPoolBuilder<Prover>) -> Self { Self { connections_count: Arc::new(AtomicU32::new(0)), builder, } } - pub async fn get(&self) -> anyhow::Result<ConnectionPool> { + pub async fn get(&self) -> anyhow::Result<ConnectionPool<Prover>> { let result = self.builder.build().await; if result.is_ok() { @@ -153,11 +155,11 @@ impl ProverPoolResource { result } - pub async fn get_singleton(&self) -> anyhow::Result<ConnectionPool> { + pub async fn get_singleton(&self) -> anyhow::Result<ConnectionPool<Prover>> { self.get_custom(1).await } - pub async fn get_custom(&self, size: u32) -> anyhow::Result<ConnectionPool> { + pub async fn get_custom(&self, size: u32) -> anyhow::Result<ConnectionPool<Prover>> { let result = self.builder.clone().set_max_size(size).build().await; if result.is_ok() { diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 19e216b51958..e2a84a00f32c 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -586,7 +586,7 @@ dependencies = [ "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "const_format", "convert_case", - "crossbeam 0.7.3", + "crossbeam 
0.8.4", "crypto-bigint 0.5.5", "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-boojum?branch=main)", "derivative", @@ -778,9 +778,9 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chrono" -version = "0.4.34" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ "android-tzdata", "iana-time-zone", @@ -788,7 +788,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.0", + "windows-targets 0.48.5", ] [[package]] @@ -3437,7 +3437,16 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a015b430d3c108a207fd776d2e2196aaf8b1cf8cf93253e3a097ff3085076a1" dependencies = [ - "num_enum_derive", + "num_enum_derive 0.6.1", +] + +[[package]] +name = "num_enum" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" +dependencies = [ + "num_enum_derive 0.7.2", ] [[package]] @@ -3452,6 +3461,18 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "num_enum_derive" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" +dependencies = [ + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.78", + "quote 1.0.35", + "syn 2.0.48", +] + [[package]] name = "object" version = "0.32.2" @@ -4032,6 +4053,12 @@ dependencies = [ [[package]] name = "prover_dal" version = "0.1.0" +dependencies = [ + "sqlx", + "strum", + "zksync_basic_types", + "zksync_db_connection", +] [[package]] name = "ptr_meta" @@ -6752,7 +6779,7 @@ version = "0.1.0" source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git#32dd320953841aa78579d9da08abbc70bcaed175" dependencies = [ "anyhow", - "num_enum", + "num_enum 0.6.1", "serde", "static_assertions", "zkevm_opcode_defs 1.3.2", @@ -6764,7 +6791,7 @@ version = "1.4.1" source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git?branch=v1.4.1#0aac08c3b097ee8147e748475117ac46bddcdcef" dependencies = [ "anyhow", - "num_enum", + "num_enum 0.6.1", "serde", "static_assertions", "zkevm_opcode_defs 1.4.1", @@ -6952,8 +6979,11 @@ dependencies = [ name = "zksync_basic_types" version = "0.1.0" dependencies = [ + "chrono", + "num_enum 0.7.2", "serde", "serde_json", + "strum", "web3", ] @@ -7086,7 +7116,6 @@ dependencies = [ "chrono", "hex", "itertools 0.10.5", - "once_cell", "prost", "rand 0.8.5", "serde", @@ -7096,12 +7125,11 @@ dependencies = [ "thiserror", "tokio", "tracing", - "url", "vise", "zksync_consensus_roles", "zksync_consensus_storage", "zksync_contracts", - "zksync_health_check", + "zksync_db_connection", "zksync_protobuf", "zksync_protobuf_build", "zksync_system_constants", @@ -7109,6 +7137,22 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_db_connection" +version = "0.1.0" +dependencies = [ + "anyhow", + "rand 0.8.5", + "serde", + "serde_json", + "sqlx", + "tokio", + "tracing", + "url", + "vise", + "zksync_health_check", +] + [[package]] name = "zksync_env_config" version = "0.1.0" @@ -7174,6 +7218,7 @@ dependencies = [ "ctrlc", "futures 0.3.30", "prometheus_exporter", + "prover_dal", "reqwest", "serde", "serde_json", @@ -7186,7 +7231,6 @@ dependencies = [ "zkevm_test_harness 1.3.3", 
"zkevm_test_harness 1.4.2", "zksync_config", - "zksync_dal", "zksync_env_config", "zksync_object_store", "zksync_prover_fri_types", @@ -7241,6 +7285,7 @@ dependencies = [ "futures 0.3.30", "local-ip-address", "prometheus_exporter", + "prover_dal", "regex", "reqwest", "serde", @@ -7252,7 +7297,6 @@ dependencies = [ "vlog", "zkevm_test_harness 1.4.2", "zksync_config", - "zksync_dal", "zksync_env_config", "zksync_object_store", "zksync_prover_fri_types", @@ -7272,6 +7316,7 @@ dependencies = [ "futures 0.3.30", "log", "prometheus_exporter", + "prover_dal", "reqwest", "serde", "tokio", @@ -7279,7 +7324,6 @@ dependencies = [ "vise", "vlog", "zksync_config", - "zksync_dal", "zksync_env_config", "zksync_object_store", "zksync_prover_interface", @@ -7302,13 +7346,13 @@ name = "zksync_prover_fri_utils" version = "0.1.0" dependencies = [ "anyhow", + "prover_dal", "regex", "reqwest", "serde", "tracing", "vise", "zksync_config", - "zksync_dal", "zksync_object_store", "zksync_prover_fri_types", "zksync_types", @@ -7386,7 +7430,7 @@ dependencies = [ "hex", "itertools 0.10.5", "num", - "num_enum", + "num_enum 0.6.1", "once_cell", "prost", "rlp", @@ -7442,6 +7486,7 @@ dependencies = [ "multivm", "once_cell", "prometheus_exporter", + "prover_dal", "rand 0.8.5", "serde", "structopt", @@ -7477,6 +7522,7 @@ dependencies = [ "ctrlc", "futures 0.3.30", "prometheus_exporter", + "prover_dal", "queues", "serde", "structopt", @@ -7486,7 +7532,6 @@ dependencies = [ "vk_setup_data_generator_server_fri", "vlog", "zksync_config", - "zksync_dal", "zksync_env_config", "zksync_object_store", "zksync_prover_fri_types", diff --git a/prover/proof_fri_compressor/Cargo.toml b/prover/proof_fri_compressor/Cargo.toml index 6af1d47fcbed..a1406f251b32 100644 --- a/prover/proof_fri_compressor/Cargo.toml +++ b/prover/proof_fri_compressor/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" } zksync_types = { path = "../../core/lib/types" } -zksync_dal = { path = "../../core/lib/dal" } +prover_dal = { path = "../prover_dal" } zksync_config = { path = "../../core/lib/config" } zksync_env_config = { path = "../../core/lib/env_config" } zksync_object_store = { path = "../../core/lib/object_store" } diff --git a/prover/proof_fri_compressor/src/compressor.rs b/prover/proof_fri_compressor/src/compressor.rs index a4260acc9996..aefda558c3ac 100644 --- a/prover/proof_fri_compressor/src/compressor.rs +++ b/prover/proof_fri_compressor/src/compressor.rs @@ -3,6 +3,7 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context as _; use async_trait::async_trait; use circuit_sequencer_api::proof::FinalProof; +use prover_dal::{ConnectionPool, Prover, ProverDals}; use tokio::task::JoinHandle; use zkevm_test_harness::proof_wrapper_utils::{wrap_proof, WrapperConfig}; use zkevm_test_harness_1_3_3::{ @@ -15,7 +16,6 @@ use zkevm_test_harness_1_3_3::{ }, witness::oracle::VmWitnessOracle, }; -use zksync_dal::ConnectionPool; use zksync_object_store::ObjectStore; use zksync_prover_fri_types::{ circuit_definitions::{ @@ -36,7 +36,7 @@ use crate::metrics::METRICS; pub struct ProofCompressor { blob_store: Arc, - pool: ConnectionPool, + pool: ConnectionPool, compression_mode: u8, verify_wrapper_proof: bool, max_attempts: u32, @@ -45,7 +45,7 @@ pub struct ProofCompressor { impl ProofCompressor { pub fn new( blob_store: Arc, - pool: ConnectionPool, + pool: ConnectionPool, compression_mode: u8, verify_wrapper_proof: bool, max_attempts: u32, diff --git 
a/prover/proof_fri_compressor/src/main.rs b/prover/proof_fri_compressor/src/main.rs index e148a3687669..24f0793a0f86 100644 --- a/prover/proof_fri_compressor/src/main.rs +++ b/prover/proof_fri_compressor/src/main.rs @@ -2,10 +2,10 @@ use std::{env, time::Duration}; use anyhow::Context as _; use prometheus_exporter::PrometheusExporterConfig; +use prover_dal::{ConnectionPool, Prover}; use structopt::StructOpt; use tokio::sync::{oneshot, watch}; use zksync_config::configs::{FriProofCompressorConfig, ObservabilityConfig, PostgresConfig}; -use zksync_dal::ConnectionPool; use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv}; use zksync_object_store::ObjectStoreFactory; use zksync_queued_job_processor::JobProcessor; @@ -51,7 +51,7 @@ async fn main() -> anyhow::Result<()> { let opt = Opt::from_args(); let config = FriProofCompressorConfig::from_env().context("FriProofCompressorConfig")?; let postgres_config = PostgresConfig::from_env().context("PostgresConfig::from_env()")?; - let pool = ConnectionPool::singleton(postgres_config.prover_url()?) + let pool = ConnectionPool::::singleton(postgres_config.prover_url()?) .build() .await .context("failed to build a connection pool")?; diff --git a/core/lib/dal/.sqlx/query-00b88ec7fcf40bb18e0018b7c76f6e1df560ab1e8935564355236e90b6147d2f.json b/prover/prover_dal/.sqlx/query-00b88ec7fcf40bb18e0018b7c76f6e1df560ab1e8935564355236e90b6147d2f.json similarity index 100% rename from core/lib/dal/.sqlx/query-00b88ec7fcf40bb18e0018b7c76f6e1df560ab1e8935564355236e90b6147d2f.json rename to prover/prover_dal/.sqlx/query-00b88ec7fcf40bb18e0018b7c76f6e1df560ab1e8935564355236e90b6147d2f.json diff --git a/core/lib/dal/.sqlx/query-01ac5343beb09ec5bd45b39d560e57a83f37da8999849377dfad60b44989be39.json b/prover/prover_dal/.sqlx/query-01ac5343beb09ec5bd45b39d560e57a83f37da8999849377dfad60b44989be39.json similarity index 100% rename from core/lib/dal/.sqlx/query-01ac5343beb09ec5bd45b39d560e57a83f37da8999849377dfad60b44989be39.json rename to prover/prover_dal/.sqlx/query-01ac5343beb09ec5bd45b39d560e57a83f37da8999849377dfad60b44989be39.json diff --git a/core/lib/dal/.sqlx/query-0d13b8947b1bafa9e5bc6fdc70a986511265c541d81b1d21f0a751ae1399c626.json b/prover/prover_dal/.sqlx/query-0d13b8947b1bafa9e5bc6fdc70a986511265c541d81b1d21f0a751ae1399c626.json similarity index 100% rename from core/lib/dal/.sqlx/query-0d13b8947b1bafa9e5bc6fdc70a986511265c541d81b1d21f0a751ae1399c626.json rename to prover/prover_dal/.sqlx/query-0d13b8947b1bafa9e5bc6fdc70a986511265c541d81b1d21f0a751ae1399c626.json diff --git a/core/lib/dal/.sqlx/query-12ab208f416e2875f89e558f0d4aff3a06b7a9c1866132d62e4449fa9436c7c4.json b/prover/prover_dal/.sqlx/query-12ab208f416e2875f89e558f0d4aff3a06b7a9c1866132d62e4449fa9436c7c4.json similarity index 100% rename from core/lib/dal/.sqlx/query-12ab208f416e2875f89e558f0d4aff3a06b7a9c1866132d62e4449fa9436c7c4.json rename to prover/prover_dal/.sqlx/query-12ab208f416e2875f89e558f0d4aff3a06b7a9c1866132d62e4449fa9436c7c4.json diff --git a/core/lib/dal/.sqlx/query-15858168fea6808c6d59d0e6d8f28a20420763a3a22899ad0e5f4b953b615a9e.json b/prover/prover_dal/.sqlx/query-15858168fea6808c6d59d0e6d8f28a20420763a3a22899ad0e5f4b953b615a9e.json similarity index 100% rename from core/lib/dal/.sqlx/query-15858168fea6808c6d59d0e6d8f28a20420763a3a22899ad0e5f4b953b615a9e.json rename to prover/prover_dal/.sqlx/query-15858168fea6808c6d59d0e6d8f28a20420763a3a22899ad0e5f4b953b615a9e.json diff --git 
a/core/lib/dal/.sqlx/query-1bc6597117db032b87df33040d61610ffa7f169d560e79e89b99eedf681c6773.json b/prover/prover_dal/.sqlx/query-1bc6597117db032b87df33040d61610ffa7f169d560e79e89b99eedf681c6773.json similarity index 100% rename from core/lib/dal/.sqlx/query-1bc6597117db032b87df33040d61610ffa7f169d560e79e89b99eedf681c6773.json rename to prover/prover_dal/.sqlx/query-1bc6597117db032b87df33040d61610ffa7f169d560e79e89b99eedf681c6773.json diff --git a/core/lib/dal/.sqlx/query-204cfd593c62a5a1582215a5f0f4d3648b75bf01ff336bbd77d15f9aa5fd6443.json b/prover/prover_dal/.sqlx/query-204cfd593c62a5a1582215a5f0f4d3648b75bf01ff336bbd77d15f9aa5fd6443.json similarity index 100% rename from core/lib/dal/.sqlx/query-204cfd593c62a5a1582215a5f0f4d3648b75bf01ff336bbd77d15f9aa5fd6443.json rename to prover/prover_dal/.sqlx/query-204cfd593c62a5a1582215a5f0f4d3648b75bf01ff336bbd77d15f9aa5fd6443.json diff --git a/core/lib/dal/.sqlx/query-2b626262c8003817ee02978f77452554ccfb5b83f00efdc12bed0f60ef439785.json b/prover/prover_dal/.sqlx/query-2b626262c8003817ee02978f77452554ccfb5b83f00efdc12bed0f60ef439785.json similarity index 100% rename from core/lib/dal/.sqlx/query-2b626262c8003817ee02978f77452554ccfb5b83f00efdc12bed0f60ef439785.json rename to prover/prover_dal/.sqlx/query-2b626262c8003817ee02978f77452554ccfb5b83f00efdc12bed0f60ef439785.json diff --git a/core/lib/dal/.sqlx/query-2d31fcce581975a82d6156b52e35fb7a093b73727f75e0cb7db9cea480c95f5c.json b/prover/prover_dal/.sqlx/query-2d31fcce581975a82d6156b52e35fb7a093b73727f75e0cb7db9cea480c95f5c.json similarity index 100% rename from core/lib/dal/.sqlx/query-2d31fcce581975a82d6156b52e35fb7a093b73727f75e0cb7db9cea480c95f5c.json rename to prover/prover_dal/.sqlx/query-2d31fcce581975a82d6156b52e35fb7a093b73727f75e0cb7db9cea480c95f5c.json diff --git a/core/lib/dal/.sqlx/query-33d6be45b246523ad76f9ae512322ff6372f63ecadb504a329499b02e7d3550e.json b/prover/prover_dal/.sqlx/query-33d6be45b246523ad76f9ae512322ff6372f63ecadb504a329499b02e7d3550e.json similarity index 100% rename from core/lib/dal/.sqlx/query-33d6be45b246523ad76f9ae512322ff6372f63ecadb504a329499b02e7d3550e.json rename to prover/prover_dal/.sqlx/query-33d6be45b246523ad76f9ae512322ff6372f63ecadb504a329499b02e7d3550e.json diff --git a/core/lib/dal/.sqlx/query-35b87a3b7db0af87c6a95e9fe7ef9044ae85b579c7051301b40bd5f94df1f530.json b/prover/prover_dal/.sqlx/query-35b87a3b7db0af87c6a95e9fe7ef9044ae85b579c7051301b40bd5f94df1f530.json similarity index 100% rename from core/lib/dal/.sqlx/query-35b87a3b7db0af87c6a95e9fe7ef9044ae85b579c7051301b40bd5f94df1f530.json rename to prover/prover_dal/.sqlx/query-35b87a3b7db0af87c6a95e9fe7ef9044ae85b579c7051301b40bd5f94df1f530.json diff --git a/core/lib/dal/.sqlx/query-3c3abbf689fa64c6da7de69fd916769dbb04d3a61cf232892236c974660ffe64.json b/prover/prover_dal/.sqlx/query-3c3abbf689fa64c6da7de69fd916769dbb04d3a61cf232892236c974660ffe64.json similarity index 100% rename from core/lib/dal/.sqlx/query-3c3abbf689fa64c6da7de69fd916769dbb04d3a61cf232892236c974660ffe64.json rename to prover/prover_dal/.sqlx/query-3c3abbf689fa64c6da7de69fd916769dbb04d3a61cf232892236c974660ffe64.json diff --git a/core/lib/dal/.sqlx/query-3ec365c5c81f4678a905ae5bbd48b87ead36f593488437c6f67da629ca81e4fa.json b/prover/prover_dal/.sqlx/query-3ec365c5c81f4678a905ae5bbd48b87ead36f593488437c6f67da629ca81e4fa.json similarity index 100% rename from core/lib/dal/.sqlx/query-3ec365c5c81f4678a905ae5bbd48b87ead36f593488437c6f67da629ca81e4fa.json rename to 
prover/prover_dal/.sqlx/query-3ec365c5c81f4678a905ae5bbd48b87ead36f593488437c6f67da629ca81e4fa.json diff --git a/core/lib/dal/.sqlx/query-46c4696fff5a4b8cc5cb46b05645da82065836fe17687ffad04126a6a8b2b27c.json b/prover/prover_dal/.sqlx/query-46c4696fff5a4b8cc5cb46b05645da82065836fe17687ffad04126a6a8b2b27c.json similarity index 100% rename from core/lib/dal/.sqlx/query-46c4696fff5a4b8cc5cb46b05645da82065836fe17687ffad04126a6a8b2b27c.json rename to prover/prover_dal/.sqlx/query-46c4696fff5a4b8cc5cb46b05645da82065836fe17687ffad04126a6a8b2b27c.json diff --git a/core/lib/dal/.sqlx/query-4d263992ed6d5abbd7d3ca43af9d772d8801b0ae673b7173ae08a1fa6cbf67b2.json b/prover/prover_dal/.sqlx/query-4d263992ed6d5abbd7d3ca43af9d772d8801b0ae673b7173ae08a1fa6cbf67b2.json similarity index 100% rename from core/lib/dal/.sqlx/query-4d263992ed6d5abbd7d3ca43af9d772d8801b0ae673b7173ae08a1fa6cbf67b2.json rename to prover/prover_dal/.sqlx/query-4d263992ed6d5abbd7d3ca43af9d772d8801b0ae673b7173ae08a1fa6cbf67b2.json diff --git a/core/lib/dal/.sqlx/query-4d92a133a36afd682a84fbfd75aafca34d61347e0e2e29fb07ca3d1b8b1f309c.json b/prover/prover_dal/.sqlx/query-4d92a133a36afd682a84fbfd75aafca34d61347e0e2e29fb07ca3d1b8b1f309c.json similarity index 100% rename from core/lib/dal/.sqlx/query-4d92a133a36afd682a84fbfd75aafca34d61347e0e2e29fb07ca3d1b8b1f309c.json rename to prover/prover_dal/.sqlx/query-4d92a133a36afd682a84fbfd75aafca34d61347e0e2e29fb07ca3d1b8b1f309c.json diff --git a/core/lib/dal/.sqlx/query-510bfea2346a8c63e74222e1159de366f88c20d00a8d928b6cf4caae0702b333.json b/prover/prover_dal/.sqlx/query-510bfea2346a8c63e74222e1159de366f88c20d00a8d928b6cf4caae0702b333.json similarity index 100% rename from core/lib/dal/.sqlx/query-510bfea2346a8c63e74222e1159de366f88c20d00a8d928b6cf4caae0702b333.json rename to prover/prover_dal/.sqlx/query-510bfea2346a8c63e74222e1159de366f88c20d00a8d928b6cf4caae0702b333.json diff --git a/core/lib/dal/.sqlx/query-534822a226068cde83ad8c30b569a8f447824a5ab466bb6eea1710e8aeaa2c56.json b/prover/prover_dal/.sqlx/query-534822a226068cde83ad8c30b569a8f447824a5ab466bb6eea1710e8aeaa2c56.json similarity index 100% rename from core/lib/dal/.sqlx/query-534822a226068cde83ad8c30b569a8f447824a5ab466bb6eea1710e8aeaa2c56.json rename to prover/prover_dal/.sqlx/query-534822a226068cde83ad8c30b569a8f447824a5ab466bb6eea1710e8aeaa2c56.json diff --git a/core/lib/dal/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json b/prover/prover_dal/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json similarity index 100% rename from core/lib/dal/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json rename to prover/prover_dal/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json diff --git a/core/lib/dal/.sqlx/query-5821f1446983260168cec366af26009503182c300877e74a8539f231050e6f85.json b/prover/prover_dal/.sqlx/query-5821f1446983260168cec366af26009503182c300877e74a8539f231050e6f85.json similarity index 100% rename from core/lib/dal/.sqlx/query-5821f1446983260168cec366af26009503182c300877e74a8539f231050e6f85.json rename to prover/prover_dal/.sqlx/query-5821f1446983260168cec366af26009503182c300877e74a8539f231050e6f85.json diff --git a/core/lib/dal/.sqlx/query-5e781f84ec41edd0941fa84de837effac442434c6e734d977e6682a7484abe7f.json b/prover/prover_dal/.sqlx/query-5e781f84ec41edd0941fa84de837effac442434c6e734d977e6682a7484abe7f.json similarity index 100% rename from 
core/lib/dal/.sqlx/query-5e781f84ec41edd0941fa84de837effac442434c6e734d977e6682a7484abe7f.json rename to prover/prover_dal/.sqlx/query-5e781f84ec41edd0941fa84de837effac442434c6e734d977e6682a7484abe7f.json diff --git a/core/lib/dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json b/prover/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json similarity index 100% rename from core/lib/dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json rename to prover/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json diff --git a/core/lib/dal/.sqlx/query-6ae2ed34230beae0e86c584e293e7ee767e4c98706246eb113498c0f817f5f38.json b/prover/prover_dal/.sqlx/query-6ae2ed34230beae0e86c584e293e7ee767e4c98706246eb113498c0f817f5f38.json similarity index 100% rename from core/lib/dal/.sqlx/query-6ae2ed34230beae0e86c584e293e7ee767e4c98706246eb113498c0f817f5f38.json rename to prover/prover_dal/.sqlx/query-6ae2ed34230beae0e86c584e293e7ee767e4c98706246eb113498c0f817f5f38.json diff --git a/core/lib/dal/.sqlx/query-75f6eaa518e7840374c4e44b0788bf92c7f2c55386c8208e3a82b30456abd5b4.json b/prover/prover_dal/.sqlx/query-75f6eaa518e7840374c4e44b0788bf92c7f2c55386c8208e3a82b30456abd5b4.json similarity index 100% rename from core/lib/dal/.sqlx/query-75f6eaa518e7840374c4e44b0788bf92c7f2c55386c8208e3a82b30456abd5b4.json rename to prover/prover_dal/.sqlx/query-75f6eaa518e7840374c4e44b0788bf92c7f2c55386c8208e3a82b30456abd5b4.json diff --git a/core/lib/dal/.sqlx/query-7a2145e2234a7896031bbc1ce82715e903f3b399886c2c73e838bd924fed6776.json b/prover/prover_dal/.sqlx/query-7a2145e2234a7896031bbc1ce82715e903f3b399886c2c73e838bd924fed6776.json similarity index 100% rename from core/lib/dal/.sqlx/query-7a2145e2234a7896031bbc1ce82715e903f3b399886c2c73e838bd924fed6776.json rename to prover/prover_dal/.sqlx/query-7a2145e2234a7896031bbc1ce82715e903f3b399886c2c73e838bd924fed6776.json diff --git a/core/lib/dal/.sqlx/query-7a8fffe8d4e3085e00c98f770d250d625f057acf1440b6550375ce5509a816a6.json b/prover/prover_dal/.sqlx/query-7a8fffe8d4e3085e00c98f770d250d625f057acf1440b6550375ce5509a816a6.json similarity index 100% rename from core/lib/dal/.sqlx/query-7a8fffe8d4e3085e00c98f770d250d625f057acf1440b6550375ce5509a816a6.json rename to prover/prover_dal/.sqlx/query-7a8fffe8d4e3085e00c98f770d250d625f057acf1440b6550375ce5509a816a6.json diff --git a/core/lib/dal/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json b/prover/prover_dal/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json similarity index 100% rename from core/lib/dal/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json rename to prover/prover_dal/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json diff --git a/core/lib/dal/.sqlx/query-8182690d0326b820d23fba49d391578db18c29cdca85b8b6aad86fe2a9bf6bbe.json b/prover/prover_dal/.sqlx/query-8182690d0326b820d23fba49d391578db18c29cdca85b8b6aad86fe2a9bf6bbe.json similarity index 100% rename from core/lib/dal/.sqlx/query-8182690d0326b820d23fba49d391578db18c29cdca85b8b6aad86fe2a9bf6bbe.json rename to prover/prover_dal/.sqlx/query-8182690d0326b820d23fba49d391578db18c29cdca85b8b6aad86fe2a9bf6bbe.json diff --git a/core/lib/dal/.sqlx/query-8f5e89ccadd4ea1da7bfe9793a1cbb724af0f0216433a70f19d784e3f2afbc9f.json 
b/prover/prover_dal/.sqlx/query-8f5e89ccadd4ea1da7bfe9793a1cbb724af0f0216433a70f19d784e3f2afbc9f.json similarity index 100% rename from core/lib/dal/.sqlx/query-8f5e89ccadd4ea1da7bfe9793a1cbb724af0f0216433a70f19d784e3f2afbc9f.json rename to prover/prover_dal/.sqlx/query-8f5e89ccadd4ea1da7bfe9793a1cbb724af0f0216433a70f19d784e3f2afbc9f.json diff --git a/core/lib/dal/.sqlx/query-9ef2f43e6201cc00a0e1425a666a36532fee1450733849852dfd20e18ded1f03.json b/prover/prover_dal/.sqlx/query-9ef2f43e6201cc00a0e1425a666a36532fee1450733849852dfd20e18ded1f03.json similarity index 100% rename from core/lib/dal/.sqlx/query-9ef2f43e6201cc00a0e1425a666a36532fee1450733849852dfd20e18ded1f03.json rename to prover/prover_dal/.sqlx/query-9ef2f43e6201cc00a0e1425a666a36532fee1450733849852dfd20e18ded1f03.json diff --git a/core/lib/dal/.sqlx/query-a0e2b2c034cc5f668f0b3d43b94d2e2326d7ace079b095def52723a45b65d3f3.json b/prover/prover_dal/.sqlx/query-a0e2b2c034cc5f668f0b3d43b94d2e2326d7ace079b095def52723a45b65d3f3.json similarity index 100% rename from core/lib/dal/.sqlx/query-a0e2b2c034cc5f668f0b3d43b94d2e2326d7ace079b095def52723a45b65d3f3.json rename to prover/prover_dal/.sqlx/query-a0e2b2c034cc5f668f0b3d43b94d2e2326d7ace079b095def52723a45b65d3f3.json diff --git a/core/lib/dal/.sqlx/query-a4861c931e84d897c27f666de1c5ca679a0459a012899a373c67393d30d12601.json b/prover/prover_dal/.sqlx/query-a4861c931e84d897c27f666de1c5ca679a0459a012899a373c67393d30d12601.json similarity index 100% rename from core/lib/dal/.sqlx/query-a4861c931e84d897c27f666de1c5ca679a0459a012899a373c67393d30d12601.json rename to prover/prover_dal/.sqlx/query-a4861c931e84d897c27f666de1c5ca679a0459a012899a373c67393d30d12601.json diff --git a/core/lib/dal/.sqlx/query-a84ee70bec8c03bd51e1c6bad44c9a64904026506914abae2946e5d353d6a604.json b/prover/prover_dal/.sqlx/query-a84ee70bec8c03bd51e1c6bad44c9a64904026506914abae2946e5d353d6a604.json similarity index 100% rename from core/lib/dal/.sqlx/query-a84ee70bec8c03bd51e1c6bad44c9a64904026506914abae2946e5d353d6a604.json rename to prover/prover_dal/.sqlx/query-a84ee70bec8c03bd51e1c6bad44c9a64904026506914abae2946e5d353d6a604.json diff --git a/core/lib/dal/.sqlx/query-aa91697157517322b0dbb53dca99f41220c51f58a03c61d6b7789eab0504e320.json b/prover/prover_dal/.sqlx/query-aa91697157517322b0dbb53dca99f41220c51f58a03c61d6b7789eab0504e320.json similarity index 100% rename from core/lib/dal/.sqlx/query-aa91697157517322b0dbb53dca99f41220c51f58a03c61d6b7789eab0504e320.json rename to prover/prover_dal/.sqlx/query-aa91697157517322b0dbb53dca99f41220c51f58a03c61d6b7789eab0504e320.json diff --git a/core/lib/dal/.sqlx/query-aaf4fb97c95a5290fb1620cd868477dcf21955e0921ba648ba2e751dbfc3cb45.json b/prover/prover_dal/.sqlx/query-aaf4fb97c95a5290fb1620cd868477dcf21955e0921ba648ba2e751dbfc3cb45.json similarity index 100% rename from core/lib/dal/.sqlx/query-aaf4fb97c95a5290fb1620cd868477dcf21955e0921ba648ba2e751dbfc3cb45.json rename to prover/prover_dal/.sqlx/query-aaf4fb97c95a5290fb1620cd868477dcf21955e0921ba648ba2e751dbfc3cb45.json diff --git a/core/lib/dal/.sqlx/query-af72fabd90eb43fb315f46d7fe9f724216807ffd481cd6f7f19968e42e52b284.json b/prover/prover_dal/.sqlx/query-af72fabd90eb43fb315f46d7fe9f724216807ffd481cd6f7f19968e42e52b284.json similarity index 100% rename from core/lib/dal/.sqlx/query-af72fabd90eb43fb315f46d7fe9f724216807ffd481cd6f7f19968e42e52b284.json rename to prover/prover_dal/.sqlx/query-af72fabd90eb43fb315f46d7fe9f724216807ffd481cd6f7f19968e42e52b284.json diff --git 
a/core/lib/dal/.sqlx/query-afc24bd1407dba82cd3dc9e7ee71ac4ab2d73bda6022700aeb0a630a2563a4b4.json b/prover/prover_dal/.sqlx/query-afc24bd1407dba82cd3dc9e7ee71ac4ab2d73bda6022700aeb0a630a2563a4b4.json similarity index 100% rename from core/lib/dal/.sqlx/query-afc24bd1407dba82cd3dc9e7ee71ac4ab2d73bda6022700aeb0a630a2563a4b4.json rename to prover/prover_dal/.sqlx/query-afc24bd1407dba82cd3dc9e7ee71ac4ab2d73bda6022700aeb0a630a2563a4b4.json diff --git a/core/lib/dal/.sqlx/query-b17c71983da060f08616e001b42f8dcbcb014b4f808c6232abd9a83354c995ac.json b/prover/prover_dal/.sqlx/query-b17c71983da060f08616e001b42f8dcbcb014b4f808c6232abd9a83354c995ac.json similarity index 100% rename from core/lib/dal/.sqlx/query-b17c71983da060f08616e001b42f8dcbcb014b4f808c6232abd9a83354c995ac.json rename to prover/prover_dal/.sqlx/query-b17c71983da060f08616e001b42f8dcbcb014b4f808c6232abd9a83354c995ac.json diff --git a/core/lib/dal/.sqlx/query-b23ddb16513d69331056b94d466663a9c5ea62ea7c99a77941eb8f05d4454125.json b/prover/prover_dal/.sqlx/query-b23ddb16513d69331056b94d466663a9c5ea62ea7c99a77941eb8f05d4454125.json similarity index 100% rename from core/lib/dal/.sqlx/query-b23ddb16513d69331056b94d466663a9c5ea62ea7c99a77941eb8f05d4454125.json rename to prover/prover_dal/.sqlx/query-b23ddb16513d69331056b94d466663a9c5ea62ea7c99a77941eb8f05d4454125.json diff --git a/core/lib/dal/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json b/prover/prover_dal/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json similarity index 100% rename from core/lib/dal/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json rename to prover/prover_dal/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json diff --git a/core/lib/dal/.sqlx/query-b367ecb1ebee86ec598c4079591f8c12deeca6b8843fe3869cc2b02b30da5de6.json b/prover/prover_dal/.sqlx/query-b367ecb1ebee86ec598c4079591f8c12deeca6b8843fe3869cc2b02b30da5de6.json similarity index 100% rename from core/lib/dal/.sqlx/query-b367ecb1ebee86ec598c4079591f8c12deeca6b8843fe3869cc2b02b30da5de6.json rename to prover/prover_dal/.sqlx/query-b367ecb1ebee86ec598c4079591f8c12deeca6b8843fe3869cc2b02b30da5de6.json diff --git a/core/lib/dal/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json b/prover/prover_dal/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json similarity index 100% rename from core/lib/dal/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json rename to prover/prover_dal/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json diff --git a/core/lib/dal/.sqlx/query-b4304b9afb9f838eee1fe95af5fd964d4bb39b9dcd18fb03bc11ce2fb32b7fb3.json b/prover/prover_dal/.sqlx/query-b4304b9afb9f838eee1fe95af5fd964d4bb39b9dcd18fb03bc11ce2fb32b7fb3.json similarity index 100% rename from core/lib/dal/.sqlx/query-b4304b9afb9f838eee1fe95af5fd964d4bb39b9dcd18fb03bc11ce2fb32b7fb3.json rename to prover/prover_dal/.sqlx/query-b4304b9afb9f838eee1fe95af5fd964d4bb39b9dcd18fb03bc11ce2fb32b7fb3.json diff --git a/core/lib/dal/.sqlx/query-b4794e6a0c2366d5d95ab373c310103263af3ff5cb6c9dc5df59d3cd2a5e56b4.json b/prover/prover_dal/.sqlx/query-b4794e6a0c2366d5d95ab373c310103263af3ff5cb6c9dc5df59d3cd2a5e56b4.json similarity index 100% rename from core/lib/dal/.sqlx/query-b4794e6a0c2366d5d95ab373c310103263af3ff5cb6c9dc5df59d3cd2a5e56b4.json rename to 
prover/prover_dal/.sqlx/query-b4794e6a0c2366d5d95ab373c310103263af3ff5cb6c9dc5df59d3cd2a5e56b4.json diff --git a/core/lib/dal/.sqlx/query-bfb80956a18eabf266f5b5a9d62912d57f8eb2a38bdb7884fc812a2897a3a660.json b/prover/prover_dal/.sqlx/query-bfb80956a18eabf266f5b5a9d62912d57f8eb2a38bdb7884fc812a2897a3a660.json similarity index 100% rename from core/lib/dal/.sqlx/query-bfb80956a18eabf266f5b5a9d62912d57f8eb2a38bdb7884fc812a2897a3a660.json rename to prover/prover_dal/.sqlx/query-bfb80956a18eabf266f5b5a9d62912d57f8eb2a38bdb7884fc812a2897a3a660.json diff --git a/core/lib/dal/.sqlx/query-c10cf20825de4d24300c7ec50d4a653852f7e43670076eb2ebcd49542a870539.json b/prover/prover_dal/.sqlx/query-c10cf20825de4d24300c7ec50d4a653852f7e43670076eb2ebcd49542a870539.json similarity index 100% rename from core/lib/dal/.sqlx/query-c10cf20825de4d24300c7ec50d4a653852f7e43670076eb2ebcd49542a870539.json rename to prover/prover_dal/.sqlx/query-c10cf20825de4d24300c7ec50d4a653852f7e43670076eb2ebcd49542a870539.json diff --git a/core/lib/dal/.sqlx/query-c23d5ff919ade5898c6a912780ae899e360650afccb34f5cc301b5cbac4a3d36.json b/prover/prover_dal/.sqlx/query-c23d5ff919ade5898c6a912780ae899e360650afccb34f5cc301b5cbac4a3d36.json similarity index 100% rename from core/lib/dal/.sqlx/query-c23d5ff919ade5898c6a912780ae899e360650afccb34f5cc301b5cbac4a3d36.json rename to prover/prover_dal/.sqlx/query-c23d5ff919ade5898c6a912780ae899e360650afccb34f5cc301b5cbac4a3d36.json diff --git a/core/lib/dal/.sqlx/query-c41312e01aa66897552e8be9acc8d43c31ec7441a7f6c5040e120810ebbb72f7.json b/prover/prover_dal/.sqlx/query-c41312e01aa66897552e8be9acc8d43c31ec7441a7f6c5040e120810ebbb72f7.json similarity index 100% rename from core/lib/dal/.sqlx/query-c41312e01aa66897552e8be9acc8d43c31ec7441a7f6c5040e120810ebbb72f7.json rename to prover/prover_dal/.sqlx/query-c41312e01aa66897552e8be9acc8d43c31ec7441a7f6c5040e120810ebbb72f7.json diff --git a/core/lib/dal/.sqlx/query-c706a49ff54f6b424e24d061fe7ac429aac3c030f7e226a1264243d8cdae038d.json b/prover/prover_dal/.sqlx/query-c706a49ff54f6b424e24d061fe7ac429aac3c030f7e226a1264243d8cdae038d.json similarity index 100% rename from core/lib/dal/.sqlx/query-c706a49ff54f6b424e24d061fe7ac429aac3c030f7e226a1264243d8cdae038d.json rename to prover/prover_dal/.sqlx/query-c706a49ff54f6b424e24d061fe7ac429aac3c030f7e226a1264243d8cdae038d.json diff --git a/core/lib/dal/.sqlx/query-ca9d06141265b8524ee28c55569cb21a635037d89ce24dd3ad58ffaadb59594a.json b/prover/prover_dal/.sqlx/query-ca9d06141265b8524ee28c55569cb21a635037d89ce24dd3ad58ffaadb59594a.json similarity index 100% rename from core/lib/dal/.sqlx/query-ca9d06141265b8524ee28c55569cb21a635037d89ce24dd3ad58ffaadb59594a.json rename to prover/prover_dal/.sqlx/query-ca9d06141265b8524ee28c55569cb21a635037d89ce24dd3ad58ffaadb59594a.json diff --git a/core/lib/dal/.sqlx/query-ce5779092feb8a3d3e2c5e395783e67f08f2ead5f55bfb6594e50346bf9cf2ef.json b/prover/prover_dal/.sqlx/query-ce5779092feb8a3d3e2c5e395783e67f08f2ead5f55bfb6594e50346bf9cf2ef.json similarity index 100% rename from core/lib/dal/.sqlx/query-ce5779092feb8a3d3e2c5e395783e67f08f2ead5f55bfb6594e50346bf9cf2ef.json rename to prover/prover_dal/.sqlx/query-ce5779092feb8a3d3e2c5e395783e67f08f2ead5f55bfb6594e50346bf9cf2ef.json diff --git a/core/lib/dal/.sqlx/query-d7e8eabd7b43ff62838fbc847e4813d2b2d411bd5faf8306cd48db500532b711.json b/prover/prover_dal/.sqlx/query-d7e8eabd7b43ff62838fbc847e4813d2b2d411bd5faf8306cd48db500532b711.json similarity index 100% rename from 
core/lib/dal/.sqlx/query-d7e8eabd7b43ff62838fbc847e4813d2b2d411bd5faf8306cd48db500532b711.json rename to prover/prover_dal/.sqlx/query-d7e8eabd7b43ff62838fbc847e4813d2b2d411bd5faf8306cd48db500532b711.json diff --git a/core/lib/dal/.sqlx/query-d8e3ee346375e4b6a8b2c73a3827e88abd0f8164c2413dc83c91c29665ca645e.json b/prover/prover_dal/.sqlx/query-d8e3ee346375e4b6a8b2c73a3827e88abd0f8164c2413dc83c91c29665ca645e.json similarity index 100% rename from core/lib/dal/.sqlx/query-d8e3ee346375e4b6a8b2c73a3827e88abd0f8164c2413dc83c91c29665ca645e.json rename to prover/prover_dal/.sqlx/query-d8e3ee346375e4b6a8b2c73a3827e88abd0f8164c2413dc83c91c29665ca645e.json diff --git a/core/lib/dal/.sqlx/query-db3e74f0e83ffbf84a6d61e560f2060fbea775dc185f639139fbfd23e4d5f3c6.json b/prover/prover_dal/.sqlx/query-db3e74f0e83ffbf84a6d61e560f2060fbea775dc185f639139fbfd23e4d5f3c6.json similarity index 100% rename from core/lib/dal/.sqlx/query-db3e74f0e83ffbf84a6d61e560f2060fbea775dc185f639139fbfd23e4d5f3c6.json rename to prover/prover_dal/.sqlx/query-db3e74f0e83ffbf84a6d61e560f2060fbea775dc185f639139fbfd23e4d5f3c6.json diff --git a/core/lib/dal/.sqlx/query-df00e33809768120e395d8f740770a4e629b2a1cde641e74e4e55bb100df809f.json b/prover/prover_dal/.sqlx/query-df00e33809768120e395d8f740770a4e629b2a1cde641e74e4e55bb100df809f.json similarity index 100% rename from core/lib/dal/.sqlx/query-df00e33809768120e395d8f740770a4e629b2a1cde641e74e4e55bb100df809f.json rename to prover/prover_dal/.sqlx/query-df00e33809768120e395d8f740770a4e629b2a1cde641e74e4e55bb100df809f.json diff --git a/core/lib/dal/.sqlx/query-e3479d12d9dc97001cf03dc42d9b957e92cd375ec33fe16f855f319ffc0b208e.json b/prover/prover_dal/.sqlx/query-e3479d12d9dc97001cf03dc42d9b957e92cd375ec33fe16f855f319ffc0b208e.json similarity index 100% rename from core/lib/dal/.sqlx/query-e3479d12d9dc97001cf03dc42d9b957e92cd375ec33fe16f855f319ffc0b208e.json rename to prover/prover_dal/.sqlx/query-e3479d12d9dc97001cf03dc42d9b957e92cd375ec33fe16f855f319ffc0b208e.json diff --git a/core/lib/dal/.sqlx/query-e74a34a59e6afda689b0ec9e19071ababa66e4a443fbefbfffca72b7540b075b.json b/prover/prover_dal/.sqlx/query-e74a34a59e6afda689b0ec9e19071ababa66e4a443fbefbfffca72b7540b075b.json similarity index 100% rename from core/lib/dal/.sqlx/query-e74a34a59e6afda689b0ec9e19071ababa66e4a443fbefbfffca72b7540b075b.json rename to prover/prover_dal/.sqlx/query-e74a34a59e6afda689b0ec9e19071ababa66e4a443fbefbfffca72b7540b075b.json diff --git a/core/lib/dal/.sqlx/query-e9adf5b5a1ab84c20a514a7775f91a9984685eaaaa0a8b223410d560a15a3034.json b/prover/prover_dal/.sqlx/query-e9adf5b5a1ab84c20a514a7775f91a9984685eaaaa0a8b223410d560a15a3034.json similarity index 100% rename from core/lib/dal/.sqlx/query-e9adf5b5a1ab84c20a514a7775f91a9984685eaaaa0a8b223410d560a15a3034.json rename to prover/prover_dal/.sqlx/query-e9adf5b5a1ab84c20a514a7775f91a9984685eaaaa0a8b223410d560a15a3034.json diff --git a/core/lib/dal/.sqlx/query-e9ca863d6e77edd39a9fc55700a6686e655206601854799139c22c017a214744.json b/prover/prover_dal/.sqlx/query-e9ca863d6e77edd39a9fc55700a6686e655206601854799139c22c017a214744.json similarity index 100% rename from core/lib/dal/.sqlx/query-e9ca863d6e77edd39a9fc55700a6686e655206601854799139c22c017a214744.json rename to prover/prover_dal/.sqlx/query-e9ca863d6e77edd39a9fc55700a6686e655206601854799139c22c017a214744.json diff --git a/core/lib/dal/.sqlx/query-ec04b89218111a5dc8d5ade506ac3465e2211ef3013386feb12d4cc04e0eade9.json 
b/prover/prover_dal/.sqlx/query-ec04b89218111a5dc8d5ade506ac3465e2211ef3013386feb12d4cc04e0eade9.json similarity index 100% rename from core/lib/dal/.sqlx/query-ec04b89218111a5dc8d5ade506ac3465e2211ef3013386feb12d4cc04e0eade9.json rename to prover/prover_dal/.sqlx/query-ec04b89218111a5dc8d5ade506ac3465e2211ef3013386feb12d4cc04e0eade9.json diff --git a/core/lib/dal/.sqlx/query-edc61e1285bf6d3837acc67af4f15aaade450980719933089824eb8c494d64a4.json b/prover/prover_dal/.sqlx/query-edc61e1285bf6d3837acc67af4f15aaade450980719933089824eb8c494d64a4.json similarity index 100% rename from core/lib/dal/.sqlx/query-edc61e1285bf6d3837acc67af4f15aaade450980719933089824eb8c494d64a4.json rename to prover/prover_dal/.sqlx/query-edc61e1285bf6d3837acc67af4f15aaade450980719933089824eb8c494d64a4.json diff --git a/core/lib/dal/.sqlx/query-ef687be83e496d6647e4dfef9eabae63443c51deb818dd0affd1a0949b161737.json b/prover/prover_dal/.sqlx/query-ef687be83e496d6647e4dfef9eabae63443c51deb818dd0affd1a0949b161737.json similarity index 100% rename from core/lib/dal/.sqlx/query-ef687be83e496d6647e4dfef9eabae63443c51deb818dd0affd1a0949b161737.json rename to prover/prover_dal/.sqlx/query-ef687be83e496d6647e4dfef9eabae63443c51deb818dd0affd1a0949b161737.json diff --git a/core/lib/dal/.sqlx/query-f4362a61ab05af3d71a3232d2f017db60405a887f9f7fa0ca60aa7fc879ce630.json b/prover/prover_dal/.sqlx/query-f4362a61ab05af3d71a3232d2f017db60405a887f9f7fa0ca60aa7fc879ce630.json similarity index 100% rename from core/lib/dal/.sqlx/query-f4362a61ab05af3d71a3232d2f017db60405a887f9f7fa0ca60aa7fc879ce630.json rename to prover/prover_dal/.sqlx/query-f4362a61ab05af3d71a3232d2f017db60405a887f9f7fa0ca60aa7fc879ce630.json diff --git a/core/lib/dal/.sqlx/query-f717ca5d0890759496739a678955e6f8b7f88a0894a7f9e27fc26f93997d37c7.json b/prover/prover_dal/.sqlx/query-f717ca5d0890759496739a678955e6f8b7f88a0894a7f9e27fc26f93997d37c7.json similarity index 100% rename from core/lib/dal/.sqlx/query-f717ca5d0890759496739a678955e6f8b7f88a0894a7f9e27fc26f93997d37c7.json rename to prover/prover_dal/.sqlx/query-f717ca5d0890759496739a678955e6f8b7f88a0894a7f9e27fc26f93997d37c7.json diff --git a/core/lib/dal/.sqlx/query-fcddeb96dcd1611dedb2091c1be304e8a35fd65bf37e976b7106f57c57e70b9b.json b/prover/prover_dal/.sqlx/query-fcddeb96dcd1611dedb2091c1be304e8a35fd65bf37e976b7106f57c57e70b9b.json similarity index 100% rename from core/lib/dal/.sqlx/query-fcddeb96dcd1611dedb2091c1be304e8a35fd65bf37e976b7106f57c57e70b9b.json rename to prover/prover_dal/.sqlx/query-fcddeb96dcd1611dedb2091c1be304e8a35fd65bf37e976b7106f57c57e70b9b.json diff --git a/prover/prover_dal/Cargo.toml b/prover/prover_dal/Cargo.toml index 3c1eb89cc435..d117ef12c725 100644 --- a/prover/prover_dal/Cargo.toml +++ b/prover/prover_dal/Cargo.toml @@ -2,7 +2,27 @@ name = "prover_dal" version = "0.1.0" edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +authors = ["The Matter Labs Team "] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync-era" +license = "MIT OR Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] [dependencies] +zksync_db_connection = { path = "../../core/lib/db_connection" } +zksync_basic_types = { path = "../../core/lib/basic_types" } + +strum = { version = "0.24", features = ["derive"] } +sqlx = { version = "0.7.3", default-features = false, features = [ + "runtime-tokio", + "tls-native-tls", + "macros", + "postgres", + "bigdecimal", + "rust_decimal", + "chrono", + "json", 
+ "migrate", + "ipnetwork", +] } diff --git a/core/lib/dal/doc/FriProofCompressorDal.md b/prover/prover_dal/doc/FriProofCompressorDal.md similarity index 100% rename from core/lib/dal/doc/FriProofCompressorDal.md rename to prover/prover_dal/doc/FriProofCompressorDal.md diff --git a/core/lib/dal/doc/FriProverDal.md b/prover/prover_dal/doc/FriProverDal.md similarity index 100% rename from core/lib/dal/doc/FriProverDal.md rename to prover/prover_dal/doc/FriProverDal.md diff --git a/core/lib/dal/doc/FriWitnessGeneratorDal.md b/prover/prover_dal/doc/FriWitnessGeneratorDal.md similarity index 100% rename from core/lib/dal/doc/FriWitnessGeneratorDal.md rename to prover/prover_dal/doc/FriWitnessGeneratorDal.md diff --git a/core/lib/dal/src/fri_gpu_prover_queue_dal.rs b/prover/prover_dal/src/fri_gpu_prover_queue_dal.rs similarity index 94% rename from core/lib/dal/src/fri_gpu_prover_queue_dal.rs rename to prover/prover_dal/src/fri_gpu_prover_queue_dal.rs index 8ea262f74030..1d31f306c0ff 100644 --- a/core/lib/dal/src/fri_gpu_prover_queue_dal.rs +++ b/prover/prover_dal/src/fri_gpu_prover_queue_dal.rs @@ -1,14 +1,13 @@ use std::time::Duration; -use crate::{ - fri_prover_dal::types::{GpuProverInstanceStatus, SocketAddress}, - time_utils::pg_interval_from_duration, - StorageProcessor, -}; +use zksync_basic_types::prover_dal::{GpuProverInstanceStatus, SocketAddress}; +use zksync_db_connection::processor::StorageProcessor; + +use crate::{pg_interval_from_duration, Prover}; #[derive(Debug)] pub struct FriGpuProverQueueDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Prover>, } impl FriGpuProverQueueDal<'_, '_> { diff --git a/core/lib/dal/src/fri_proof_compressor_dal.rs b/prover/prover_dal/src/fri_proof_compressor_dal.rs similarity index 97% rename from core/lib/dal/src/fri_proof_compressor_dal.rs rename to prover/prover_dal/src/fri_proof_compressor_dal.rs index 19e737221db7..ffcba4a8bf3c 100644 --- a/core/lib/dal/src/fri_proof_compressor_dal.rs +++ b/prover/prover_dal/src/fri_proof_compressor_dal.rs @@ -3,17 +3,17 @@ use std::{collections::HashMap, str::FromStr, time::Duration}; use sqlx::Row; use strum::{Display, EnumString}; -use zksync_types::L1BatchNumber; - -use crate::{ - fri_prover_dal::types::{JobCountStatistics, StuckJobs}, - time_utils::{duration_to_naive_time, pg_interval_from_duration}, - StorageProcessor, +use zksync_basic_types::{ + prover_dal::{JobCountStatistics, StuckJobs}, + L1BatchNumber, }; +use zksync_db_connection::processor::StorageProcessor; + +use crate::{duration_to_naive_time, pg_interval_from_duration, Prover}; #[derive(Debug)] pub struct FriProofCompressorDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Prover>, } #[derive(Debug, EnumString, Display)] diff --git a/core/lib/dal/src/fri_protocol_versions_dal.rs b/prover/prover_dal/src/fri_protocol_versions_dal.rs similarity index 92% rename from core/lib/dal/src/fri_protocol_versions_dal.rs rename to prover/prover_dal/src/fri_protocol_versions_dal.rs index d982d85771e9..acf4a35d1d4c 100644 --- a/core/lib/dal/src/fri_protocol_versions_dal.rs +++ b/prover/prover_dal/src/fri_protocol_versions_dal.rs @@ -1,12 +1,13 @@ use std::convert::TryFrom; -use zksync_types::protocol_version::{FriProtocolVersionId, L1VerifierConfig}; +use zksync_basic_types::protocol_version::{FriProtocolVersionId, L1VerifierConfig}; +use zksync_db_connection::processor::StorageProcessor; -use crate::StorageProcessor; +use 
crate::Prover; #[derive(Debug)] pub struct FriProtocolVersionsDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub storage: &'a mut StorageProcessor<'c, Prover>, } impl FriProtocolVersionsDal<'_, '_> { diff --git a/core/lib/dal/src/fri_prover_dal.rs b/prover/prover_dal/src/fri_prover_dal.rs similarity index 72% rename from core/lib/dal/src/fri_prover_dal.rs rename to prover/prover_dal/src/fri_prover_dal.rs index f9ba167e72cb..e7eba9f11ba5 100644 --- a/core/lib/dal/src/fri_prover_dal.rs +++ b/prover/prover_dal/src/fri_prover_dal.rs @@ -1,244 +1,21 @@ #![doc = include_str!("../doc/FriProverDal.md")] use std::{collections::HashMap, convert::TryFrom, time::Duration}; -use zksync_types::{ +use zksync_basic_types::{ basic_fri_types::{AggregationRound, CircuitIdRoundTuple}, protocol_version::FriProtocolVersionId, + prover_dal::{FriProverJobMetadata, JobCountStatistics, StuckJobs, EIP_4844_CIRCUIT_ID}, L1BatchNumber, }; - -use self::types::{FriProverJobMetadata, JobCountStatistics, StuckJobs}; -use crate::{ - instrument::InstrumentExt, - metrics::MethodLatency, - time_utils::{duration_to_naive_time, pg_interval_from_duration}, - StorageProcessor, +use zksync_db_connection::{ + instrument::InstrumentExt, metrics::MethodLatency, processor::StorageProcessor, }; -// TODO (PLA-775): Should not be an embedded submodule in a concrete DAL file. -pub mod types { - //! Types exposed by the prover DAL for general-purpose use. - - use std::{net::IpAddr, ops::Add}; - - use sqlx::types::chrono::{DateTime, Utc}; - use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; - - // This currently lives in `zksync_prover_types` -- we don't want a dependency between prover types (`zkevm_test_harness`) and DAL. - // This will be gone as part of 1.5.0, when EIP4844 becomes normal jobs, rather than special cased ones. - pub(crate) const EIP_4844_CIRCUIT_ID: u8 = 255; - - #[derive(Debug, Clone)] - pub struct FriProverJobMetadata { - pub id: u32, - pub block_number: L1BatchNumber, - pub circuit_id: u8, - pub aggregation_round: AggregationRound, - pub sequence_number: usize, - pub depth: u16, - pub is_node_final_proof: bool, - } - - #[derive(Debug, Clone, Copy, Default)] - pub struct JobCountStatistics { - pub queued: usize, - pub in_progress: usize, - pub failed: usize, - pub successful: usize, - } - - impl Add for JobCountStatistics { - type Output = JobCountStatistics; - - fn add(self, rhs: Self) -> Self::Output { - Self { - queued: self.queued + rhs.queued, - in_progress: self.in_progress + rhs.in_progress, - failed: self.failed + rhs.failed, - successful: self.successful + rhs.successful, - } - } - } - - #[derive(Debug)] - pub struct StuckJobs { - pub id: u64, - pub status: String, - pub attempts: u64, - } - - // TODO (PLA-774): Redundant structure, should be replaced with `std::net::SocketAddr`. 
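The `SocketAddress` type this TODO refers to is nothing more than a host/port pair, which is why the diff can relocate it to `zksync_basic_types::prover_dal` unchanged and why swapping it for `std::net::SocketAddr` later stays trivial: the bridge is just the two `From` conversions defined in this hunk. Below is a minimal standalone sketch of that conversion pair (the port value in the demo is arbitrary); it mirrors the definitions being moved rather than adding anything new.

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

// Host/port pair as defined in the prover DAL types being moved in this patch.
#[derive(Debug, Clone)]
pub struct SocketAddress {
    pub host: IpAddr,
    pub port: u16,
}

impl From<SocketAddress> for SocketAddr {
    fn from(address: SocketAddress) -> Self {
        Self::new(address.host, address.port)
    }
}

impl From<SocketAddr> for SocketAddress {
    fn from(address: SocketAddr) -> Self {
        Self {
            host: address.ip(),
            port: address.port(),
        }
    }
}

fn main() {
    // Round-trip: prover-side address -> standard library address -> back.
    let original = SocketAddress {
        host: IpAddr::V4(Ipv4Addr::LOCALHOST),
        port: 3316, // arbitrary example port
    };
    let std_addr: SocketAddr = original.clone().into();
    let back: SocketAddress = std_addr.into();
    assert_eq!(back.port, original.port);
}
```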
- #[derive(Debug, Clone)] - pub struct SocketAddress { - pub host: IpAddr, - pub port: u16, - } - - impl From for std::net::SocketAddr { - fn from(socket_address: SocketAddress) -> Self { - Self::new(socket_address.host, socket_address.port) - } - } - - impl From for SocketAddress { - fn from(socket_address: std::net::SocketAddr) -> Self { - Self { - host: socket_address.ip(), - port: socket_address.port(), - } - } - } - - #[derive(Debug, Clone)] - pub struct LeafAggregationJobMetadata { - pub id: u32, - pub block_number: L1BatchNumber, - pub circuit_id: u8, - pub prover_job_ids_for_proofs: Vec, - } - - #[derive(Debug, Clone)] - pub struct NodeAggregationJobMetadata { - pub id: u32, - pub block_number: L1BatchNumber, - pub circuit_id: u8, - pub depth: u16, - pub prover_job_ids_for_proofs: Vec, - } - - #[derive(Debug)] - pub struct JobPosition { - pub aggregation_round: AggregationRound, - pub sequence_number: usize, - } - - #[derive(Debug, Default)] - pub struct ProverJobStatusFailed { - pub started_at: DateTime, - pub error: String, - } - - #[derive(Debug)] - pub struct ProverJobStatusSuccessful { - pub started_at: DateTime, - pub time_taken: chrono::Duration, - } - - impl Default for ProverJobStatusSuccessful { - fn default() -> Self { - ProverJobStatusSuccessful { - started_at: DateTime::default(), - time_taken: chrono::Duration::zero(), - } - } - } - - #[derive(Debug, Default)] - pub struct ProverJobStatusInProgress { - pub started_at: DateTime, - } - - #[derive(Debug)] - pub struct WitnessJobStatusSuccessful { - pub started_at: DateTime, - pub time_taken: chrono::Duration, - } - - impl Default for WitnessJobStatusSuccessful { - fn default() -> Self { - WitnessJobStatusSuccessful { - started_at: DateTime::default(), - time_taken: chrono::Duration::zero(), - } - } - } - - #[derive(Debug, Default)] - pub struct WitnessJobStatusFailed { - pub started_at: DateTime, - pub error: String, - } - - #[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)] - pub enum ProverJobStatus { - #[strum(serialize = "queued")] - Queued, - #[strum(serialize = "in_progress")] - InProgress(ProverJobStatusInProgress), - #[strum(serialize = "successful")] - Successful(ProverJobStatusSuccessful), - #[strum(serialize = "failed")] - Failed(ProverJobStatusFailed), - #[strum(serialize = "skipped")] - Skipped, - #[strum(serialize = "ignored")] - Ignored, - } - - #[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)] - pub enum WitnessJobStatus { - #[strum(serialize = "failed")] - Failed(WitnessJobStatusFailed), - #[strum(serialize = "skipped")] - Skipped, - #[strum(serialize = "successful")] - Successful(WitnessJobStatusSuccessful), - #[strum(serialize = "waiting_for_artifacts")] - WaitingForArtifacts, - #[strum(serialize = "waiting_for_proofs")] - WaitingForProofs, - #[strum(serialize = "in_progress")] - InProgress, - #[strum(serialize = "queued")] - Queued, - } - - #[derive(Debug)] - pub struct WitnessJobInfo { - pub block_number: L1BatchNumber, - pub created_at: DateTime, - pub updated_at: DateTime, - pub status: WitnessJobStatus, - pub position: JobPosition, - } - - #[derive(Debug)] - pub struct ProverJobInfo { - pub id: u32, - pub block_number: L1BatchNumber, - pub circuit_type: String, - pub position: JobPosition, - pub input_length: u64, - pub status: ProverJobStatus, - pub attempts: u32, - pub created_at: DateTime, - pub updated_at: DateTime, - } - - #[derive(Debug)] - pub struct JobExtendedStatistics { - pub successful_padding: L1BatchNumber, - pub queued_padding: L1BatchNumber, - 
pub queued_padding_len: u32, - pub active_area: Vec, - } - - #[derive(Debug, Copy, Clone)] - pub enum GpuProverInstanceStatus { - // The instance is available for processing. - Available, - // The instance is running at full capacity. - Full, - // The instance is reserved by an synthesizer. - Reserved, - // The instance is not alive anymore. - Dead, - } -} +use crate::{duration_to_naive_time, pg_interval_from_duration, Prover}; #[derive(Debug)] pub struct FriProverDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Prover>, } impl FriProverDal<'_, '_> { @@ -257,7 +34,7 @@ impl FriProverDal<'_, '_> { // EIP 4844 are special cased. // There exist only 2 blobs that are calculated at basic layer and injected straight into scheduler proof (as of 1.4.2). // As part of 1.5.0, these will be treated as regular circuits, having basic, leaf, node and finally being attached as regular node proofs to the scheduler. - let is_node_final_proof = *circuit_id == types::EIP_4844_CIRCUIT_ID; + let is_node_final_proof = *circuit_id == EIP_4844_CIRCUIT_ID; self.insert_prover_job( l1_batch_number, *circuit_id, diff --git a/core/lib/dal/src/fri_scheduler_dependency_tracker_dal.rs b/prover/prover_dal/src/fri_scheduler_dependency_tracker_dal.rs similarity index 94% rename from core/lib/dal/src/fri_scheduler_dependency_tracker_dal.rs rename to prover/prover_dal/src/fri_scheduler_dependency_tracker_dal.rs index b6f2058f6cf9..0182e0388779 100644 --- a/core/lib/dal/src/fri_scheduler_dependency_tracker_dal.rs +++ b/prover/prover_dal/src/fri_scheduler_dependency_tracker_dal.rs @@ -1,10 +1,13 @@ -use zksync_types::{basic_fri_types::FinalProofIds, L1BatchNumber}; +use zksync_basic_types::{ + basic_fri_types::FinalProofIds, prover_dal::EIP_4844_CIRCUIT_ID, L1BatchNumber, +}; +use zksync_db_connection::processor::StorageProcessor; -use crate::{fri_prover_dal::types, StorageProcessor}; +use crate::Prover; #[derive(Debug)] pub struct FriSchedulerDependencyTrackerDal<'a, 'c> { - pub storage: &'a mut StorageProcessor<'c>, + pub storage: &'a mut StorageProcessor<'c, Prover>, } impl FriSchedulerDependencyTrackerDal<'_, '_> { @@ -76,7 +79,7 @@ impl FriSchedulerDependencyTrackerDal<'_, '_> { // This will be changed when 1.5.0 will land and there will be a single node proof for blobs. 
blob_ordering: usize, ) { - let query = if circuit_id != types::EIP_4844_CIRCUIT_ID { + let query = if circuit_id != EIP_4844_CIRCUIT_ID { format!( r#" UPDATE scheduler_dependency_tracker_fri diff --git a/core/lib/dal/src/fri_witness_generator_dal.rs b/prover/prover_dal/src/fri_witness_generator_dal.rs similarity index 99% rename from core/lib/dal/src/fri_witness_generator_dal.rs rename to prover/prover_dal/src/fri_witness_generator_dal.rs index 318b470d2868..f9be7912ebdc 100644 --- a/core/lib/dal/src/fri_witness_generator_dal.rs +++ b/prover/prover_dal/src/fri_witness_generator_dal.rs @@ -2,24 +2,21 @@ use std::{collections::HashMap, convert::TryFrom, time::Duration}; use sqlx::Row; -use zksync_types::{ +use zksync_basic_types::{ basic_fri_types::{AggregationRound, Eip4844Blobs}, protocol_version::FriProtocolVersionId, - L1BatchNumber, -}; - -use crate::{ - fri_prover_dal::types::{ + prover_dal::{ JobCountStatistics, LeafAggregationJobMetadata, NodeAggregationJobMetadata, StuckJobs, }, - metrics::MethodLatency, - time_utils::{duration_to_naive_time, pg_interval_from_duration}, - StorageProcessor, + L1BatchNumber, }; +use zksync_db_connection::{metrics::MethodLatency, processor::StorageProcessor}; + +use crate::{duration_to_naive_time, pg_interval_from_duration, Prover}; #[derive(Debug)] pub struct FriWitnessGeneratorDal<'a, 'c> { - pub(crate) storage: &'a mut StorageProcessor<'c>, + pub(crate) storage: &'a mut StorageProcessor<'c, Prover>, } #[derive(Debug, strum::Display, strum::EnumString, strum::AsRefStr)] diff --git a/prover/prover_dal/src/lib.rs b/prover/prover_dal/src/lib.rs index 8b137891791f..af030f7d0dac 100644 --- a/prover/prover_dal/src/lib.rs +++ b/prover/prover_dal/src/lib.rs @@ -1 +1,79 @@ +use zksync_db_connection::processor::StorageMarker; +pub use zksync_db_connection::{ + connection::ConnectionPool, + processor::StorageProcessor, + utils::{duration_to_naive_time, pg_interval_from_duration}, +}; +use crate::{ + fri_gpu_prover_queue_dal::FriGpuProverQueueDal, + fri_proof_compressor_dal::FriProofCompressorDal, + fri_protocol_versions_dal::FriProtocolVersionsDal, fri_prover_dal::FriProverDal, + fri_scheduler_dependency_tracker_dal::FriSchedulerDependencyTrackerDal, + fri_witness_generator_dal::FriWitnessGeneratorDal, +}; + +pub mod fri_gpu_prover_queue_dal; +pub mod fri_proof_compressor_dal; +pub mod fri_protocol_versions_dal; +pub mod fri_prover_dal; +pub mod fri_scheduler_dependency_tracker_dal; +pub mod fri_witness_generator_dal; + +// This module is private and serves as a way to seal the trait. +mod private { + pub trait Sealed {} +} + +// Here we are making the trait sealed, because it should be public to function correctly, but we don't +// want to allow any other downstream implementations of this trait. +pub trait ProverDals<'a>: private::Sealed +where + Self: 'a, +{ + fn fri_witness_generator_dal(&mut self) -> FriWitnessGeneratorDal<'_, 'a>; + + fn fri_prover_jobs_dal(&mut self) -> FriProverDal<'_, 'a>; + + fn fri_scheduler_dependency_tracker_dal(&mut self) -> FriSchedulerDependencyTrackerDal<'_, 'a>; + + fn fri_gpu_prover_queue_dal(&mut self) -> FriGpuProverQueueDal<'_, 'a>; + + fn fri_protocol_versions_dal(&mut self) -> FriProtocolVersionsDal<'_, 'a>; + + fn fri_proof_compressor_dal(&mut self) -> FriProofCompressorDal<'_, 'a>; +} + +#[derive(Clone, Debug)] +pub struct Prover; + +// Implement the marker trait for the Prover to be able to use it in StorageProcessor. +impl StorageMarker for Prover {} +// Implement the sealed trait for the StorageProcessor. 
+impl private::Sealed for StorageProcessor<'_, Prover> {} + +impl<'a> ProverDals<'a> for StorageProcessor<'a, Prover> { + fn fri_witness_generator_dal(&mut self) -> FriWitnessGeneratorDal<'_, 'a> { + FriWitnessGeneratorDal { storage: self } + } + + fn fri_prover_jobs_dal(&mut self) -> FriProverDal<'_, 'a> { + FriProverDal { storage: self } + } + + fn fri_scheduler_dependency_tracker_dal(&mut self) -> FriSchedulerDependencyTrackerDal<'_, 'a> { + FriSchedulerDependencyTrackerDal { storage: self } + } + + fn fri_gpu_prover_queue_dal(&mut self) -> FriGpuProverQueueDal<'_, 'a> { + FriGpuProverQueueDal { storage: self } + } + + fn fri_protocol_versions_dal(&mut self) -> FriProtocolVersionsDal<'_, 'a> { + FriProtocolVersionsDal { storage: self } + } + + fn fri_proof_compressor_dal(&mut self) -> FriProofCompressorDal<'_, 'a> { + FriProofCompressorDal { storage: self } + } +} diff --git a/prover/prover_fri/Cargo.toml b/prover/prover_fri/Cargo.toml index 67ee03bcf1f2..1b64b47410ef 100644 --- a/prover/prover_fri/Cargo.toml +++ b/prover/prover_fri/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" } zksync_types = { path = "../../core/lib/types" } -zksync_dal = { path = "../../core/lib/dal" } +prover_dal = { path = "../prover_dal" } zksync_config = { path = "../../core/lib/config" } zksync_env_config = { path = "../../core/lib/env_config" } prometheus_exporter = { path = "../../core/lib/prometheus_exporter" } diff --git a/prover/prover_fri/src/gpu_prover_job_processor.rs b/prover/prover_fri/src/gpu_prover_job_processor.rs index e758a0cbd584..481dda7e4f9b 100644 --- a/prover/prover_fri/src/gpu_prover_job_processor.rs +++ b/prover/prover_fri/src/gpu_prover_job_processor.rs @@ -3,12 +3,12 @@ pub mod gpu_prover { use std::{collections::HashMap, sync::Arc, time::Instant}; use anyhow::Context as _; + use prover_dal::{ConnectionPool, ProverDals}; use shivini::{ gpu_proof_config::GpuProofConfig, gpu_prove_from_external_witness_data, ProverContext, }; use tokio::task::JoinHandle; use zksync_config::configs::{fri_prover_group::FriProverGroupConfig, FriProverConfig}; - use zksync_dal::{fri_prover_dal::types::SocketAddress, ConnectionPool}; use zksync_env_config::FromEnv; use zksync_object_store::ObjectStore; use zksync_prover_fri_types::{ @@ -29,7 +29,7 @@ pub mod gpu_prover { CircuitWrapper, FriProofWrapper, ProverServiceDataKey, WitnessVectorArtifacts, }; use zksync_queued_job_processor::{async_trait, JobProcessor}; - use zksync_types::basic_fri_types::CircuitIdRoundTuple; + use zksync_types::{basic_fri_types::CircuitIdRoundTuple, prover_dal::SocketAddress}; use zksync_vk_setup_data_server_fri::{keystore::Keystore, GoldilocksGpuProverSetupData}; use crate::{ @@ -53,7 +53,7 @@ pub mod gpu_prover { blob_store: Arc, public_blob_store: Option>, config: Arc, - prover_connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, setup_load_mode: SetupLoadMode, // Only pick jobs for the configured circuit id and aggregation rounds. // Empty means all jobs are picked. 
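The `prover_dal/src/lib.rs` addition above is the core of the split: `Prover` is a zero-sized marker, `StorageProcessor<'_, Prover>` is the only implementor of the sealed `ProverDals` trait, and every trait method returns a prover-side DAL that mutably borrows the processor. A hedged sketch of how a prover component would consume this API follows; the `access_storage` accessor name and its exact return type are assumptions, since this part of the patch defines the trait but not its call sites.

```rust
use prover_dal::{ConnectionPool, Prover, ProverDals};

// Sketch: a task that can only ever see the prover database. Because the pool is
// parameterized by the `Prover` marker, the server-side DALs (exposed via the
// `ServerDals`-style trait on the core side) are simply not reachable from the
// storage processor this pool yields.
async fn poll_witness_jobs(pool: ConnectionPool<Prover>) -> anyhow::Result<()> {
    // Assumed accessor name; the patch shows pools being built, not this call.
    let mut storage = pool.access_storage().await?;

    // `fri_witness_generator_dal()` comes from the sealed `ProverDals` trait shown
    // above and mutably borrows `storage`, so only one DAL handle is live at a time.
    let _witness_dal = storage.fri_witness_generator_dal();
    // ... query or update witness-generation jobs here ...
    Ok(())
}
```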
@@ -70,7 +70,7 @@ pub mod gpu_prover { blob_store: Arc<dyn ObjectStore>, public_blob_store: Option<Arc<dyn ObjectStore>>, config: FriProverConfig, - prover_connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool<Prover>, setup_load_mode: SetupLoadMode, circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>, witness_vector_queue: SharedWitnessVectorQueue, diff --git a/prover/prover_fri/src/main.rs b/prover/prover_fri/src/main.rs index 53e22b7563e3..dc1a459253bf 100644 --- a/prover/prover_fri/src/main.rs +++ b/prover/prover_fri/src/main.rs @@ -4,6 +4,7 @@ use std::{future::Future, sync::Arc}; use anyhow::Context as _; use local_ip_address::local_ip; use prometheus_exporter::PrometheusExporterConfig; +use prover_dal::{ConnectionPool, Prover, ProverDals}; use tokio::{ sync::{oneshot, watch::Receiver}, task::JoinHandle, @@ -11,10 +12,6 @@ use tokio::{ use zksync_config::configs::{ fri_prover_group::FriProverGroupConfig, FriProverConfig, ObservabilityConfig, PostgresConfig, }; -use zksync_dal::{ - fri_prover_dal::types::{GpuProverInstanceStatus, SocketAddress}, - ConnectionPool, -}; use zksync_env_config::{ object_store::{ProverObjectStoreConfig, PublicObjectStoreConfig}, FromEnv, @@ -22,7 +19,10 @@ use zksync_env_config::{ use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_prover_fri_utils::{get_all_circuit_id_round_tuples_for, region_fetcher::get_zone}; use zksync_queued_job_processor::JobProcessor; -use zksync_types::basic_fri_types::CircuitIdRoundTuple; +use zksync_types::{ + basic_fri_types::CircuitIdRoundTuple, + prover_dal::{GpuProverInstanceStatus, SocketAddress}, +}; use zksync_utils::wait_for_tasks::wait_for_tasks; mod gpu_prover_job_processor; @@ -33,7 +33,7 @@ mod utils; async fn graceful_shutdown(port: u16) -> anyhow::Result<impl Future<Output = ()>> { let postgres_config = PostgresConfig::from_env().context("PostgresConfig::from_env()")?; - let pool = ConnectionPool::singleton(postgres_config.prover_url()?) + let pool = ConnectionPool::<Prover>::singleton(postgres_config.prover_url()?) 
.build() .await .context("failed to build a connection pool")?; @@ -171,7 +171,7 @@ async fn get_prover_tasks( stop_receiver: Receiver<bool>, store_factory: ObjectStoreFactory, public_blob_store: Option<Arc<dyn ObjectStore>>, - pool: ConnectionPool, + pool: ConnectionPool<Prover>, circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>, ) -> anyhow::Result<Vec<JoinHandle<anyhow::Result<()>>>> { use zksync_vk_setup_data_server_fri::commitment_utils::get_cached_commitments; @@ -205,7 +205,7 @@ async fn get_prover_tasks( stop_receiver: Receiver<bool>, store_factory: ObjectStoreFactory, public_blob_store: Option<Arc<dyn ObjectStore>>, - pool: ConnectionPool, + pool: ConnectionPool<Prover>, circuit_ids_for_round_to_be_proven: Vec<CircuitIdRoundTuple>, ) -> anyhow::Result<Vec<JoinHandle<anyhow::Result<()>>>> { use gpu_prover_job_processor::gpu_prover; diff --git a/prover/prover_fri/src/prover_job_processor.rs b/prover/prover_fri/src/prover_job_processor.rs index e1e5fdebef6d..6067496b4953 100644 --- a/prover/prover_fri/src/prover_job_processor.rs +++ b/prover/prover_fri/src/prover_job_processor.rs @@ -2,12 +2,12 @@ use std::{collections::HashMap, sync::Arc, time::Instant}; use anyhow::Context as _; use circuit_definitions::{circuit_definitions::eip4844::EIP4844Circuit, eip4844_proof_config}; +use prover_dal::{ConnectionPool, ProverDals}; use tokio::task::JoinHandle; use zkevm_test_harness::prover_utils::{ prove_base_layer_circuit, prove_eip4844_circuit, prove_recursion_layer_circuit, }; use zksync_config::configs::{fri_prover_group::FriProverGroupConfig, FriProverConfig}; -use zksync_dal::ConnectionPool; use zksync_env_config::FromEnv; use zksync_object_store::ObjectStore; use zksync_prover_fri_types::{ @@ -44,7 +44,7 @@ pub struct Prover { blob_store: Arc<dyn ObjectStore>, public_blob_store: Option<Arc<dyn ObjectStore>>, config: Arc<FriProverConfig>, - prover_connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool<Prover>, setup_load_mode: SetupLoadMode, // Only pick jobs for the configured circuit id and aggregation rounds. // Empty means all jobs are picked. 
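The `main.rs` hunks above show the only call-site change the pool itself needs: the `::<Prover>` turbofish, which pins the pool (and every `StorageProcessor` it yields) to the prover database. A condensed sketch of that construction path, using the same config helpers as the patch; error handling is collapsed into `anyhow::Result`.

```rust
use anyhow::Context as _;
use prover_dal::{ConnectionPool, Prover};
use zksync_config::configs::PostgresConfig;
use zksync_env_config::FromEnv;

// Mirrors the `graceful_shutdown` / `main` hunks above: read the Postgres config
// from the environment and build a single-connection pool typed to `Prover`.
async fn build_prover_pool() -> anyhow::Result<ConnectionPool<Prover>> {
    let postgres_config = PostgresConfig::from_env().context("PostgresConfig::from_env()")?;
    ConnectionPool::<Prover>::singleton(postgres_config.prover_url()?)
        .build()
        .await
        .context("failed to build a connection pool")
}
```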
@@ -58,7 +58,7 @@ impl Prover { blob_store: Arc, public_blob_store: Option>, config: FriProverConfig, - prover_connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, setup_load_mode: SetupLoadMode, circuit_ids_for_round_to_be_proven: Vec, vk_commitments: L1VerifierConfig, diff --git a/prover/prover_fri/src/socket_listener.rs b/prover/prover_fri/src/socket_listener.rs index 5bf4f587b2d0..75f5f4dab46a 100644 --- a/prover/prover_fri/src/socket_listener.rs +++ b/prover/prover_fri/src/socket_listener.rs @@ -3,17 +3,15 @@ pub mod gpu_socket_listener { use std::{net::SocketAddr, time::Instant}; use anyhow::Context as _; + use prover_dal::{ConnectionPool, Prover, ProverDals}; use tokio::{ io::copy, net::{TcpListener, TcpStream}, sync::watch, }; - use zksync_dal::{ - fri_prover_dal::types::{GpuProverInstanceStatus, SocketAddress}, - ConnectionPool, - }; use zksync_object_store::bincode; use zksync_prover_fri_types::WitnessVectorArtifacts; + use zksync_types::prover_dal::{GpuProverInstanceStatus, SocketAddress}; use crate::{ metrics::METRICS, @@ -23,7 +21,7 @@ pub mod gpu_socket_listener { pub(crate) struct SocketListener { address: SocketAddress, queue: SharedWitnessVectorQueue, - pool: ConnectionPool, + pool: ConnectionPool, specialized_prover_group_id: u8, zone: String, } @@ -32,7 +30,7 @@ pub mod gpu_socket_listener { pub fn new( address: SocketAddress, queue: SharedWitnessVectorQueue, - pool: ConnectionPool, + pool: ConnectionPool, specialized_prover_group_id: u8, zone: String, ) -> Self { diff --git a/prover/prover_fri/src/utils.rs b/prover/prover_fri/src/utils.rs index 05f2f872ca8e..86f24f9f40b0 100644 --- a/prover/prover_fri/src/utils.rs +++ b/prover/prover_fri/src/utils.rs @@ -2,11 +2,11 @@ use std::{sync::Arc, time::Instant}; +use prover_dal::{Prover, ProverDals, StorageProcessor}; use tokio::sync::Mutex; use zkevm_test_harness::prover_utils::{ verify_base_layer_proof, verify_eip4844_proof, verify_recursion_layer_proof, }; -use zksync_dal::StorageProcessor; use zksync_object_store::ObjectStore; use zksync_prover_fri_types::{ circuit_definitions::{ @@ -66,7 +66,7 @@ pub async fn save_proof( blob_store: &dyn ObjectStore, public_blob_store: Option<&dyn ObjectStore>, shall_save_to_public_bucket: bool, - storage_processor: &mut StorageProcessor<'_>, + storage_processor: &mut StorageProcessor<'_, Prover>, ) { tracing::info!( "Successfully proven job: {}, total time taken: {:?}", diff --git a/prover/prover_fri_gateway/Cargo.toml b/prover/prover_fri_gateway/Cargo.toml index af52e91c7c47..48d61d27149d 100644 --- a/prover/prover_fri_gateway/Cargo.toml +++ b/prover/prover_fri_gateway/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" } zksync_types = { path = "../../core/lib/types" } -zksync_dal = { path = "../../core/lib/dal" } +prover_dal = { path = "../prover_dal" } zksync_config = { path = "../../core/lib/config" } zksync_env_config = { path = "../../core/lib/env_config" } zksync_object_store = { path = "../../core/lib/object_store" } diff --git a/prover/prover_fri_gateway/src/api_data_fetcher.rs b/prover/prover_fri_gateway/src/api_data_fetcher.rs index 9e0277d1ea89..6a95acc0cd0b 100644 --- a/prover/prover_fri_gateway/src/api_data_fetcher.rs +++ b/prover/prover_fri_gateway/src/api_data_fetcher.rs @@ -1,10 +1,10 @@ use std::{sync::Arc, time::Duration}; use async_trait::async_trait; +use prover_dal::{ConnectionPool, Prover}; use reqwest::Client; use 
serde::{de::DeserializeOwned, Serialize}; use tokio::{sync::watch, time::sleep}; -use zksync_dal::ConnectionPool; use zksync_object_store::ObjectStore; use crate::metrics::METRICS; @@ -17,7 +17,7 @@ pub(crate) const SUBMIT_PROOF_PATH: &str = "/submit_proof"; pub(crate) struct PeriodicApiStruct { pub(crate) blob_store: Arc, - pub(crate) pool: ConnectionPool, + pub(crate) pool: ConnectionPool, pub(crate) api_url: String, pub(crate) poll_duration: Duration, pub(crate) client: Client, diff --git a/prover/prover_fri_gateway/src/main.rs b/prover/prover_fri_gateway/src/main.rs index 9c73a8be6f1d..aefc191ab938 100644 --- a/prover/prover_fri_gateway/src/main.rs +++ b/prover/prover_fri_gateway/src/main.rs @@ -1,9 +1,9 @@ use anyhow::Context as _; use prometheus_exporter::PrometheusExporterConfig; +use prover_dal::{ConnectionPool, Prover}; use reqwest::Client; use tokio::sync::{oneshot, watch}; use zksync_config::configs::{FriProverGatewayConfig, ObservabilityConfig, PostgresConfig}; -use zksync_dal::ConnectionPool; use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv}; use zksync_object_store::ObjectStoreFactory; use zksync_prover_interface::api::{ProofGenerationDataRequest, SubmitProofRequest}; @@ -37,7 +37,7 @@ async fn main() -> anyhow::Result<()> { let config = FriProverGatewayConfig::from_env().context("FriProverGatewayConfig::from_env()")?; let postgres_config = PostgresConfig::from_env().context("PostgresConfig::from_env()")?; - let pool = ConnectionPool::builder( + let pool = ConnectionPool::::builder( postgres_config.prover_url()?, postgres_config.max_connections()?, ) diff --git a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs index 7c7374830d6a..926177dcf77f 100644 --- a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs +++ b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs @@ -1,4 +1,5 @@ use async_trait::async_trait; +use prover_dal::ProverDals; use zksync_prover_interface::api::{ ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, }; diff --git a/prover/prover_fri_gateway/src/proof_submitter.rs b/prover/prover_fri_gateway/src/proof_submitter.rs index 3af3e81e20fb..add9657c48a0 100644 --- a/prover/prover_fri_gateway/src/proof_submitter.rs +++ b/prover/prover_fri_gateway/src/proof_submitter.rs @@ -1,5 +1,5 @@ use async_trait::async_trait; -use zksync_dal::fri_proof_compressor_dal::ProofCompressionJobStatus; +use prover_dal::{fri_proof_compressor_dal::ProofCompressionJobStatus, ProverDals}; use zksync_prover_interface::api::{SubmitProofRequest, SubmitProofResponse}; use zksync_types::L1BatchNumber; diff --git a/prover/prover_fri_utils/Cargo.toml b/prover/prover_fri_utils/Cargo.toml index e2023f92b2dc..10d4c4737faa 100644 --- a/prover/prover_fri_utils/Cargo.toml +++ b/prover/prover_fri_utils/Cargo.toml @@ -12,7 +12,7 @@ zksync_object_store = { path = "../../core/lib/object_store" } zksync_config = { path = "../../core/lib/config" } zksync_types = { path = "../../core/lib/types" } zksync_prover_fri_types = { path = "../prover_fri_types" } -zksync_dal = { path = "../../core/lib/dal" } +prover_dal = { path = "../prover_dal" } zksync_utils = { path = "../../core/lib/utils" } tracing = "0.1" diff --git a/prover/prover_fri_utils/src/lib.rs b/prover/prover_fri_utils/src/lib.rs index 4f17fb970810..66e30876f92e 100644 --- a/prover/prover_fri_utils/src/lib.rs +++ b/prover/prover_fri_utils/src/lib.rs @@ -1,6 +1,6 @@ use std::time::Instant; -use zksync_dal::StorageProcessor; +use 
prover_dal::{Prover, ProverDals, StorageProcessor}; use zksync_object_store::ObjectStore; use zksync_prover_fri_types::{ circuit_definitions::{ @@ -25,7 +25,7 @@ pub mod region_fetcher; pub mod socket_utils; pub async fn fetch_next_circuit( - storage: &mut StorageProcessor<'_>, + storage: &mut StorageProcessor<'_, Prover>, blob_store: &dyn ObjectStore, circuit_ids_for_round_to_be_proven: &Vec, vk_commitments: &L1VerifierConfig, diff --git a/prover/witness_generator/Cargo.toml b/prover/witness_generator/Cargo.toml index 0699693367c0..bb6348dd2096 100644 --- a/prover/witness_generator/Cargo.toml +++ b/prover/witness_generator/Cargo.toml @@ -13,6 +13,7 @@ publish = false # We don't want to publish our binaries. [dependencies] vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" } +prover_dal = { path = "../prover_dal" } zksync_dal = { path = "../../core/lib/dal" } zksync_config = { path = "../../core/lib/config" } zksync_prover_interface = { path = "../../core/lib/prover_interface" } diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index 8e9bd5f2735c..af9c674c43df 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -18,6 +18,9 @@ use circuit_definitions::{ use multivm::vm_latest::{ constants::MAX_CYCLES_FOR_TX, HistoryDisabled, StorageOracle as VmStorageOracle, }; +use prover_dal::{ + fri_witness_generator_dal::FriWitnessJobStatus, ConnectionPool, Prover, ProverDals, +}; use rand::Rng; use serde::{Deserialize, Serialize}; use zkevm_test_harness::{ @@ -26,7 +29,7 @@ use zkevm_test_harness::{ zkevm_circuits::eip_4844::input::EIP4844OutputDataWitness, }; use zksync_config::configs::FriWitnessGeneratorConfig; -use zksync_dal::{fri_witness_generator_dal::FriWitnessJobStatus, ConnectionPool}; +use zksync_dal::{Server, ServerDals}; use zksync_object_store::{Bucket, ObjectStore, ObjectStoreFactory, StoredObject}; use zksync_prover_fri_types::{ circuit_definitions::{ @@ -102,8 +105,8 @@ pub struct BasicWitnessGenerator { config: Arc, object_store: Arc, public_blob_store: Option>, - connection_pool: ConnectionPool, - prover_connection_pool: ConnectionPool, + connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, protocol_versions: Vec, } @@ -112,8 +115,8 @@ impl BasicWitnessGenerator { config: FriWitnessGeneratorConfig, store_factory: &ObjectStoreFactory, public_blob_store: Option>, - connection_pool: ConnectionPool, - prover_connection_pool: ConnectionPool, + connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, protocol_versions: Vec, ) -> Self { Self { @@ -128,8 +131,8 @@ impl BasicWitnessGenerator { async fn process_job_impl( object_store: Arc, - connection_pool: ConnectionPool, - prover_connection_pool: ConnectionPool, + connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, basic_job: BasicWitnessGeneratorJob, started_at: Instant, config: Arc, @@ -328,7 +331,7 @@ impl JobProcessor for BasicWitnessGenerator { async fn process_basic_circuits_job( object_store: &dyn ObjectStore, config: Arc, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, started_at: Instant, block_number: L1BatchNumber, job: PrepareBasicCircuitsJob, @@ -364,7 +367,7 @@ async fn process_basic_circuits_job( } async fn update_database( - prover_connection_pool: &ConnectionPool, + prover_connection_pool: &ConnectionPool, started_at: Instant, block_number: 
L1BatchNumber, blob_urls: BlobUrls, @@ -484,7 +487,7 @@ async fn save_recursion_queue( // If making changes to this method, consider moving this logic to the DAL layer and make // `PrepareBasicCircuitsJob` have all fields of `BasicCircuitWitnessGeneratorInput`. async fn build_basic_circuits_witness_generator_input( - connection_pool: &ConnectionPool, + connection_pool: &ConnectionPool, witness_merkle_input: PrepareBasicCircuitsJob, l1_batch_number: L1BatchNumber, ) -> BasicCircuitWitnessGeneratorInput { @@ -528,7 +531,7 @@ async fn generate_witness( block_number: L1BatchNumber, object_store: &dyn ObjectStore, config: Arc, - connection_pool: ConnectionPool, + connection_pool: ConnectionPool, input: BasicCircuitWitnessGeneratorInput, eip_4844_blobs: Eip4844Blobs, ) -> ( diff --git a/prover/witness_generator/src/leaf_aggregation.rs b/prover/witness_generator/src/leaf_aggregation.rs index cd5ac65507db..13ab1475c692 100644 --- a/prover/witness_generator/src/leaf_aggregation.rs +++ b/prover/witness_generator/src/leaf_aggregation.rs @@ -2,11 +2,12 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context as _; use async_trait::async_trait; +use prover_dal::{Prover, ProverDals}; use zkevm_test_harness::witness::recursive_aggregation::{ compute_leaf_params, create_leaf_witnesses, }; use zksync_config::configs::FriWitnessGeneratorConfig; -use zksync_dal::{fri_prover_dal::types::LeafAggregationJobMetadata, ConnectionPool}; +use zksync_dal::ConnectionPool; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_prover_fri_types::{ circuit_definitions::{ @@ -28,7 +29,8 @@ use zksync_prover_fri_types::{ use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; use zksync_queued_job_processor::JobProcessor; use zksync_types::{ - basic_fri_types::AggregationRound, protocol_version::FriProtocolVersionId, L1BatchNumber, + basic_fri_types::AggregationRound, protocol_version::FriProtocolVersionId, + prover_dal::LeafAggregationJobMetadata, L1BatchNumber, }; use zksync_vk_setup_data_server_fri::keystore::Keystore; @@ -71,7 +73,7 @@ pub struct LeafAggregationWitnessGeneratorJob { pub struct LeafAggregationWitnessGenerator { config: FriWitnessGeneratorConfig, object_store: Arc, - prover_connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, protocol_versions: Vec, } @@ -79,7 +81,7 @@ impl LeafAggregationWitnessGenerator { pub async fn new( config: FriWitnessGeneratorConfig, store_factory: &ObjectStoreFactory, - prover_connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, protocol_versions: Vec, ) -> Self { Self { @@ -281,7 +283,7 @@ pub fn process_leaf_aggregation_job( } async fn update_database( - prover_connection_pool: &ConnectionPool, + prover_connection_pool: &ConnectionPool, started_at: Instant, block_number: L1BatchNumber, job_id: u32, diff --git a/prover/witness_generator/src/main.rs b/prover/witness_generator/src/main.rs index 8d89f8df862b..429631d9180c 100644 --- a/prover/witness_generator/src/main.rs +++ b/prover/witness_generator/src/main.rs @@ -5,13 +5,13 @@ use std::time::Instant; use anyhow::{anyhow, Context as _}; use futures::{channel::mpsc, executor::block_on, SinkExt}; use prometheus_exporter::PrometheusExporterConfig; +use prover_dal::{ConnectionPool, Prover, ProverDals}; use structopt::StructOpt; use tokio::sync::watch; use zksync_config::{ configs::{FriWitnessGeneratorConfig, ObservabilityConfig, PostgresConfig, PrometheusConfig}, ObjectStoreConfig, }; -use zksync_dal::ConnectionPool; use 
zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv}; use zksync_object_store::ObjectStoreFactory; use zksync_queued_job_processor::JobProcessor; @@ -36,6 +36,7 @@ mod utils; #[cfg(not(target_env = "msvc"))] use jemallocator::Jemalloc; +use zksync_dal::Server; #[cfg(not(target_env = "msvc"))] #[global_allocator] @@ -96,14 +97,14 @@ async fn main() -> anyhow::Result<()> { FriWitnessGeneratorConfig::from_env().context("FriWitnessGeneratorConfig::from_env()")?; let prometheus_config = PrometheusConfig::from_env().context("PrometheusConfig::from_env()")?; let postgres_config = PostgresConfig::from_env().context("PostgresConfig::from_env()")?; - let connection_pool = ConnectionPool::builder( + let connection_pool = ConnectionPool::::builder( postgres_config.master_url()?, postgres_config.max_connections()?, ) .build() .await .context("failed to build a connection_pool")?; - let prover_connection_pool = ConnectionPool::singleton(postgres_config.prover_url()?) + let prover_connection_pool = ConnectionPool::::singleton(postgres_config.prover_url()?) .build() .await .context("failed to build a prover_connection_pool")?; diff --git a/prover/witness_generator/src/node_aggregation.rs b/prover/witness_generator/src/node_aggregation.rs index 58508c768bfc..b479de99eeb2 100644 --- a/prover/witness_generator/src/node_aggregation.rs +++ b/prover/witness_generator/src/node_aggregation.rs @@ -2,11 +2,12 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context as _; use async_trait::async_trait; +use prover_dal::{Prover, ProverDals}; use zkevm_test_harness::witness::recursive_aggregation::{ compute_node_vk_commitment, create_node_witnesses, }; use zksync_config::configs::FriWitnessGeneratorConfig; -use zksync_dal::{fri_prover_dal::types::NodeAggregationJobMetadata, ConnectionPool}; +use zksync_dal::ConnectionPool; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_prover_fri_types::{ circuit_definitions::{ @@ -24,7 +25,8 @@ use zksync_prover_fri_types::{ }; use zksync_queued_job_processor::JobProcessor; use zksync_types::{ - basic_fri_types::AggregationRound, protocol_version::FriProtocolVersionId, L1BatchNumber, + basic_fri_types::AggregationRound, protocol_version::FriProtocolVersionId, + prover_dal::NodeAggregationJobMetadata, L1BatchNumber, }; use zksync_vk_setup_data_server_fri::{keystore::Keystore, utils::get_leaf_vk_params}; @@ -73,7 +75,7 @@ pub struct NodeAggregationWitnessGeneratorJob { pub struct NodeAggregationWitnessGenerator { config: FriWitnessGeneratorConfig, object_store: Arc, - prover_connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, protocol_versions: Vec, } @@ -81,7 +83,7 @@ impl NodeAggregationWitnessGenerator { pub async fn new( config: FriWitnessGeneratorConfig, store_factory: &ObjectStoreFactory, - prover_connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, protocol_versions: Vec, ) -> Self { Self { @@ -280,7 +282,7 @@ pub async fn prepare_job( #[allow(clippy::too_many_arguments)] async fn update_database( - prover_connection_pool: &ConnectionPool, + prover_connection_pool: &ConnectionPool, started_at: Instant, id: u32, block_number: L1BatchNumber, diff --git a/prover/witness_generator/src/scheduler.rs b/prover/witness_generator/src/scheduler.rs index 935b8fb11eb9..865844ec49eb 100644 --- a/prover/witness_generator/src/scheduler.rs +++ b/prover/witness_generator/src/scheduler.rs @@ -9,6 +9,7 @@ use circuit_definitions::{ }, eip4844_proof_config, }; +use prover_dal::{Prover, ProverDals}; use 
zksync_config::configs::FriWitnessGeneratorConfig; use zksync_dal::ConnectionPool; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; @@ -62,7 +63,7 @@ pub struct SchedulerWitnessGeneratorJob { pub struct SchedulerWitnessGenerator { config: FriWitnessGeneratorConfig, object_store: Arc, - prover_connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, protocol_versions: Vec, } @@ -70,7 +71,7 @@ impl SchedulerWitnessGenerator { pub async fn new( config: FriWitnessGeneratorConfig, store_factory: &ObjectStoreFactory, - prover_connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, protocol_versions: Vec, ) -> Self { Self { diff --git a/prover/witness_generator/tests/basic_test.rs b/prover/witness_generator/tests/basic_test.rs index d9fdbf0620dc..4034965d1537 100644 --- a/prover/witness_generator/tests/basic_test.rs +++ b/prover/witness_generator/tests/basic_test.rs @@ -2,7 +2,6 @@ use std::time::Instant; use serde::Serialize; use zksync_config::{configs::object_store::ObjectStoreMode, ObjectStoreConfig}; -use zksync_dal::fri_prover_dal::types::{LeafAggregationJobMetadata, NodeAggregationJobMetadata}; use zksync_object_store::ObjectStoreFactory; use zksync_prover_fri_types::{ keys::{AggregationsKey, FriCircuitKey}, @@ -11,6 +10,7 @@ use zksync_prover_fri_types::{ use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; use zksync_types::{ basic_fri_types::{AggregationRound, FinalProofIds}, + prover_dal::{LeafAggregationJobMetadata, NodeAggregationJobMetadata}, L1BatchNumber, }; use zksync_witness_generator::{ diff --git a/prover/witness_vector_generator/Cargo.toml b/prover/witness_vector_generator/Cargo.toml index 1b43a686daaf..39d4d6fb710b 100644 --- a/prover/witness_vector_generator/Cargo.toml +++ b/prover/witness_vector_generator/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" } zksync_types = { path = "../../core/lib/types" } -zksync_dal = { path = "../../core/lib/dal" } +prover_dal = { path = "../prover_dal" } zksync_config = { path = "../../core/lib/config" } zksync_env_config = { path = "../../core/lib/env_config" } zksync_object_store = { path = "../../core/lib/object_store" } diff --git a/prover/witness_vector_generator/src/generator.rs b/prover/witness_vector_generator/src/generator.rs index 037858bcf2a0..cc8213877c42 100644 --- a/prover/witness_vector_generator/src/generator.rs +++ b/prover/witness_vector_generator/src/generator.rs @@ -6,9 +6,9 @@ use std::{ use anyhow::Context as _; use async_trait::async_trait; +use prover_dal::{ConnectionPool, Prover, ProverDals}; use tokio::{task::JoinHandle, time::sleep}; use zksync_config::configs::FriWitnessVectorGeneratorConfig; -use zksync_dal::{fri_prover_dal::types::GpuProverInstanceStatus, ConnectionPool}; use zksync_object_store::ObjectStore; use zksync_prover_fri_types::{ circuit_definitions::{ @@ -25,14 +25,17 @@ use zksync_prover_fri_utils::{ fetch_next_circuit, get_numeric_circuit_id, socket_utils::send_assembly, }; use zksync_queued_job_processor::JobProcessor; -use zksync_types::{basic_fri_types::CircuitIdRoundTuple, protocol_version::L1VerifierConfig}; +use zksync_types::{ + basic_fri_types::CircuitIdRoundTuple, protocol_version::L1VerifierConfig, + prover_dal::GpuProverInstanceStatus, +}; use zksync_vk_setup_data_server_fri::keystore::Keystore; use crate::metrics::METRICS; pub struct WitnessVectorGenerator { blob_store: Arc, - pool: 
ConnectionPool, + pool: ConnectionPool, circuit_ids_for_round_to_be_proven: Vec, zone: String, config: FriWitnessVectorGeneratorConfig, @@ -43,7 +46,7 @@ pub struct WitnessVectorGenerator { impl WitnessVectorGenerator { pub fn new( blob_store: Arc, - prover_connection_pool: ConnectionPool, + prover_connection_pool: ConnectionPool, circuit_ids_for_round_to_be_proven: Vec, zone: String, config: FriWitnessVectorGeneratorConfig, @@ -229,7 +232,7 @@ async fn handle_send_result( result: &Result<(Duration, u64), String>, job_id: u32, address: &SocketAddr, - pool: &ConnectionPool, + pool: &ConnectionPool, zone: String, ) { match result { diff --git a/prover/witness_vector_generator/src/main.rs b/prover/witness_vector_generator/src/main.rs index 455f4b6210ff..545da9ef176f 100644 --- a/prover/witness_vector_generator/src/main.rs +++ b/prover/witness_vector_generator/src/main.rs @@ -2,13 +2,13 @@ use anyhow::Context as _; use prometheus_exporter::PrometheusExporterConfig; +use prover_dal::ConnectionPool; use structopt::StructOpt; use tokio::sync::{oneshot, watch}; use zksync_config::configs::{ fri_prover_group::FriProverGroupConfig, FriProverConfig, FriWitnessVectorGeneratorConfig, ObservabilityConfig, PostgresConfig, }; -use zksync_dal::ConnectionPool; use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv}; use zksync_object_store::ObjectStoreFactory; use zksync_prover_fri_utils::{get_all_circuit_id_round_tuples_for, region_fetcher::get_zone};