From 24ebf2b17bef714bb616ecf31e440d6a9843de88 Mon Sep 17 00:00:00 2001
From: Lech <88630083+Artemka374@users.noreply.github.com>
Date: Mon, 11 Mar 2024 22:46:52 +0200
Subject: [PATCH] make dal build

---
 core/bin/contract-verifier/src/verifier.rs    |   6 +-
 core/bin/snapshots_creator/src/creator.rs     |   2 +-
 core/bin/snapshots_creator/src/tests.rs       |   8 +-
 ...80112ce326bcf4bb38a00366bd24e38881bb2.json |  26 ++
 ...5f7cef0d50c4938105684971e8adc86bb6366.json |  23 ++++
 ...50dd2778c0da29697d31710e8b507629ba5c4.json |  20 +++
 ...98012b034605dfb4c48379844085b71e9e381.json |  22 ++++
 ...d51c3c1cbd9ba8df9d44a29ff96186e35b700.json |  26 ++++
 ...fccb19a2f9663a73306dbf61a257050e2b634.json |  22 ++++
 ...bcae561c0eea2b1cd8beb245b0cc66ebffcaa.json | 119 ++++++++++++++++++
 ...ee38f07b52a709bfd5f3dbab5b4cfef463f1f.json |  20 +++
 ...58ee0e803255aa612a124289f53fbb78bd64a.json |  20 +++
 ...b5b5cc7e37dd6a607e9453e822e34ba77cdc3.json |  23 ++++
 ...2e14fa4c1c4fdad16eff011011878af897946.json |  22 ++++
 ...222806762c640b7de7bf6ecdbf9c3f9842d6f.json |  20 +++
 ...87f1fb2f138d9a2231477fcaae148c50dbb8a.json |  26 ++++
 ...98fa893dbc9654d15753e4a538f021af67b60.json |  20 +++
 ...2bef649a6db197a36aca098cd8054909d82e9.json | 118 +++++++++++++++++
 ...85c5196c431363802b6b988939c99853b9c97.json |  23 ++++
 ...b1fcdd02ba2ae2104a0ef972fe809bf136425.json |  26 ++++
 ...741e2713748a17c96af5485423bfe9aaa84ec.json |  26 ++++
 ...1fac675dd7b078df61758df382e1038cb4987.json |  26 ++++
 core/lib/dal/src/blocks_dal.rs                |  14 +--
 core/lib/dal/src/blocks_web3_dal.rs           |  18 ++-
 core/lib/dal/src/consensus_dal.rs             |   6 +-
 core/lib/dal/src/contract_verification_dal.rs |   2 +-
 core/lib/dal/src/eth_sender_dal.rs            |   5 +-
 core/lib/dal/src/events_dal.rs                |  11 +-
 core/lib/dal/src/events_web3_dal.rs           |   5 +-
 core/lib/dal/src/factory_deps_dal.rs          |   2 +-
 core/lib/dal/src/lib.rs                       |   6 +-
 .../src/models/storage_witness_job_info.rs    |  12 +-
 core/lib/dal/src/proof_generation_dal.rs      |   2 +-
 core/lib/dal/src/protocol_versions_dal.rs     |   2 +-
 .../lib/dal/src/protocol_versions_web3_dal.rs |   2 +-
 core/lib/dal/src/snapshot_recovery_dal.rs     |   6 +-
 core/lib/dal/src/snapshots_creator_dal.rs     |   6 +-
 core/lib/dal/src/snapshots_dal.rs             |  11 +-
 core/lib/dal/src/storage_dal.rs               |   6 +-
 core/lib/dal/src/storage_logs_dal.rs          |  28 ++---
 core/lib/dal/src/storage_logs_dedup_dal.rs    |   2 +-
 core/lib/dal/src/storage_web3_dal.rs          |  11 +-
 core/lib/dal/src/sync_dal.rs                  |  13 +-
 core/lib/dal/src/tests/mod.rs                 |   6 +-
 core/lib/dal/src/tokens_dal.rs                |  15 +--
 core/lib/dal/src/tokens_web3_dal.rs           |   2 +-
 core/lib/dal/src/transactions_dal.rs          |   9 +-
 core/lib/dal/src/transactions_web3_dal.rs     |  18 ++-
 core/lib/db_connection/src/instrument.rs      |   1 -
 core/lib/snapshots_applier/src/lib.rs         |  10 +-
 50 files changed, 737 insertions(+), 138 deletions(-)
 create mode 100644 core/lib/dal/.sqlx/query-06ce3e5c10d6ae327fc2e45fd9080112ce326bcf4bb38a00366bd24e38881bb2.json
 create mode 100644 core/lib/dal/.sqlx/query-10b8981f7aa47ce5d3507571af45f7cef0d50c4938105684971e8adc86bb6366.json
 create mode 100644 core/lib/dal/.sqlx/query-14e5a66ee9a2b7bc56e41c3925150dd2778c0da29697d31710e8b507629ba5c4.json
 create mode 100644 core/lib/dal/.sqlx/query-254d17b5402c123cca0edd6fcdc98012b034605dfb4c48379844085b71e9e381.json
 create mode 100644 core/lib/dal/.sqlx/query-3bc70707863d7be1158de1bfb4bd51c3c1cbd9ba8df9d44a29ff96186e35b700.json
 create mode 100644 core/lib/dal/.sqlx/query-3d2d005b59ba9931286452e029cfccb19a2f9663a73306dbf61a257050e2b634.json
 create mode 100644 core/lib/dal/.sqlx/query-3d7536cfe7d88dceebff2125a51bcae561c0eea2b1cd8beb245b0cc66ebffcaa.json
 create mode 100644
core/lib/dal/.sqlx/query-48c03061a662861818e3b0072caee38f07b52a709bfd5f3dbab5b4cfef463f1f.json create mode 100644 core/lib/dal/.sqlx/query-53182f70d83de1482110c767a8d58ee0e803255aa612a124289f53fbb78bd64a.json create mode 100644 core/lib/dal/.sqlx/query-63f5f9bff4b2c15fa4230af2c73b5b5cc7e37dd6a607e9453e822e34ba77cdc3.json create mode 100644 core/lib/dal/.sqlx/query-67ac33ad0ad912e8db6d141e21c2e14fa4c1c4fdad16eff011011878af897946.json create mode 100644 core/lib/dal/.sqlx/query-80684de323c47ade36e5a52b99a222806762c640b7de7bf6ecdbf9c3f9842d6f.json create mode 100644 core/lib/dal/.sqlx/query-8255d112d3929fd8355ba2ac9bc87f1fb2f138d9a2231477fcaae148c50dbb8a.json create mode 100644 core/lib/dal/.sqlx/query-93725851350146c6ec253a59af598fa893dbc9654d15753e4a538f021af67b60.json create mode 100644 core/lib/dal/.sqlx/query-aac861efb4acb81d5cefa598c822bef649a6db197a36aca098cd8054909d82e9.json create mode 100644 core/lib/dal/.sqlx/query-b9f77e6c15f9e635024b73f1fc985c5196c431363802b6b988939c99853b9c97.json create mode 100644 core/lib/dal/.sqlx/query-eb9cd837842490dcb8ab8894ccdb1fcdd02ba2ae2104a0ef972fe809bf136425.json create mode 100644 core/lib/dal/.sqlx/query-f2606e792f08f65d15bdab66a83741e2713748a17c96af5485423bfe9aaa84ec.json create mode 100644 core/lib/dal/.sqlx/query-f3a695b0179ffdacd4baf9eeb6c1fac675dd7b078df61758df382e1038cb4987.json diff --git a/core/bin/contract-verifier/src/verifier.rs b/core/bin/contract-verifier/src/verifier.rs index 9adbf0ce3d2a..b8a93f031b1f 100644 --- a/core/bin/contract-verifier/src/verifier.rs +++ b/core/bin/contract-verifier/src/verifier.rs @@ -12,7 +12,7 @@ use lazy_static::lazy_static; use regex::Regex; use tokio::time; use zksync_config::ContractVerifierConfig; -use zksync_dal::{BasicStorageProcessor, ConnectionPool}; +use zksync_dal::{ConnectionPool, ServerProcessor}; use zksync_env_config::FromEnv; use zksync_queued_job_processor::{async_trait, JobProcessor}; use zksync_types::{ @@ -54,7 +54,7 @@ impl ContractVerifier { } async fn verify( - storage: &mut BasicStorageProcessor<'_>, + storage: &mut ServerProcessor<'_>, mut request: VerificationRequest, config: ContractVerifierConfig, ) -> Result { @@ -429,7 +429,7 @@ impl ContractVerifier { } async fn process_result( - storage: &mut BasicStorageProcessor<'_>, + storage: &mut ServerProcessor<'_>, request_id: usize, verification_result: Result, ) { diff --git a/core/bin/snapshots_creator/src/creator.rs b/core/bin/snapshots_creator/src/creator.rs index fa3e18cf8bfa..42d782f17114 100644 --- a/core/bin/snapshots_creator/src/creator.rs +++ b/core/bin/snapshots_creator/src/creator.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use anyhow::Context as _; use tokio::sync::Semaphore; use zksync_config::SnapshotsCreatorConfig; -use zksync_dal::{BasicStorageProcessor, ConnectionPool, Server, ServerProcessor}; +use zksync_dal::{ConnectionPool, Server, ServerProcessor}; use zksync_object_store::ObjectStore; use zksync_types::{ snapshots::{ diff --git a/core/bin/snapshots_creator/src/tests.rs b/core/bin/snapshots_creator/src/tests.rs index 67ca41071685..ae13548d33bb 100644 --- a/core/bin/snapshots_creator/src/tests.rs +++ b/core/bin/snapshots_creator/src/tests.rs @@ -10,7 +10,7 @@ use std::{ }; use rand::{thread_rng, Rng}; -use zksync_dal::BasicStorageProcessor; +use zksync_dal::ServerProcessor; use zksync_object_store::ObjectStore; use zksync_types::{ block::{L1BatchHeader, MiniblockHeader}, @@ -132,7 +132,7 @@ struct ExpectedOutputs { } async fn create_miniblock( - conn: &mut BasicStorageProcessor<'_>, + conn: &mut ServerProcessor<'_>, 
miniblock_number: MiniblockNumber, block_logs: Vec, ) { @@ -162,7 +162,7 @@ async fn create_miniblock( } async fn create_l1_batch( - conn: &mut BasicStorageProcessor<'_>, + conn: &mut ServerProcessor<'_>, l1_batch_number: L1BatchNumber, logs_for_initial_writes: &[StorageLog], ) { @@ -186,7 +186,7 @@ async fn create_l1_batch( async fn prepare_postgres( rng: &mut impl Rng, - conn: &mut BasicStorageProcessor<'_>, + conn: &mut ServerProcessor<'_>, block_count: u32, ) -> ExpectedOutputs { conn.protocol_versions_dal() diff --git a/core/lib/dal/.sqlx/query-06ce3e5c10d6ae327fc2e45fd9080112ce326bcf4bb38a00366bd24e38881bb2.json b/core/lib/dal/.sqlx/query-06ce3e5c10d6ae327fc2e45fd9080112ce326bcf4bb38a00366bd24e38881bb2.json new file mode 100644 index 000000000000..33b2394218f0 --- /dev/null +++ b/core/lib/dal/.sqlx/query-06ce3e5c10d6ae327fc2e45fd9080112ce326bcf4bb38a00366bd24e38881bb2.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT number AS number, false AS \"confirmed!\" FROM l1_batches INNER JOIN eth_txs_history ON l1_batches.eth_execute_tx_id = eth_txs_history.eth_tx_id ORDER BY number DESC LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "confirmed!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + null + ] + }, + "hash": "06ce3e5c10d6ae327fc2e45fd9080112ce326bcf4bb38a00366bd24e38881bb2" +} diff --git a/core/lib/dal/.sqlx/query-10b8981f7aa47ce5d3507571af45f7cef0d50c4938105684971e8adc86bb6366.json b/core/lib/dal/.sqlx/query-10b8981f7aa47ce5d3507571af45f7cef0d50c4938105684971e8adc86bb6366.json new file mode 100644 index 000000000000..d59f237ed3c3 --- /dev/null +++ b/core/lib/dal/.sqlx/query-10b8981f7aa47ce5d3507571af45f7cef0d50c4938105684971e8adc86bb6366.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COALESCE(SUM(predicted_execute_gas_cost), 0) AS \"sum!\" FROM l1_batches WHERE number BETWEEN $1 AND $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "sum!", + "type_info": "Numeric" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "10b8981f7aa47ce5d3507571af45f7cef0d50c4938105684971e8adc86bb6366" +} diff --git a/core/lib/dal/.sqlx/query-14e5a66ee9a2b7bc56e41c3925150dd2778c0da29697d31710e8b507629ba5c4.json b/core/lib/dal/.sqlx/query-14e5a66ee9a2b7bc56e41c3925150dd2778c0da29697d31710e8b507629ba5c4.json new file mode 100644 index 000000000000..1d8b1e4c89e9 --- /dev/null +++ b/core/lib/dal/.sqlx/query-14e5a66ee9a2b7bc56e41c3925150dd2778c0da29697d31710e8b507629ba5c4.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT COALESCE(\n (SELECT (MAX(number) + 1) AS number FROM miniblocks),\n (SELECT (MAX(miniblock_number) + 1) AS number FROM snapshot_recovery),\n 0\n ) AS number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "14e5a66ee9a2b7bc56e41c3925150dd2778c0da29697d31710e8b507629ba5c4" +} diff --git a/core/lib/dal/.sqlx/query-254d17b5402c123cca0edd6fcdc98012b034605dfb4c48379844085b71e9e381.json b/core/lib/dal/.sqlx/query-254d17b5402c123cca0edd6fcdc98012b034605dfb4c48379844085b71e9e381.json new file mode 100644 index 000000000000..8734598cc6f6 --- /dev/null +++ b/core/lib/dal/.sqlx/query-254d17b5402c123cca0edd6fcdc98012b034605dfb4c48379844085b71e9e381.json @@ -0,0 +1,22 @@ +{ + "db_name": 
"PostgreSQL", + "query": "SELECT nonce FROM eth_txs WHERE from_addr = $1::bytea ORDER BY id DESC LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "nonce", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [ + false + ] + }, + "hash": "254d17b5402c123cca0edd6fcdc98012b034605dfb4c48379844085b71e9e381" +} diff --git a/core/lib/dal/.sqlx/query-3bc70707863d7be1158de1bfb4bd51c3c1cbd9ba8df9d44a29ff96186e35b700.json b/core/lib/dal/.sqlx/query-3bc70707863d7be1158de1bfb4bd51c3c1cbd9ba8df9d44a29ff96186e35b700.json new file mode 100644 index 000000000000..bfb41fa6f26f --- /dev/null +++ b/core/lib/dal/.sqlx/query-3bc70707863d7be1158de1bfb4bd51c3c1cbd9ba8df9d44a29ff96186e35b700.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT number AS number, false AS \"confirmed!\" FROM l1_batches INNER JOIN eth_txs_history ON l1_batches.eth_commit_tx_id = eth_txs_history.eth_tx_id ORDER BY number DESC LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "confirmed!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + null + ] + }, + "hash": "3bc70707863d7be1158de1bfb4bd51c3c1cbd9ba8df9d44a29ff96186e35b700" +} diff --git a/core/lib/dal/.sqlx/query-3d2d005b59ba9931286452e029cfccb19a2f9663a73306dbf61a257050e2b634.json b/core/lib/dal/.sqlx/query-3d2d005b59ba9931286452e029cfccb19a2f9663a73306dbf61a257050e2b634.json new file mode 100644 index 000000000000..c049a99bb767 --- /dev/null +++ b/core/lib/dal/.sqlx/query-3d2d005b59ba9931286452e029cfccb19a2f9663a73306dbf61a257050e2b634.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT number FROM miniblocks WHERE number = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "3d2d005b59ba9931286452e029cfccb19a2f9663a73306dbf61a257050e2b634" +} diff --git a/core/lib/dal/.sqlx/query-3d7536cfe7d88dceebff2125a51bcae561c0eea2b1cd8beb245b0cc66ebffcaa.json b/core/lib/dal/.sqlx/query-3d7536cfe7d88dceebff2125a51bcae561c0eea2b1cd8beb245b0cc66ebffcaa.json new file mode 100644 index 000000000000..9a43cc9fd3a0 --- /dev/null +++ b/core/lib/dal/.sqlx/query-3d7536cfe7d88dceebff2125a51bcae561c0eea2b1cd8beb245b0cc66ebffcaa.json @@ -0,0 +1,119 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n transactions.miniblock_number AS block_number,\n transactions.nonce AS nonce,\n transactions.signature AS signature,\n transactions.initiator_address AS initiator_address,\n transactions.tx_format AS tx_format,\n transactions.value AS value,\n transactions.gas_limit AS gas_limit,\n transactions.max_fee_per_gas AS max_fee_per_gas,\n transactions.max_priority_fee_per_gas AS max_priority_fee_per_gas,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.l1_batch_number AS l1_batch_number,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.data->'contractAddress' AS \"execute_contract_address\",\n transactions.data->'calldata' AS \"calldata\",\n miniblocks.hash AS \"block_hash\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n WHERE\n transactions.miniblock_number = $1 AND transactions.index_in_block = $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": 
"tx_hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "index_in_block", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "block_number", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "nonce", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "signature", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "initiator_address", + "type_info": "Bytea" + }, + { + "ordinal": 6, + "name": "tx_format", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "value", + "type_info": "Numeric" + }, + { + "ordinal": 8, + "name": "gas_limit", + "type_info": "Numeric" + }, + { + "ordinal": 9, + "name": "max_fee_per_gas", + "type_info": "Numeric" + }, + { + "ordinal": 10, + "name": "max_priority_fee_per_gas", + "type_info": "Numeric" + }, + { + "ordinal": 11, + "name": "effective_gas_price", + "type_info": "Numeric" + }, + { + "ordinal": 12, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 13, + "name": "l1_batch_tx_index", + "type_info": "Int4" + }, + { + "ordinal": 14, + "name": "execute_contract_address", + "type_info": "Jsonb" + }, + { + "ordinal": 15, + "name": "calldata", + "type_info": "Jsonb" + }, + { + "ordinal": 16, + "name": "block_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int4" + ] + }, + "nullable": [ + false, + true, + true, + true, + true, + false, + true, + false, + true, + true, + true, + true, + true, + true, + null, + null, + false + ] + }, + "hash": "3d7536cfe7d88dceebff2125a51bcae561c0eea2b1cd8beb245b0cc66ebffcaa" +} diff --git a/core/lib/dal/.sqlx/query-48c03061a662861818e3b0072caee38f07b52a709bfd5f3dbab5b4cfef463f1f.json b/core/lib/dal/.sqlx/query-48c03061a662861818e3b0072caee38f07b52a709bfd5f3dbab5b4cfef463f1f.json new file mode 100644 index 000000000000..009022cc4f8a --- /dev/null +++ b/core/lib/dal/.sqlx/query-48c03061a662861818e3b0072caee38f07b52a709bfd5f3dbab5b4cfef463f1f.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT COALESCE(\n (\n SELECT MAX(number) FROM miniblocks\n WHERE l1_batch_number = (\n SELECT MAX(number) FROM l1_batches\n JOIN eth_txs ON\n l1_batches.eth_execute_tx_id = eth_txs.id\n WHERE\n eth_txs.confirmed_eth_tx_history_id IS NOT NULL\n )\n ),\n 0\n ) AS number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "48c03061a662861818e3b0072caee38f07b52a709bfd5f3dbab5b4cfef463f1f" +} diff --git a/core/lib/dal/.sqlx/query-53182f70d83de1482110c767a8d58ee0e803255aa612a124289f53fbb78bd64a.json b/core/lib/dal/.sqlx/query-53182f70d83de1482110c767a8d58ee0e803255aa612a124289f53fbb78bd64a.json new file mode 100644 index 000000000000..0242ba4ef57b --- /dev/null +++ b/core/lib/dal/.sqlx/query-53182f70d83de1482110c767a8d58ee0e803255aa612a124289f53fbb78bd64a.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT MAX(number) AS number FROM miniblocks", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "53182f70d83de1482110c767a8d58ee0e803255aa612a124289f53fbb78bd64a" +} diff --git a/core/lib/dal/.sqlx/query-63f5f9bff4b2c15fa4230af2c73b5b5cc7e37dd6a607e9453e822e34ba77cdc3.json b/core/lib/dal/.sqlx/query-63f5f9bff4b2c15fa4230af2c73b5b5cc7e37dd6a607e9453e822e34ba77cdc3.json new file mode 100644 index 000000000000..ab2283fa60c3 --- /dev/null +++ 
b/core/lib/dal/.sqlx/query-63f5f9bff4b2c15fa4230af2c73b5b5cc7e37dd6a607e9453e822e34ba77cdc3.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COALESCE(SUM(predicted_prove_gas_cost), 0) AS \"sum!\" FROM l1_batches WHERE number BETWEEN $1 AND $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "sum!", + "type_info": "Numeric" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "63f5f9bff4b2c15fa4230af2c73b5b5cc7e37dd6a607e9453e822e34ba77cdc3" +} diff --git a/core/lib/dal/.sqlx/query-67ac33ad0ad912e8db6d141e21c2e14fa4c1c4fdad16eff011011878af897946.json b/core/lib/dal/.sqlx/query-67ac33ad0ad912e8db6d141e21c2e14fa4c1c4fdad16eff011011878af897946.json new file mode 100644 index 000000000000..73121576ef95 --- /dev/null +++ b/core/lib/dal/.sqlx/query-67ac33ad0ad912e8db6d141e21c2e14fa4c1c4fdad16eff011011878af897946.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT number FROM miniblocks WHERE hash = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [ + false + ] + }, + "hash": "67ac33ad0ad912e8db6d141e21c2e14fa4c1c4fdad16eff011011878af897946" +} diff --git a/core/lib/dal/.sqlx/query-80684de323c47ade36e5a52b99a222806762c640b7de7bf6ecdbf9c3f9842d6f.json b/core/lib/dal/.sqlx/query-80684de323c47ade36e5a52b99a222806762c640b7de7bf6ecdbf9c3f9842d6f.json new file mode 100644 index 000000000000..559e8e229708 --- /dev/null +++ b/core/lib/dal/.sqlx/query-80684de323c47ade36e5a52b99a222806762c640b7de7bf6ecdbf9c3f9842d6f.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT number FROM miniblocks WHERE number = 0", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "80684de323c47ade36e5a52b99a222806762c640b7de7bf6ecdbf9c3f9842d6f" +} diff --git a/core/lib/dal/.sqlx/query-8255d112d3929fd8355ba2ac9bc87f1fb2f138d9a2231477fcaae148c50dbb8a.json b/core/lib/dal/.sqlx/query-8255d112d3929fd8355ba2ac9bc87f1fb2f138d9a2231477fcaae148c50dbb8a.json new file mode 100644 index 000000000000..34f17446dfd3 --- /dev/null +++ b/core/lib/dal/.sqlx/query-8255d112d3929fd8355ba2ac9bc87f1fb2f138d9a2231477fcaae148c50dbb8a.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT number AS number, false AS \"confirmed!\" FROM l1_batches INNER JOIN eth_txs_history ON l1_batches.eth_prove_tx_id = eth_txs_history.eth_tx_id ORDER BY number DESC LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "confirmed!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + null + ] + }, + "hash": "8255d112d3929fd8355ba2ac9bc87f1fb2f138d9a2231477fcaae148c50dbb8a" +} diff --git a/core/lib/dal/.sqlx/query-93725851350146c6ec253a59af598fa893dbc9654d15753e4a538f021af67b60.json b/core/lib/dal/.sqlx/query-93725851350146c6ec253a59af598fa893dbc9654d15753e4a538f021af67b60.json new file mode 100644 index 000000000000..80788846fe69 --- /dev/null +++ b/core/lib/dal/.sqlx/query-93725851350146c6ec253a59af598fa893dbc9654d15753e4a538f021af67b60.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT nonce FROM eth_txs WHERE from_addr IS NULL ORDER BY id DESC LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "nonce", + 
"type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "93725851350146c6ec253a59af598fa893dbc9654d15753e4a538f021af67b60" +} diff --git a/core/lib/dal/.sqlx/query-aac861efb4acb81d5cefa598c822bef649a6db197a36aca098cd8054909d82e9.json b/core/lib/dal/.sqlx/query-aac861efb4acb81d5cefa598c822bef649a6db197a36aca098cd8054909d82e9.json new file mode 100644 index 000000000000..1122b9c27eb8 --- /dev/null +++ b/core/lib/dal/.sqlx/query-aac861efb4acb81d5cefa598c822bef649a6db197a36aca098cd8054909d82e9.json @@ -0,0 +1,118 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n transactions.miniblock_number AS block_number,\n transactions.nonce AS nonce,\n transactions.signature AS signature,\n transactions.initiator_address AS initiator_address,\n transactions.tx_format AS tx_format,\n transactions.value AS value,\n transactions.gas_limit AS gas_limit,\n transactions.max_fee_per_gas AS max_fee_per_gas,\n transactions.max_priority_fee_per_gas AS max_priority_fee_per_gas,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.l1_batch_number AS l1_batch_number,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.data->'contractAddress' AS \"execute_contract_address\",\n transactions.data->'calldata' AS \"calldata\",\n miniblocks.hash AS \"block_hash\"\n FROM transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n WHERE\n transactions.hash = ANY($1)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "tx_hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "index_in_block", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "block_number", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "nonce", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "signature", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "initiator_address", + "type_info": "Bytea" + }, + { + "ordinal": 6, + "name": "tx_format", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "value", + "type_info": "Numeric" + }, + { + "ordinal": 8, + "name": "gas_limit", + "type_info": "Numeric" + }, + { + "ordinal": 9, + "name": "max_fee_per_gas", + "type_info": "Numeric" + }, + { + "ordinal": 10, + "name": "max_priority_fee_per_gas", + "type_info": "Numeric" + }, + { + "ordinal": 11, + "name": "effective_gas_price", + "type_info": "Numeric" + }, + { + "ordinal": 12, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 13, + "name": "l1_batch_tx_index", + "type_info": "Int4" + }, + { + "ordinal": 14, + "name": "execute_contract_address", + "type_info": "Jsonb" + }, + { + "ordinal": 15, + "name": "calldata", + "type_info": "Jsonb" + }, + { + "ordinal": 16, + "name": "block_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "ByteaArray" + ] + }, + "nullable": [ + false, + true, + true, + true, + true, + false, + true, + false, + true, + true, + true, + true, + true, + true, + null, + null, + true + ] + }, + "hash": "aac861efb4acb81d5cefa598c822bef649a6db197a36aca098cd8054909d82e9" +} diff --git a/core/lib/dal/.sqlx/query-b9f77e6c15f9e635024b73f1fc985c5196c431363802b6b988939c99853b9c97.json b/core/lib/dal/.sqlx/query-b9f77e6c15f9e635024b73f1fc985c5196c431363802b6b988939c99853b9c97.json new file mode 100644 index 000000000000..379c1f75d9b3 --- /dev/null +++ b/core/lib/dal/.sqlx/query-b9f77e6c15f9e635024b73f1fc985c5196c431363802b6b988939c99853b9c97.json 
@@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT COALESCE(SUM(predicted_commit_gas_cost), 0) AS \"sum!\" FROM l1_batches WHERE number BETWEEN $1 AND $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "sum!", + "type_info": "Numeric" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "b9f77e6c15f9e635024b73f1fc985c5196c431363802b6b988939c99853b9c97" +} diff --git a/core/lib/dal/.sqlx/query-eb9cd837842490dcb8ab8894ccdb1fcdd02ba2ae2104a0ef972fe809bf136425.json b/core/lib/dal/.sqlx/query-eb9cd837842490dcb8ab8894ccdb1fcdd02ba2ae2104a0ef972fe809bf136425.json new file mode 100644 index 000000000000..6eee7052bcff --- /dev/null +++ b/core/lib/dal/.sqlx/query-eb9cd837842490dcb8ab8894ccdb1fcdd02ba2ae2104a0ef972fe809bf136425.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT number AS number, true AS \"confirmed!\" FROM l1_batches INNER JOIN eth_txs_history ON l1_batches.eth_execute_tx_id = eth_txs_history.eth_tx_id WHERE eth_txs_history.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "confirmed!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + null + ] + }, + "hash": "eb9cd837842490dcb8ab8894ccdb1fcdd02ba2ae2104a0ef972fe809bf136425" +} diff --git a/core/lib/dal/.sqlx/query-f2606e792f08f65d15bdab66a83741e2713748a17c96af5485423bfe9aaa84ec.json b/core/lib/dal/.sqlx/query-f2606e792f08f65d15bdab66a83741e2713748a17c96af5485423bfe9aaa84ec.json new file mode 100644 index 000000000000..2986d6e1b5c6 --- /dev/null +++ b/core/lib/dal/.sqlx/query-f2606e792f08f65d15bdab66a83741e2713748a17c96af5485423bfe9aaa84ec.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT number AS number, true AS \"confirmed!\" FROM l1_batches INNER JOIN eth_txs_history ON l1_batches.eth_commit_tx_id = eth_txs_history.eth_tx_id WHERE eth_txs_history.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "confirmed!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + null + ] + }, + "hash": "f2606e792f08f65d15bdab66a83741e2713748a17c96af5485423bfe9aaa84ec" +} diff --git a/core/lib/dal/.sqlx/query-f3a695b0179ffdacd4baf9eeb6c1fac675dd7b078df61758df382e1038cb4987.json b/core/lib/dal/.sqlx/query-f3a695b0179ffdacd4baf9eeb6c1fac675dd7b078df61758df382e1038cb4987.json new file mode 100644 index 000000000000..7812d637a414 --- /dev/null +++ b/core/lib/dal/.sqlx/query-f3a695b0179ffdacd4baf9eeb6c1fac675dd7b078df61758df382e1038cb4987.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT number AS number, true AS \"confirmed!\" FROM l1_batches INNER JOIN eth_txs_history ON l1_batches.eth_prove_tx_id = eth_txs_history.eth_tx_id WHERE eth_txs_history.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "confirmed!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + null + ] + }, + "hash": "f3a695b0179ffdacd4baf9eeb6c1fac675dd7b078df61758df382e1038cb4987" +} diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index b8910b6d1615..71d1e44f5691 100644 --- 
a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -7,9 +7,7 @@ use std::{ use anyhow::Context as _; use bigdecimal::{BigDecimal, FromPrimitive, ToPrimitive}; use zksync_db_connection::{ - instrument::InstrumentExt, - match_query_as, - processor::{BasicStorageProcessor, StorageProcessor}, + instrument::InstrumentExt, interpolate_query, match_query_as, processor::StorageProcessor, }; use zksync_types::{ aggregated_operations::AggregatedActionType, @@ -2345,11 +2343,11 @@ mod tests { }; use super::*; - use crate::{tests::create_miniblock_header, ConnectionPool}; + use crate::{tests::create_miniblock_header, ConnectionPool, Server}; #[tokio::test] async fn loading_l1_batch_header() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -2405,7 +2403,7 @@ mod tests { #[tokio::test] async fn getting_predicted_gas() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -2466,7 +2464,7 @@ mod tests { #[allow(deprecated)] // that's the whole point #[tokio::test] async fn checking_fee_account_address_in_l1_batches() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); assert!(conn .blocks_dal() @@ -2478,7 +2476,7 @@ mod tests { #[allow(deprecated)] // that's the whole point #[tokio::test] async fn ensuring_fee_account_address_for_miniblocks() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 0bafd899babe..1af22146f99e 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -1,7 +1,5 @@ use zksync_db_connection::{ - instrument::InstrumentExt, - match_query_as, - processor::{BasicStorageProcessor, StorageProcessor}, + instrument::InstrumentExt, interpolate_query, match_query_as, processor::StorageProcessor, }; use zksync_system_constants::EMPTY_UNCLES_HASH; use zksync_types::{ @@ -658,12 +656,12 @@ mod tests { create_miniblock_header, create_snapshot_recovery, mock_execution_result, mock_l2_transaction, }, - ConnectionPool, + ConnectionPool, Server, }; #[tokio::test] async fn getting_web3_block_and_tx_count() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.blocks_dal() .delete_miniblocks(MiniblockNumber(0)) @@ -711,7 +709,7 @@ mod tests { #[tokio::test] async fn resolving_earliest_block_id() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); let miniblock_number = conn @@ -737,7 +735,7 @@ mod tests { #[tokio::test] async fn resolving_latest_block_id() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = 
connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -803,7 +801,7 @@ mod tests { #[tokio::test] async fn resolving_pending_block_id_for_snapshot_recovery() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); let snapshot_recovery = create_snapshot_recovery(); conn.snapshot_recovery_dal() @@ -821,7 +819,7 @@ mod tests { #[tokio::test] async fn resolving_block_by_hash() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -850,7 +848,7 @@ mod tests { #[tokio::test] async fn getting_traces_for_block() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index 609a7d42db86..6ed93d545b92 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -1,7 +1,7 @@ use anyhow::Context as _; use zksync_consensus_roles::validator; use zksync_consensus_storage::ReplicaState; -use zksync_db_connection::processor::{BasicStorageProcessor, StorageProcessor}; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::MiniblockNumber; pub use crate::models::consensus::Payload; @@ -286,12 +286,12 @@ mod tests { use zksync_consensus_roles::validator; use zksync_consensus_storage::ReplicaState; - use crate::ConnectionPool; + use crate::{ConnectionPool, Server}; #[tokio::test] async fn replica_state_read_write() { let rng = &mut rand::thread_rng(); - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); assert_eq!(None, conn.consensus_dal().genesis().await.unwrap()); for n in 0..3 { diff --git a/core/lib/dal/src/contract_verification_dal.rs b/core/lib/dal/src/contract_verification_dal.rs index c06c1b0a7e31..0bc3777d5bd5 100644 --- a/core/lib/dal/src/contract_verification_dal.rs +++ b/core/lib/dal/src/contract_verification_dal.rs @@ -5,7 +5,7 @@ use std::{ use anyhow::Context as _; use sqlx::postgres::types::PgInterval; -use zksync_db_connection::processor::{BasicStorageProcessor, StorageProcessor}; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::{ contract_verification_api::{ DeployContractCalldata, VerificationIncomingRequest, VerificationInfo, VerificationRequest, diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index 902d8d7ee1f3..3adbd99f9061 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ -2,10 +2,7 @@ use std::{convert::TryFrom, str::FromStr}; use anyhow::Context as _; use sqlx::types::chrono::{DateTime, Utc}; -use zksync_db_connection::{ - match_query_as, - processor::{BasicStorageProcessor, StorageProcessor}, -}; +use zksync_db_connection::{interpolate_query, match_query_as, processor::StorageProcessor}; use zksync_types::{ aggregated_operations::AggregatedActionType, eth_sender::{EthTx, EthTxBlobSidecar, TxHistory, 
TxHistoryToSend}, diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs index 8fc61b1b0b57..d784642972a2 100644 --- a/core/lib/dal/src/events_dal.rs +++ b/core/lib/dal/src/events_dal.rs @@ -1,10 +1,7 @@ use std::{collections::HashMap, fmt}; use sqlx::types::chrono::Utc; -use zksync_db_connection::{ - processor::{BasicStorageProcessor, StorageProcessor}, - write_str, writeln_str, -}; +use zksync_db_connection::{processor::StorageProcessor, write_str, writeln_str}; use zksync_system_constants::L1_MESSENGER_ADDRESS; use zksync_types::{ api, @@ -338,7 +335,7 @@ mod tests { use zksync_types::{Address, L1BatchNumber, ProtocolVersion}; use super::*; - use crate::{tests::create_miniblock_header, ConnectionPool}; + use crate::{tests::create_miniblock_header, ConnectionPool, Server}; fn create_vm_event(index: u8, topic_count: u8) -> VmEvent { assert!(topic_count <= 4); @@ -352,7 +349,7 @@ mod tests { #[tokio::test] async fn storing_events() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.events_dal().rollback_events(MiniblockNumber(0)).await; conn.blocks_dal() @@ -428,7 +425,7 @@ mod tests { #[tokio::test] async fn storing_l2_to_l1_logs() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.events_dal() .rollback_l2_to_l1_logs(MiniblockNumber(0)) diff --git a/core/lib/dal/src/events_web3_dal.rs b/core/lib/dal/src/events_web3_dal.rs index c228bd4f1a64..033cc49922e5 100644 --- a/core/lib/dal/src/events_web3_dal.rs +++ b/core/lib/dal/src/events_web3_dal.rs @@ -3,10 +3,7 @@ use sqlx::{ query::{Query, QueryAs}, Postgres, Row, }; -use zksync_db_connection::{ - instrument::InstrumentExt, - processor::{BasicStorageProcessor, StorageProcessor}, -}; +use zksync_db_connection::{instrument::InstrumentExt, processor::StorageProcessor}; use zksync_types::{ api::{GetLogsFilter, Log}, Address, MiniblockNumber, H256, diff --git a/core/lib/dal/src/factory_deps_dal.rs b/core/lib/dal/src/factory_deps_dal.rs index 2e8a49150de6..a260d30ad0a4 100644 --- a/core/lib/dal/src/factory_deps_dal.rs +++ b/core/lib/dal/src/factory_deps_dal.rs @@ -2,7 +2,7 @@ use std::collections::{HashMap, HashSet}; use anyhow::Context as _; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; -use zksync_db_connection::processor::{BasicStorageProcessor, StorageProcessor}; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::{MiniblockNumber, H256, U256}; use zksync_utils::{bytes_to_be_words, bytes_to_chunks}; diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index 7ebf79a8f384..ca7a1f1a6a1a 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -1,10 +1,10 @@ //! Data access layer (DAL) for zkSync Era. 
-use sqlx::{pool::PoolConnection, PgConnection, Postgres}; +use sqlx::PgConnection; pub use sqlx::{types::BigDecimal, Error as SqlxError}; pub use zksync_db_connection::connection::ConnectionPool; use zksync_db_connection::processor::{ - BasicStorageProcessor, StorageKind, StorageProcessor, StorageProcessorTags, TracedConnections, + async_trait, BasicStorageProcessor, StorageKind, StorageProcessor, StorageProcessorTags, }; use crate::{ @@ -82,7 +82,7 @@ impl<'a> StorageProcessor for ServerProcessor<'a> { } async fn commit(self) -> sqlx::Result<()> { - self.0.commit() + self.0.commit().await } fn conn(&mut self) -> &mut PgConnection { diff --git a/core/lib/dal/src/models/storage_witness_job_info.rs b/core/lib/dal/src/models/storage_witness_job_info.rs index ea8e15fb9c99..03d1120b7170 100644 --- a/core/lib/dal/src/models/storage_witness_job_info.rs +++ b/core/lib/dal/src/models/storage_witness_job_info.rs @@ -1,11 +1,13 @@ use std::{convert::TryFrom, str::FromStr}; use sqlx::types::chrono::{DateTime, NaiveDateTime, NaiveTime, Utc}; -use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; - -use crate::fri_prover_dal::types::{ - JobPosition, WitnessJobInfo, WitnessJobStatus, WitnessJobStatusFailed, - WitnessJobStatusSuccessful, +use zksync_types::{ + basic_fri_types::AggregationRound, + prover_dal::{ + JobPosition, WitnessJobInfo, WitnessJobStatus, WitnessJobStatusFailed, + WitnessJobStatusSuccessful, + }, + L1BatchNumber, }; #[derive(sqlx::FromRow)] diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index 7844c5075dbf..7c9c7a308adc 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -1,7 +1,7 @@ use std::time::Duration; use strum::{Display, EnumString}; -use zksync_db_connection::processor::{BasicStorageProcessor, StorageProcessor}; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::L1BatchNumber; use crate::{time_utils::pg_interval_from_duration, ServerProcessor, SqlxError}; diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index 3553b91ec2a3..bd461907527b 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -2,7 +2,7 @@ use std::convert::TryInto; use anyhow::Context as _; use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; -use zksync_db_connection::processor::{BasicStorageProcessor, StorageProcessor}; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::{ protocol_version::{L1VerifierConfig, ProtocolUpgradeTx, ProtocolVersion, VerifierParams}, Address, ProtocolVersionId, H256, diff --git a/core/lib/dal/src/protocol_versions_web3_dal.rs b/core/lib/dal/src/protocol_versions_web3_dal.rs index 0b8a8a2b9b5f..659ca01a25b5 100644 --- a/core/lib/dal/src/protocol_versions_web3_dal.rs +++ b/core/lib/dal/src/protocol_versions_web3_dal.rs @@ -1,4 +1,4 @@ -use zksync_db_connection::processor::{BasicStorageProcessor, StorageProcessor}; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::api::ProtocolVersion; use crate::{models::storage_protocol_version::StorageProtocolVersion, ServerProcessor}; diff --git a/core/lib/dal/src/snapshot_recovery_dal.rs b/core/lib/dal/src/snapshot_recovery_dal.rs index 82fa35c8d1e9..094223b54e4f 100644 --- a/core/lib/dal/src/snapshot_recovery_dal.rs +++ b/core/lib/dal/src/snapshot_recovery_dal.rs @@ -1,4 +1,4 @@ -use zksync_db_connection::processor::{BasicStorageProcessor, 
StorageProcessor}; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::{ snapshots::SnapshotRecoveryStatus, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, }; @@ -106,11 +106,11 @@ mod tests { snapshots::SnapshotRecoveryStatus, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, }; - use crate::ConnectionPool; + use crate::{ConnectionPool, Server}; #[tokio::test] async fn manipulating_snapshot_recovery_table() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); let mut applied_status_dal = conn.snapshot_recovery_dal(); let empty_status = applied_status_dal diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs index 1aa043301502..b2cd6de45342 100644 --- a/core/lib/dal/src/snapshots_creator_dal.rs +++ b/core/lib/dal/src/snapshots_creator_dal.rs @@ -171,11 +171,11 @@ mod tests { use zksync_types::StorageLog; use super::*; - use crate::ConnectionPool; + use crate::{ConnectionPool, Server}; #[tokio::test] async fn getting_storage_log_chunks_basics() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let logs = (0..100).map(|i| { @@ -287,7 +287,7 @@ mod tests { #[tokio::test] async fn phantom_writes_are_filtered_out() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let key = StorageKey::new(AccountTreeId::default(), H256::repeat_byte(1)); diff --git a/core/lib/dal/src/snapshots_dal.rs b/core/lib/dal/src/snapshots_dal.rs index acf5d46a3637..05e36449ecc0 100644 --- a/core/lib/dal/src/snapshots_dal.rs +++ b/core/lib/dal/src/snapshots_dal.rs @@ -1,7 +1,4 @@ -use zksync_db_connection::{ - instrument::InstrumentExt, - processor::{BasicStorageProcessor, StorageProcessor}, -}; +use zksync_db_connection::{instrument::InstrumentExt, processor::StorageProcessor}; use zksync_types::{ snapshots::{AllSnapshots, SnapshotMetadata}, L1BatchNumber, @@ -174,11 +171,11 @@ impl SnapshotsDal<'_, '_> { mod tests { use zksync_types::L1BatchNumber; - use crate::ConnectionPool; + use crate::{ConnectionPool, Server}; #[tokio::test] async fn adding_snapshot() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let mut dal = conn.snapshots_dal(); let l1_batch_number = L1BatchNumber(100); @@ -218,7 +215,7 @@ mod tests { #[tokio::test] async fn adding_files() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let mut dal = conn.snapshots_dal(); let l1_batch_number = L1BatchNumber(100); diff --git a/core/lib/dal/src/storage_dal.rs b/core/lib/dal/src/storage_dal.rs index 4e0faa009fc2..7d3aab6541df 100644 --- a/core/lib/dal/src/storage_dal.rs +++ b/core/lib/dal/src/storage_dal.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use itertools::Itertools; -use zksync_db_connection::processor::{BasicStorageProcessor, StorageProcessor}; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::{StorageKey, StorageLog, StorageValue, H256}; use crate::ServerProcessor; @@ -99,12 +99,12 @@ mod tests { use zksync_types::{AccountTreeId, Address}; use super::*; - use crate::ConnectionPool; + use 
crate::{ConnectionPool, Server}; #[allow(deprecated)] #[tokio::test] async fn applying_storage_logs() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let account = AccountTreeId::new(Address::repeat_byte(1)); diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index a8965fad82dd..6f270a8907ed 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -2,9 +2,7 @@ use std::{collections::HashMap, ops, time::Instant}; use sqlx::{types::chrono::Utc, Row}; use zksync_db_connection::{ - instrument::InstrumentExt, - processor::{BasicStorageProcessor, StorageProcessor}, - write_str, writeln_str, + instrument::InstrumentExt, processor::StorageProcessor, write_str, writeln_str, }; use zksync_types::{ get_code_key, snapshots::SnapshotStorageLog, AccountTreeId, Address, L1BatchNumber, @@ -843,13 +841,9 @@ mod tests { use zksync_types::{block::L1BatchHeader, ProtocolVersion, ProtocolVersionId}; use super::*; - use crate::{tests::create_miniblock_header, ConnectionPool}; + use crate::{tests::create_miniblock_header, ConnectionPool, Server}; - async fn insert_miniblock( - conn: &mut BasicStorageProcessor<'_>, - number: u32, - logs: Vec, - ) { + async fn insert_miniblock(conn: &mut ServerProcessor<'_>, number: u32, logs: Vec) { let header = L1BatchHeader::new( L1BatchNumber(number), 0, @@ -880,7 +874,7 @@ mod tests { #[tokio::test] async fn inserting_storage_logs() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -925,7 +919,7 @@ mod tests { } async fn test_rollback( - conn: &mut BasicStorageProcessor<'_>, + conn: &mut ServerProcessor<'_>, key: StorageKey, second_key: StorageKey, ) { @@ -1003,7 +997,7 @@ mod tests { #[tokio::test] async fn getting_storage_logs_for_revert() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -1053,7 +1047,7 @@ mod tests { #[tokio::test] async fn reverting_keys_without_initial_write() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -1120,7 +1114,7 @@ mod tests { #[tokio::test] async fn getting_starting_entries_in_chunks() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let sorted_hashed_keys = prepare_tree_entries(&mut conn, 100).await; @@ -1153,7 +1147,7 @@ mod tests { } } - async fn prepare_tree_entries(conn: &mut BasicStorageProcessor<'_>, count: u8) -> Vec { + async fn prepare_tree_entries(conn: &mut ServerProcessor<'_>, count: u8) -> Vec { conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) .await; @@ -1181,7 +1175,7 @@ mod tests { #[tokio::test] async fn getting_tree_entries() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); let sorted_hashed_keys = prepare_tree_entries(&mut conn, 
10).await; @@ -1223,7 +1217,7 @@ mod tests { FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, ); - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); // If deployment fails then two writes are issued, one that writes `bytecode_hash` to the "correct" value, // and the next write reverts its value back to `FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH`. diff --git a/core/lib/dal/src/storage_logs_dedup_dal.rs b/core/lib/dal/src/storage_logs_dedup_dal.rs index 1e223795abd0..e79fb1588904 100644 --- a/core/lib/dal/src/storage_logs_dedup_dal.rs +++ b/core/lib/dal/src/storage_logs_dedup_dal.rs @@ -1,7 +1,7 @@ use std::collections::HashSet; use sqlx::types::chrono::Utc; -use zksync_db_connection::processor::{BasicStorageProcessor, StorageProcessor}; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::{ snapshots::SnapshotStorageLog, zk_evm_types::LogQuery, AccountTreeId, Address, L1BatchNumber, StorageKey, H256, diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index 330f52386479..fc079388c469 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -1,9 +1,6 @@ use std::{collections::HashMap, ops}; -use zksync_db_connection::{ - instrument::InstrumentExt, - processor::{BasicStorageProcessor, StorageProcessor}, -}; +use zksync_db_connection::{instrument::InstrumentExt, processor::StorageProcessor}; use zksync_types::{ get_code_key, get_nonce_key, utils::{decompose_full_nonce, storage_key_for_standard_token_balance}, @@ -305,12 +302,12 @@ mod tests { use super::*; use crate::{ tests::{create_miniblock_header, create_snapshot_recovery}, - ConnectionPool, + ConnectionPool, Server, }; #[tokio::test] async fn resolving_l1_batch_number_of_miniblock() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -377,7 +374,7 @@ mod tests { #[tokio::test] async fn resolving_l1_batch_number_of_miniblock_with_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index 7841b6ceff88..f2ebc12b0472 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -2,11 +2,13 @@ use zksync_db_connection::instrument::InstrumentExt; use zksync_types::{api::en, MiniblockNumber}; use crate::{ - metrics::MethodLatency, models::storage_sync::{StorageSyncBlock, SyncBlock}, ServerProcessor, }; +// todo: uncomment +// use crate::metrics::MethodLatency, + /// DAL subset dedicated to the EN synchronization. #[derive(Debug)] pub struct SyncDal<'a, 'c> { @@ -86,7 +88,8 @@ impl SyncDal<'_, '_> { block_number: MiniblockNumber, include_transactions: bool, ) -> anyhow::Result> { - let _latency = MethodLatency::new("sync_dal_sync_block"); + // todo: uncomment + //let _latency = MethodLatency::new("sync_dal_sync_block"); let Some(block) = self.sync_block_inner(block_number).await? 
else { return Ok(None); }; @@ -118,12 +121,12 @@ mod tests { create_miniblock_header, create_snapshot_recovery, mock_execution_result, mock_l2_transaction, }, - ConnectionPool, + ConnectionPool, Server, }; #[tokio::test] async fn sync_block_basics() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); // Simulate genesis. @@ -239,7 +242,7 @@ mod tests { #[tokio::test] async fn sync_block_after_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut conn = pool.access_storage().await.unwrap(); // Simulate snapshot recovery. diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 3805150ae228..335a62cdc0e4 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -165,7 +165,7 @@ pub(crate) fn create_snapshot_recovery() -> SnapshotRecoveryStatus { #[tokio::test] async fn workflow_with_submit_tx_equal_hashes() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let storage = &mut connection_pool.access_storage().await.unwrap(); let mut transactions_dal = TransactionsDal { storage }; @@ -185,7 +185,7 @@ async fn workflow_with_submit_tx_equal_hashes() { #[tokio::test] async fn workflow_with_submit_tx_diff_hashes() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let storage = &mut connection_pool.access_storage().await.unwrap(); let mut transactions_dal = TransactionsDal { storage }; @@ -212,7 +212,7 @@ async fn workflow_with_submit_tx_diff_hashes() { #[tokio::test] async fn remove_stuck_txs() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let storage = &mut connection_pool.access_storage().await.unwrap(); let mut protocol_versions_dal = ProtocolVersionsDal { storage }; protocol_versions_dal diff --git a/core/lib/dal/src/tokens_dal.rs b/core/lib/dal/src/tokens_dal.rs index db415744cc17..16a213f0398f 100644 --- a/core/lib/dal/src/tokens_dal.rs +++ b/core/lib/dal/src/tokens_dal.rs @@ -1,8 +1,5 @@ use sqlx::types::chrono::Utc; -use zksync_db_connection::{ - processor::{BasicStorageProcessor, StorageProcessor}, - write_str, writeln_str, -}; +use zksync_db_connection::{processor::StorageProcessor, write_str, writeln_str}; use zksync_types::{tokens::TokenInfo, Address, MiniblockNumber}; use crate::ServerProcessor; @@ -115,7 +112,7 @@ mod tests { use zksync_types::{get_code_key, tokens::TokenMetadata, StorageLog, H256}; use super::*; - use crate::ConnectionPool; + use crate::{ConnectionPool, Server}; fn test_token_info() -> TokenInfo { TokenInfo { @@ -143,7 +140,7 @@ mod tests { #[tokio::test] async fn adding_and_getting_tokens() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let tokens = [test_token_info(), eth_token_info()]; storage.tokens_dal().add_tokens(&tokens).await.unwrap(); @@ -190,7 +187,7 @@ mod tests { #[tokio::test] async fn rolling_back_tokens() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let eth_info = eth_token_info(); @@ -260,7 +257,7 @@ mod tests { ); } - async fn test_getting_all_tokens(storage: &mut BasicStorageProcessor<'_>) { + 
async fn test_getting_all_tokens(storage: &mut ServerProcessor<'_>) { for at_miniblock in [None, Some(MiniblockNumber(2)), Some(MiniblockNumber(100))] { let all_tokens = storage .tokens_web3_dal() @@ -284,7 +281,7 @@ mod tests { #[tokio::test] async fn rolling_back_tokens_with_failed_deployment() { - let pool = ConnectionPool::test_pool().await; + let pool = ConnectionPool::::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let test_info = test_token_info(); diff --git a/core/lib/dal/src/tokens_web3_dal.rs b/core/lib/dal/src/tokens_web3_dal.rs index 4d1d77df9e39..3ffd8782887a 100644 --- a/core/lib/dal/src/tokens_web3_dal.rs +++ b/core/lib/dal/src/tokens_web3_dal.rs @@ -1,4 +1,4 @@ -use zksync_db_connection::processor::{BasicStorageProcessor, StorageProcessor}; +use zksync_db_connection::processor::StorageProcessor; use zksync_types::{ tokens::{TokenInfo, TokenMetadata}, Address, MiniblockNumber, diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 67cf6bf9bcc0..1baa258f563d 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -4,10 +4,7 @@ use anyhow::Context as _; use bigdecimal::BigDecimal; use itertools::Itertools; use sqlx::{error, types::chrono::NaiveDateTime}; -use zksync_db_connection::{ - instrument::InstrumentExt, - processor::{BasicStorageProcessor, StorageProcessor}, -}; +use zksync_db_connection::{instrument::InstrumentExt, processor::StorageProcessor}; use zksync_types::{ block::MiniblockExecutionData, fee::TransactionExecutionMetrics, @@ -1336,12 +1333,12 @@ mod tests { use super::*; use crate::{ tests::{create_miniblock_header, mock_execution_result, mock_l2_transaction}, - ConnectionPool, + ConnectionPool, Server, }; #[tokio::test] async fn getting_call_trace_for_transaction() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index 0b3f84c354aa..3776616a6131 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -1,8 +1,6 @@ use sqlx::types::chrono::NaiveDateTime; use zksync_db_connection::{ - instrument::InstrumentExt, - match_query_as, - processor::{BasicStorageProcessor, StorageProcessor}, + instrument::InstrumentExt, interpolate_query, match_query_as, processor::StorageProcessor, }; use zksync_types::{ api, api::TransactionReceipt, Address, L2ChainId, MiniblockNumber, Transaction, @@ -399,10 +397,10 @@ mod tests { use super::*; use crate::{ tests::{create_miniblock_header, mock_execution_result, mock_l2_transaction}, - ConnectionPool, + ConnectionPool, Server, }; - async fn prepare_transactions(conn: &mut BasicStorageProcessor<'_>, txs: Vec) { + async fn prepare_transactions(conn: &mut ServerProcessor<'_>, txs: Vec) { conn.blocks_dal() .delete_miniblocks(MiniblockNumber(0)) .await @@ -436,7 +434,7 @@ mod tests { #[tokio::test] async fn getting_transaction() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -486,7 +484,7 @@ mod tests { #[tokio::test] async fn 
getting_receipts() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -514,7 +512,7 @@ mod tests { #[tokio::test] async fn getting_miniblock_transactions() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -541,7 +539,7 @@ mod tests { #[tokio::test] async fn getting_next_nonce_by_initiator_account() { - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); conn.protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) @@ -611,7 +609,7 @@ mod tests { #[tokio::test] async fn getting_next_nonce_by_initiator_account_after_snapshot_recovery() { // Emulate snapshot recovery: no transactions with past nonces are present in the storage - let connection_pool = ConnectionPool::test_pool().await; + let connection_pool = ConnectionPool::::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); let initiator = Address::repeat_byte(1); let next_nonce = conn diff --git a/core/lib/db_connection/src/instrument.rs b/core/lib/db_connection/src/instrument.rs index 3746258fd9d0..76705b4585c4 100644 --- a/core/lib/db_connection/src/instrument.rs +++ b/core/lib/db_connection/src/instrument.rs @@ -21,7 +21,6 @@ use sqlx::{ use tokio::time::Instant; use crate::{ - connection::ConnectionPool, metrics::REQUEST_METRICS, processor::{StorageProcessor, StorageProcessorTags}, }; diff --git a/core/lib/snapshots_applier/src/lib.rs b/core/lib/snapshots_applier/src/lib.rs index 0ccdbf92582b..2cbb4759fb52 100644 --- a/core/lib/snapshots_applier/src/lib.rs +++ b/core/lib/snapshots_applier/src/lib.rs @@ -6,7 +6,7 @@ use anyhow::Context as _; use async_trait::async_trait; use serde::Serialize; use tokio::sync::Semaphore; -use zksync_dal::{BasicStorageProcessor, ConnectionPool, Server, SqlxError}; +use zksync_dal::{ConnectionPool, Server, ServerProcessor, SqlxError}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_object_store::{ObjectStore, ObjectStoreError}; use zksync_types::{ @@ -246,7 +246,7 @@ struct SnapshotsApplier<'a> { impl<'a> SnapshotsApplier<'a> { /// Recovers [`SnapshotRecoveryStatus`] from the storage and the main node. 
async fn prepare_applied_snapshot_status( - storage: &mut BasicStorageProcessor<'_>, + storage: &mut ServerProcessor<'_>, main_node_client: &dyn SnapshotsApplierMainNodeClient, ) -> Result<(SnapshotRecoveryStatus, bool), SnapshotsApplierError> { let latency = @@ -428,7 +428,7 @@ impl<'a> SnapshotsApplier<'a> { async fn recover_factory_deps( &mut self, - storage: &mut BasicStorageProcessor<'_>, + storage: &mut ServerProcessor<'_>, ) -> Result<(), SnapshotsApplierError> { let latency = METRICS.initial_stage_duration[&InitialStage::ApplyFactoryDeps].start(); @@ -472,7 +472,7 @@ impl<'a> SnapshotsApplier<'a> { &self, chunk_id: u64, storage_logs: &[SnapshotStorageLog], - storage: &mut BasicStorageProcessor<'_>, + storage: &mut ServerProcessor<'_>, ) -> Result<(), SnapshotsApplierError> { storage .storage_logs_dedup_dal() @@ -490,7 +490,7 @@ impl<'a> SnapshotsApplier<'a> { &self, chunk_id: u64, storage_logs: &[SnapshotStorageLog], - storage: &mut BasicStorageProcessor<'_>, + storage: &mut ServerProcessor<'_>, ) -> Result<(), SnapshotsApplierError> { storage .storage_logs_dal()