From 67475292ff770d2edd6884be27f976a4144778ae Mon Sep 17 00:00:00 2001
From: Alex Ostrovski
Date: Wed, 16 Oct 2024 10:19:14 +0300
Subject: [PATCH 1/9] feat(api): Make acceptable values cache lag configurable
 (#3028)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Allows configuring the acceptable values cache lag (measured in the number of L2 blocks). Increases the default value from 5 to 20 blocks.

## Why ❔

Currently, the acceptable lag is hard-coded and is arguably too small at times. This can lead to the values cache getting reset during Postgres usage surges (e.g., when sealing an L1 batch).

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`.
---
 core/bin/external_node/src/node_builder.rs |   1 +
 core/bin/zksync_server/src/node_builder.rs |   1 +
 core/lib/config/src/configs/api.rs         |  34 ++--
 core/lib/config/src/testonly.rs            |   1 +
 core/lib/env_config/src/api.rs             |   2 +
 core/lib/protobuf_config/src/api.rs        |   8 +-
 .../src/proto/config/api.proto             |   1 +
 core/lib/state/src/cache/lru_cache.rs      |   7 +
 core/lib/state/src/postgres/mod.rs         | 185 ++++++++++--------
 core/lib/state/src/postgres/tests.rs       |   4 +-
 .../layers/web3_api/tx_sender.rs           |  12 +-
 11 files changed, 157 insertions(+), 99 deletions(-)

diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs
index 14e09b9c2a7a..7d8489013535 100644
--- a/core/bin/external_node/src/node_builder.rs
+++ b/core/bin/external_node/src/node_builder.rs
@@ -378,6 +378,7 @@ impl ExternalNodeBuilder {
             factory_deps_cache_size: self.config.optional.factory_deps_cache_size() as u64,
             initial_writes_cache_size: self.config.optional.initial_writes_cache_size() as u64,
             latest_values_cache_size: self.config.optional.latest_values_cache_size() as u64,
+            latest_values_max_block_lag: 20, // reasonable default
         };
         let max_vm_concurrency = self.config.optional.vm_concurrency_limit;
         let tx_sender_layer = TxSenderLayer::new(
diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs
index 9fdbc129b195..b04227965f8c 100644
--- a/core/bin/zksync_server/src/node_builder.rs
+++ b/core/bin/zksync_server/src/node_builder.rs
@@ -305,6 +305,7 @@ impl MainNodeBuilder {
             factory_deps_cache_size: rpc_config.factory_deps_cache_size() as u64,
             initial_writes_cache_size: rpc_config.initial_writes_cache_size() as u64,
             latest_values_cache_size: rpc_config.latest_values_cache_size() as u64,
+            latest_values_max_block_lag: rpc_config.latest_values_max_block_lag(),
         };
 
         // On main node we always use master pool sink.
diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs
index 86c9ebd074d8..ce0d96129584 100644
--- a/core/lib/config/src/configs/api.rs
+++ b/core/lib/config/src/configs/api.rs
@@ -189,6 +189,10 @@ pub struct Web3JsonRpcConfig {
     /// Latest values cache size in MiBs. The default value is 128 MiB. If set to 0, the latest
     /// values cache will be disabled.
     pub latest_values_cache_size_mb: Option<usize>,
+    /// Maximum lag in the number of blocks for the latest values cache after which the cache is reset. Greater values
+    /// lead to increased cache update latency, i.e., fewer storage queries being processed by the cache. OTOH, smaller values
+    /// can lead to spurious resets when Postgres lags for whatever reason (e.g., when sealing L1 batches).
+    pub latest_values_max_block_lag: Option<NonZeroU32>,
     /// Limit for fee history block range.
     pub fee_history_limit: Option<u64>,
     /// Maximum number of requests in a single batch JSON RPC request. Default is 500.
@@ -243,20 +247,21 @@ impl Web3JsonRpcConfig {
             estimate_gas_acceptable_overestimation: 1000,
             estimate_gas_optimize_search: false,
             max_tx_size: 1000000,
-            vm_execution_cache_misses_limit: Default::default(),
-            vm_concurrency_limit: Default::default(),
-            factory_deps_cache_size_mb: Default::default(),
-            initial_writes_cache_size_mb: Default::default(),
-            latest_values_cache_size_mb: Default::default(),
-            fee_history_limit: Default::default(),
-            max_batch_request_size: Default::default(),
-            max_response_body_size_mb: Default::default(),
+            vm_execution_cache_misses_limit: None,
+            vm_concurrency_limit: None,
+            factory_deps_cache_size_mb: None,
+            initial_writes_cache_size_mb: None,
+            latest_values_cache_size_mb: None,
+            latest_values_max_block_lag: None,
+            fee_history_limit: None,
+            max_batch_request_size: None,
+            max_response_body_size_mb: None,
             max_response_body_size_overrides_mb: MaxResponseSizeOverrides::empty(),
-            websocket_requests_per_minute_limit: Default::default(),
-            mempool_cache_update_interval: Default::default(),
-            mempool_cache_size: Default::default(),
+            websocket_requests_per_minute_limit: None,
+            mempool_cache_update_interval: None,
+            mempool_cache_size: None,
             tree_api_url: None,
-            whitelisted_tokens_for_aa: Default::default(),
+            whitelisted_tokens_for_aa: vec![],
             api_namespaces: None,
             extended_api_tracing: false,
         }
@@ -308,6 +313,11 @@ impl Web3JsonRpcConfig {
         self.latest_values_cache_size_mb.unwrap_or(128) * super::BYTES_IN_MEGABYTE
     }
 
+    /// Returns the maximum lag in the number of blocks for the latest values cache.
+    pub fn latest_values_max_block_lag(&self) -> u32 {
+        self.latest_values_max_block_lag.map_or(20, NonZeroU32::get)
+    }
+
     pub fn fee_history_limit(&self) -> u64 {
         self.fee_history_limit.unwrap_or(1024)
     }
diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs
index 960808aa6a69..0fdd927d19f0 100644
--- a/core/lib/config/src/testonly.rs
+++ b/core/lib/config/src/testonly.rs
@@ -86,6 +86,7 @@ impl Distribution<configs::api::Web3JsonRpcConfig> for EncodeDist {
             factory_deps_cache_size_mb: self.sample(rng),
             initial_writes_cache_size_mb: self.sample(rng),
             latest_values_cache_size_mb: self.sample(rng),
+            latest_values_max_block_lag: self.sample(rng),
             fee_history_limit: self.sample(rng),
             max_batch_request_size: self.sample(rng),
             max_response_body_size_mb: self.sample(rng),
diff --git a/core/lib/env_config/src/api.rs b/core/lib/env_config/src/api.rs
index 53efea9a7848..ecc2343d49f4 100644
--- a/core/lib/env_config/src/api.rs
+++ b/core/lib/env_config/src/api.rs
@@ -76,6 +76,7 @@ mod tests {
                 factory_deps_cache_size_mb: Some(128),
                 initial_writes_cache_size_mb: Some(32),
                 latest_values_cache_size_mb: Some(256),
+                latest_values_max_block_lag: Some(NonZeroU32::new(50).unwrap()),
                 fee_history_limit: Some(100),
                 max_batch_request_size: Some(200),
                 max_response_body_size_mb: Some(10),
@@ -136,6 +137,7 @@ mod tests {
            API_WEB3_JSON_RPC_FACTORY_DEPS_CACHE_SIZE_MB=128
            API_WEB3_JSON_RPC_INITIAL_WRITES_CACHE_SIZE_MB=32
            API_WEB3_JSON_RPC_LATEST_VALUES_CACHE_SIZE_MB=256
+           API_WEB3_JSON_RPC_LATEST_VALUES_MAX_BLOCK_LAG=50
            API_WEB3_JSON_RPC_FEE_HISTORY_LIMIT=100
            API_WEB3_JSON_RPC_MAX_BATCH_REQUEST_SIZE=200
            API_WEB3_JSON_RPC_WEBSOCKET_REQUESTS_PER_MINUTE_LIMIT=10
diff --git a/core/lib/protobuf_config/src/api.rs b/core/lib/protobuf_config/src/api.rs
index a0c3825228af..3db80c6d691a 100644
--- a/core/lib/protobuf_config/src/api.rs
+++ b/core/lib/protobuf_config/src/api.rs
@@ -1,4 +1,4 @@
-use std::num::NonZeroUsize;
+use std::num::{NonZeroU32, NonZeroUsize};
 
 use anyhow::Context as _;
 use zksync_config::configs::{api, ApiConfig};
@@ -113,6 +113,11 @@ impl ProtoRepr for proto::Web3JsonRpc {
                 .map(|x| x.try_into())
                 .transpose()
                 .context("latest_values_cache_size_mb")?,
+            latest_values_max_block_lag: self
+                .latest_values_max_block_lag
+                .map(|x| x.try_into())
+                .transpose()
+                .context("latest_values_max_block_lag")?,
             fee_history_limit: self.fee_history_limit,
             max_batch_request_size: self
                 .max_batch_request_size
@@ -183,6 +188,7 @@ impl ProtoRepr for proto::Web3JsonRpc {
             latest_values_cache_size_mb: this
                 .latest_values_cache_size_mb
                 .map(|x| x.try_into().unwrap()),
+            latest_values_max_block_lag: this.latest_values_max_block_lag.map(NonZeroU32::get),
             fee_history_limit: this.fee_history_limit,
             max_batch_request_size: this.max_batch_request_size.map(|x| x.try_into().unwrap()),
             max_response_body_size_mb: this
diff --git a/core/lib/protobuf_config/src/proto/config/api.proto b/core/lib/protobuf_config/src/proto/config/api.proto
index 68475e442fd6..89ba0a6bcd2c 100644
--- a/core/lib/protobuf_config/src/proto/config/api.proto
+++ b/core/lib/protobuf_config/src/proto/config/api.proto
@@ -41,6 +41,7 @@ message Web3JsonRpc {
   repeated string api_namespaces = 32; // Optional, if empty all namespaces are available
   optional bool extended_api_tracing = 33; // optional, default false
   optional bool estimate_gas_optimize_search = 34; // optional, default false
+  optional uint32 latest_values_max_block_lag = 35; // optional
   reserved 15; reserved "l1_to_l2_transactions_compatibility_mode";
   reserved 11; reserved "request_timeout";
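
The `Option<NonZeroU32>` plumbing above boils down to a simple defaulting rule; a standalone sketch mirroring `Web3JsonRpcConfig::latest_values_max_block_lag()` (the helper name here is hypothetical):

```rust
use std::num::NonZeroU32;

// Sketch of the getter's semantics: an unset value falls back to the new
// default of 20 blocks, and `NonZeroU32` makes a zero lag unrepresentable.
fn resolve_max_block_lag(configured: Option<NonZeroU32>) -> u32 {
    configured.map_or(20, NonZeroU32::get)
}

fn main() {
    assert_eq!(resolve_max_block_lag(None), 20);
    // E.g., API_WEB3_JSON_RPC_LATEST_VALUES_MAX_BLOCK_LAG=50 from the test above.
    assert_eq!(resolve_max_block_lag(NonZeroU32::new(50)), 50);
}
```
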
diff --git a/core/lib/state/src/cache/lru_cache.rs b/core/lib/state/src/cache/lru_cache.rs
index fa37bdb3e227..55b037bbb8c2 100644
--- a/core/lib/state/src/cache/lru_cache.rs
+++ b/core/lib/state/src/cache/lru_cache.rs
@@ -46,6 +46,13 @@ where
         Self { name, cache }
     }
 
+    /// Returns the capacity of this cache in bytes.
+    pub fn capacity(&self) -> u64 {
+        self.cache
+            .as_ref()
+            .map_or(0, |cache| cache.policy().max_capacity().unwrap_or(u64::MAX))
+    }
+
     /// Gets an entry and pulls it to the front if it exists.
     pub fn get(&self, key: &K) -> Option<V> {
         let latency = METRICS.latency[&(self.name, Method::Get)].start();
diff --git a/core/lib/state/src/postgres/mod.rs b/core/lib/state/src/postgres/mod.rs
index 67866634ee4b..f689f1487f35 100644
--- a/core/lib/state/src/postgres/mod.rs
+++ b/core/lib/state/src/postgres/mod.rs
@@ -72,8 +72,7 @@ impl CacheValue<H256> for TimestampedStorageValue {
     #[allow(clippy::cast_possible_truncation)] // doesn't happen in practice
     fn cache_weight(&self) -> u32 {
         const WEIGHT: usize = mem::size_of::<TimestampedStorageValue>() + mem::size_of::<H256>();
-        // ^ Since values are small in size, we want to account for key sizes as well
-
+        // ^ Since values are small, we want to account for key sizes as well
         WEIGHT as u32
     }
 }
@@ -114,6 +113,14 @@ impl ValuesCache {
         Self(Arc::new(RwLock::new(inner)))
     }
 
+    fn capacity(&self) -> u64 {
+        self.0
+            .read()
+            .expect("values cache is poisoned")
+            .values
+            .capacity()
+    }
+
     /// *NB.* The returned value should be considered immediately stale; at best, it can be
     /// the lower boundary on the current `valid_for` value.
     fn valid_for(&self) -> L2BlockNumber {
@@ -154,80 +161,86 @@ impl ValuesCache {
         }
     }
 
+    fn reset(
+        &self,
+        from_l2_block: L2BlockNumber,
+        to_l2_block: L2BlockNumber,
+    ) -> anyhow::Result<()> {
+        // We can spend too much time loading data from Postgres, so we opt for an easier "update" route:
+        // evict *everything* from cache and call it a day. This should not happen too often in practice.
+        tracing::info!(
+            "Storage values cache is too far behind (current L2 block is {from_l2_block}; \
+             requested update to {to_l2_block}); resetting the cache"
+        );
+        let mut lock = self
+            .0
+            .write()
+            .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?;
+        anyhow::ensure!(
+            lock.valid_for == from_l2_block,
+            "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \
+             valid for L2 block #{}",
+            lock.valid_for
+        );
+        lock.valid_for = to_l2_block;
+        lock.values.clear();
+
+        CACHE_METRICS.values_emptied.inc();
+        CACHE_METRICS
+            .values_valid_for_miniblock
+            .set(u64::from(to_l2_block.0));
+        Ok(())
+    }
+
     async fn update(
         &self,
         from_l2_block: L2BlockNumber,
         to_l2_block: L2BlockNumber,
         connection: &mut Connection<'_, Core>,
     ) -> anyhow::Result<()> {
-        const MAX_L2_BLOCKS_LAG: u32 = 5;
-
         tracing::debug!(
             "Updating storage values cache from L2 block {from_l2_block} to {to_l2_block}"
         );
-        if to_l2_block.0 - from_l2_block.0 > MAX_L2_BLOCKS_LAG {
-            // We can spend too much time loading data from Postgres, so we opt for an easier "update" route:
-            // evict *everything* from cache and call it a day. This should not happen too often in practice.
-            tracing::info!(
-                "Storage values cache is too far behind (current L2 block is {from_l2_block}; \
-                 requested update to {to_l2_block}); resetting the cache"
-            );
-            let mut lock = self
-                .0
-                .write()
-                .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?;
-            anyhow::ensure!(
-                lock.valid_for == from_l2_block,
-                "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \
-                 valid for L2 block #{}",
-                lock.valid_for
-            );
-            lock.valid_for = to_l2_block;
-            lock.values.clear();
+        let update_latency = CACHE_METRICS.values_update[&ValuesUpdateStage::LoadKeys].start();
+        let l2_blocks = (from_l2_block + 1)..=to_l2_block;
+        let modified_keys = connection
+            .storage_logs_dal()
+            .modified_keys_in_l2_blocks(l2_blocks.clone())
+            .await?;
 
-            CACHE_METRICS.values_emptied.inc();
-        } else {
-            let update_latency = CACHE_METRICS.values_update[&ValuesUpdateStage::LoadKeys].start();
-            let l2_blocks = (from_l2_block + 1)..=to_l2_block;
-            let modified_keys = connection
-                .storage_logs_dal()
-                .modified_keys_in_l2_blocks(l2_blocks.clone())
-                .await?;
-
-            let elapsed = update_latency.observe();
-            CACHE_METRICS
-                .values_update_modified_keys
-                .observe(modified_keys.len());
-            tracing::debug!(
-                "Loaded {modified_keys_len} modified storage keys from L2 blocks {l2_blocks:?}; \
-                 took {elapsed:?}",
-                modified_keys_len = modified_keys.len()
-            );
+        let elapsed = update_latency.observe();
+        CACHE_METRICS
+            .values_update_modified_keys
+            .observe(modified_keys.len());
+        tracing::debug!(
+            "Loaded {modified_keys_len} modified storage keys from L2 blocks {l2_blocks:?}; \
+             took {elapsed:?}",
+            modified_keys_len = modified_keys.len()
+        );
 
-            let update_latency =
-                CACHE_METRICS.values_update[&ValuesUpdateStage::RemoveStaleKeys].start();
-            let mut lock = self
-                .0
-                .write()
-                .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?;
-            // The code below holding onto the write `lock` is the only code that can theoretically poison the `RwLock`
-            // (other than emptying the cache above). Thus, it's kept as simple and tight as possible.
-            // E.g., we load data from Postgres beforehand.
-            anyhow::ensure!(
-                lock.valid_for == from_l2_block,
-                "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \
-                 valid for L2 block #{}",
-                lock.valid_for
-            );
-            lock.valid_for = to_l2_block;
-            for modified_key in &modified_keys {
-                lock.values.remove(modified_key);
-            }
-            lock.values.report_size();
-            drop(lock);
-            update_latency.observe();
+        let update_latency =
+            CACHE_METRICS.values_update[&ValuesUpdateStage::RemoveStaleKeys].start();
+        let mut lock = self
+            .0
+            .write()
+            .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?;
+        // The code below holding onto the write `lock` is the only code that can theoretically poison the `RwLock`
+        // (other than emptying the cache above). Thus, it's kept as simple and tight as possible.
+        // E.g., we load data from Postgres beforehand.
+        anyhow::ensure!(
+            lock.valid_for == from_l2_block,
+            "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \
+             valid for L2 block #{}",
+            lock.valid_for
+        );
+        lock.valid_for = to_l2_block;
+        for modified_key in &modified_keys {
+            lock.values.remove(modified_key);
         }
+        lock.values.report_size();
+        drop(lock);
+        update_latency.observe();
 
         CACHE_METRICS
             .values_valid_for_miniblock
@@ -298,6 +311,7 @@ impl PostgresStorageCaches {
     pub fn configure_storage_values_cache(
         &mut self,
         capacity: u64,
+        max_l2_blocks_lag: u32,
         connection_pool: ConnectionPool<Core>,
     ) -> PostgresStorageCachesTask {
         assert!(
@@ -320,6 +334,7 @@ impl PostgresStorageCaches {
         PostgresStorageCachesTask {
             connection_pool,
             values_cache,
+            max_l2_blocks_lag,
             command_receiver,
         }
     }
@@ -349,6 +364,7 @@ impl PostgresStorageCaches {
 pub struct PostgresStorageCachesTask {
     connection_pool: ConnectionPool<Core>,
     values_cache: ValuesCache,
+    max_l2_blocks_lag: u32,
     command_receiver: UnboundedReceiver<L2BlockNumber>,
 }
 
@@ -359,32 +375,41 @@ impl PostgresStorageCachesTask {
     ///
     /// - Propagates Postgres errors.
     /// - Propagates errors from the cache update task.
+    #[tracing::instrument(name = "PostgresStorageCachesTask::run", skip_all)]
     pub async fn run(mut self, mut stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
+        tracing::info!(
+            max_l2_blocks_lag = self.max_l2_blocks_lag,
+            values_cache.capacity = self.values_cache.capacity(),
+            "Starting task"
+        );
+
         let mut current_l2_block = self.values_cache.valid_for();
         loop {
-            tokio::select! {
-                _ = stop_receiver.changed() => {
-                    break;
-                }
-                Some(to_l2_block) = self.command_receiver.recv() => {
-                    if to_l2_block <= current_l2_block {
-                        continue;
-                    }
-                    let mut connection = self
-                        .connection_pool
-                        .connection_tagged("values_cache_updater")
-                        .await?;
-                    self.values_cache
-                        .update(current_l2_block, to_l2_block, &mut connection)
-                        .await?;
-                    current_l2_block = to_l2_block;
-                }
+            let to_l2_block = tokio::select! {
+                _ = stop_receiver.changed() => break,
+                Some(to_l2_block) = self.command_receiver.recv() => to_l2_block,
                 else => {
                     // The command sender has been dropped, which means that we must receive the stop signal soon.
                     stop_receiver.changed().await?;
                     break;
                 }
+            };
+            if to_l2_block <= current_l2_block {
+                continue;
+            }
+
+            if to_l2_block.0 - current_l2_block.0 > self.max_l2_blocks_lag {
+                self.values_cache.reset(current_l2_block, to_l2_block)?;
+            } else {
+                let mut connection = self
+                    .connection_pool
+                    .connection_tagged("values_cache_updater")
+                    .await?;
+                self.values_cache
+                    .update(current_l2_block, to_l2_block, &mut connection)
+                    .await?;
             }
+            current_l2_block = to_l2_block;
         }
         Ok(())
     }
diff --git a/core/lib/state/src/postgres/tests.rs b/core/lib/state/src/postgres/tests.rs
index f88055fa0479..029df60cb461 100644
--- a/core/lib/state/src/postgres/tests.rs
+++ b/core/lib/state/src/postgres/tests.rs
@@ -462,7 +462,7 @@ async fn wait_for_cache_update(values_cache: &ValuesCache, target_l2_block: L2Bl
 
 fn test_values_cache(pool: &ConnectionPool<Core>, rt_handle: Handle) {
     let mut caches = PostgresStorageCaches::new(1_024, 1_024);
-    let task = caches.configure_storage_values_cache(1_024 * 1_024, pool.clone());
+    let task = caches.configure_storage_values_cache(1_024 * 1_024, 5, pool.clone());
 
     let (stop_sender, stop_receiver) = watch::channel(false);
     let update_task_handle = tokio::task::spawn(task.run(stop_receiver));
@@ -595,7 +595,7 @@ fn mini_fuzz_values_cache_inner(
     mut rt_handle: Handle,
 ) {
     let mut caches = PostgresStorageCaches::new(1_024, 1_024);
-    let _ = caches.configure_storage_values_cache(1_024 * 1_024, pool.clone());
+    let _ = caches.configure_storage_values_cache(1_024 * 1_024, 5, pool.clone());
     let values_cache = caches.values.as_ref().unwrap().cache.clone();
 
     let mut connection = rt_handle.block_on(pool.connection()).unwrap();
diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs
index a09938055fae..ba1a69e23bb6 100644
--- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs
+++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs
@@ -32,6 +32,7 @@ pub struct PostgresStorageCachesConfig {
     pub factory_deps_cache_size: u64,
     pub initial_writes_cache_size: u64,
     pub latest_values_cache_size: u64,
+    pub latest_values_max_block_lag: u32,
 }
 
 /// Wiring layer for the `TxSender`.
@@ -133,10 +134,13 @@ impl WiringLayer for TxSenderLayer {
             PostgresStorageCaches::new(factory_deps_capacity, initial_writes_capacity);
 
         let postgres_storage_caches_task = if values_capacity > 0 {
-            Some(
-                storage_caches
-                    .configure_storage_values_cache(values_capacity, replica_pool.clone()),
-            )
+            let update_task = storage_caches.configure_storage_values_cache(
+                values_capacity,
+                self.postgres_storage_caches_config
+                    .latest_values_max_block_lag,
+                replica_pool.clone(),
+            );
+            Some(update_task)
         } else {
             None
         };

From 6918180e558de42c0a9c5f008fa128255b16680f Mon Sep 17 00:00:00 2001
From: Alex Ostrovski
Date: Wed, 16 Oct 2024 11:04:06 +0300
Subject: [PATCH 2/9] test(api): Add tests for EVM emulator – API (#3054)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

- Adds unit tests in the API server crate testing a mock EVM emulator.
- Allows `to == None` for `eth_call` and `debug_traceCall` RPC methods if EVM emulation is enabled, to align with Ethereum node behavior.
- Fixes an integer overflow when estimating gas for L1 / upgrade transactions.

## Why ❔

Ensures that EVM emulation will work as expected.
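
The API-visible change in a nutshell, condensed from the unit tests added in this patch (`client` is the tests' JSON-RPC client, and `b"no_target"` is a marker recognized by the mock transaction executor):

```rust
let request_without_target = CallRequest {
    to: None,
    ..CallTest::call_request(b"no_target")
};
// Without an EVM emulator, this request is rejected with JSON-RPC error code 3
// ("toAddressIsNull"). With EVM emulation enabled, it is dispatched to the
// emulator instead, matching Ethereum node behavior.
let call_result = client.call(request_without_target, None, None).await?;
```
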
## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`.

fix(api): Allow `to == None` for `eth_call` and `debug_traceCall`

fix(api): Avoid integer overflow when estimating gas for L1 / upgrade transactions
---
 core/lib/vm_executor/src/oneshot/block.rs     |  34 +-
 core/lib/vm_executor/src/oneshot/contracts.rs |  77 +++-
 core/lib/vm_executor/src/oneshot/env.rs       |  80 +---
 core/lib/vm_executor/src/oneshot/mod.rs       |   6 +-
 .../src/execution_sandbox/execute.rs          |   9 +-
 .../src/tx_sender/gas_estimation.rs           |  42 +-
 core/node/api_server/src/tx_sender/mod.rs     |  27 +-
 core/node/api_server/src/tx_sender/result.rs  |  30 +-
 .../src/tx_sender/tests/gas_estimation.rs     |   9 +-
 .../api_server/src/web3/namespaces/debug.rs   |   2 +-
 .../api_server/src/web3/namespaces/eth.rs     |   2 +-
 core/node/api_server/src/web3/testonly.rs     | 140 +++---
 .../node/api_server/src/web3/tests/filters.rs |   4 +-
 core/node/api_server/src/web3/tests/mod.rs    |  56 ++-
 core/node/api_server/src/web3/tests/vm.rs     | 419 ++++++++++++++++--
 core/node/api_server/src/web3/tests/ws.rs     |   6 +-
 core/node/consensus/src/vm.rs                 |  17 +-
 17 files changed, 695 insertions(+), 265 deletions(-)

diff --git a/core/lib/vm_executor/src/oneshot/block.rs b/core/lib/vm_executor/src/oneshot/block.rs
index c820ea794fe3..cc759c032fc1 100644
--- a/core/lib/vm_executor/src/oneshot/block.rs
+++ b/core/lib/vm_executor/src/oneshot/block.rs
@@ -14,7 +14,7 @@ use zksync_types::{
 };
 use zksync_utils::{h256_to_u256, time::seconds_since_epoch};
 
-use super::env::OneshotEnvParameters;
+use super::{env::OneshotEnvParameters, ContractsKind};
 
 /// Block information necessary to execute a transaction / call. Unlike [`ResolvedBlockInfo`], this information is *partially* resolved,
 /// which is beneficial for some data workflows.
@@ -178,7 +178,7 @@ impl ResolvedBlockInfo { } } -impl OneshotEnvParameters { +impl OneshotEnvParameters { pub(super) async fn to_env_inner( &self, connection: &mut Connection<'_, Core>, @@ -194,13 +194,15 @@ impl OneshotEnvParameters { ) .await?; - let (system, l1_batch) = self.prepare_env( - execution_mode, - resolved_block_info, - next_block, - fee_input, - enforced_base_fee, - ); + let (system, l1_batch) = self + .prepare_env( + execution_mode, + resolved_block_info, + next_block, + fee_input, + enforced_base_fee, + ) + .await?; Ok(OneshotEnv { system, l1_batch, @@ -208,14 +210,14 @@ impl OneshotEnvParameters { }) } - fn prepare_env( + async fn prepare_env( &self, execution_mode: TxExecutionMode, resolved_block_info: &ResolvedBlockInfo, next_block: L2BlockEnv, fee_input: BatchFeeInput, enforced_base_fee: Option, - ) -> (SystemEnv, L1BatchEnv) { + ) -> anyhow::Result<(SystemEnv, L1BatchEnv)> { let &Self { operator_account, validation_computational_gas_limit, @@ -228,11 +230,9 @@ impl OneshotEnvParameters { version: resolved_block_info.protocol_version, base_system_smart_contracts: self .base_system_contracts - .get_by_protocol_version( - resolved_block_info.protocol_version, - resolved_block_info.use_evm_emulator, - ) - .clone(), + .base_system_contracts(resolved_block_info) + .await + .context("failed getting base system contracts")?, bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, execution_mode, default_validation_computational_gas_limit: validation_computational_gas_limit, @@ -247,7 +247,7 @@ impl OneshotEnvParameters { enforced_base_fee, first_l2_block: next_block, }; - (system_env, l1_batch_env) + Ok((system_env, l1_batch_env)) } } diff --git a/core/lib/vm_executor/src/oneshot/contracts.rs b/core/lib/vm_executor/src/oneshot/contracts.rs index bc433a070b3e..dc9ef0c0e8df 100644 --- a/core/lib/vm_executor/src/oneshot/contracts.rs +++ b/core/lib/vm_executor/src/oneshot/contracts.rs @@ -1,9 +1,52 @@ +use std::{fmt, marker::PhantomData}; + +use async_trait::async_trait; use zksync_contracts::BaseSystemContracts; use zksync_types::ProtocolVersionId; +use super::ResolvedBlockInfo; +use crate::shared::Sealed; + +/// Kind of base system contracts used as a marker in the [`BaseSystemContractsProvider`] trait. +pub trait ContractsKind: fmt::Debug + Sealed {} + +/// Marker for [`BaseSystemContracts`] used for gas estimation. +#[derive(Debug)] +pub struct EstimateGas(()); + +impl Sealed for EstimateGas {} +impl ContractsKind for EstimateGas {} + +/// Marker for [`BaseSystemContracts`] used for calls and transaction execution. +#[derive(Debug)] +pub struct CallOrExecute(()); + +impl Sealed for CallOrExecute {} +impl ContractsKind for CallOrExecute {} + +/// Provider of [`BaseSystemContracts`] for oneshot execution. +/// +/// The main implementation of this trait is [`MultiVMBaseSystemContracts`], which selects contracts +/// based on [`ProtocolVersionId`]. +#[async_trait] +pub trait BaseSystemContractsProvider: fmt::Debug + Send + Sync { + /// Returns base system contracts for executing a transaction on top of the provided block. + /// + /// Implementations are encouraged to cache returned contracts for performance; caching is **not** performed + /// by the caller. + /// + /// # Errors + /// + /// Returned errors are treated as unrecoverable for a particular execution, but further executions are not affected. 
+ async fn base_system_contracts( + &self, + block_info: &ResolvedBlockInfo, + ) -> anyhow::Result; +} + /// System contracts (bootloader and default account abstraction) for all supported VM versions. -#[derive(Debug, Clone)] -pub(super) struct MultiVMBaseSystemContracts { +#[derive(Debug)] +pub struct MultiVMBaseSystemContracts { /// Contracts to be used for pre-virtual-blocks protocol versions. pre_virtual_blocks: BaseSystemContracts, /// Contracts to be used for post-virtual-blocks protocol versions. @@ -24,11 +67,12 @@ pub(super) struct MultiVMBaseSystemContracts { vm_1_5_0_increased_memory: BaseSystemContracts, /// Contracts to be used after the protocol defense upgrade vm_protocol_defense: BaseSystemContracts, + // We use `fn() -> C` marker so that the `MultiVMBaseSystemContracts` unconditionally implements `Send + Sync`. + _contracts_kind: PhantomData C>, } -impl MultiVMBaseSystemContracts { - /// Gets contracts for a certain version. - pub fn get_by_protocol_version( +impl MultiVMBaseSystemContracts { + fn get_by_protocol_version( &self, version: ProtocolVersionId, use_evm_emulator: bool, @@ -71,8 +115,11 @@ impl MultiVMBaseSystemContracts { base } } +} - pub(super) fn load_estimate_gas_blocking() -> Self { +impl MultiVMBaseSystemContracts { + /// Returned system contracts (mainly the bootloader) are tuned to provide accurate execution metrics. + pub fn load_estimate_gas_blocking() -> Self { Self { pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(), post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(), @@ -86,10 +133,14 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_increased_memory: BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), vm_protocol_defense: BaseSystemContracts::estimate_gas_post_protocol_defense(), + _contracts_kind: PhantomData, } } +} - pub(super) fn load_eth_call_blocking() -> Self { +impl MultiVMBaseSystemContracts { + /// Returned system contracts (mainly the bootloader) are tuned to provide better UX (e.g. revert messages). + pub fn load_eth_call_blocking() -> Self { Self { pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(), @@ -103,6 +154,18 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory( ), vm_protocol_defense: BaseSystemContracts::playground_post_protocol_defense(), + _contracts_kind: PhantomData, } } } + +#[async_trait] +impl BaseSystemContractsProvider for MultiVMBaseSystemContracts { + async fn base_system_contracts( + &self, + block_info: &ResolvedBlockInfo, + ) -> anyhow::Result { + Ok(self + .get_by_protocol_version(block_info.protocol_version(), block_info.use_evm_emulator())) + } +} diff --git a/core/lib/vm_executor/src/oneshot/env.rs b/core/lib/vm_executor/src/oneshot/env.rs index 51154d561ec6..6d70c3cfde96 100644 --- a/core/lib/vm_executor/src/oneshot/env.rs +++ b/core/lib/vm_executor/src/oneshot/env.rs @@ -1,19 +1,12 @@ -use std::marker::PhantomData; +use std::sync::Arc; -use anyhow::Context; use zksync_dal::{Connection, Core}; use zksync_multivm::interface::{OneshotEnv, TxExecutionMode}; use zksync_types::{fee_model::BatchFeeInput, l2::L2Tx, AccountTreeId, L2ChainId}; -use crate::oneshot::{contracts::MultiVMBaseSystemContracts, ResolvedBlockInfo}; - -/// Marker for [`OneshotEnvParameters`] used for gas estimation. 
-#[derive(Debug)] -pub struct EstimateGas(()); - -/// Marker for [`OneshotEnvParameters`] used for calls and/or transaction execution. -#[derive(Debug)] -pub struct CallOrExecute(()); +use super::{ + BaseSystemContractsProvider, CallOrExecute, ContractsKind, EstimateGas, ResolvedBlockInfo, +}; /// Oneshot environment parameters that are expected to be constant or rarely change during the program lifetime. /// These parameters can be used to create [a full environment](OneshotEnv) for transaction / call execution. @@ -21,15 +14,29 @@ pub struct CallOrExecute(()); /// Notably, these parameters include base system contracts (bootloader and default account abstraction) for all supported /// VM versions. #[derive(Debug)] -pub struct OneshotEnvParameters { +pub struct OneshotEnvParameters { pub(super) chain_id: L2ChainId, - pub(super) base_system_contracts: MultiVMBaseSystemContracts, + pub(super) base_system_contracts: Arc>, pub(super) operator_account: AccountTreeId, pub(super) validation_computational_gas_limit: u32, - _ty: PhantomData, } -impl OneshotEnvParameters { +impl OneshotEnvParameters { + /// Creates env parameters. + pub fn new( + base_system_contracts: Arc>, + chain_id: L2ChainId, + operator_account: AccountTreeId, + validation_computational_gas_limit: u32, + ) -> Self { + Self { + chain_id, + base_system_contracts, + operator_account, + validation_computational_gas_limit, + } + } + /// Returns gas limit for account validation of transactions. pub fn validation_computational_gas_limit(&self) -> u32 { self.validation_computational_gas_limit @@ -37,27 +44,6 @@ impl OneshotEnvParameters { } impl OneshotEnvParameters { - /// Creates env parameters for gas estimation. - /// - /// System contracts (mainly, bootloader) for these params are tuned to provide accurate - /// execution metrics. - pub async fn for_gas_estimation( - chain_id: L2ChainId, - operator_account: AccountTreeId, - ) -> anyhow::Result { - Ok(Self { - chain_id, - base_system_contracts: tokio::task::spawn_blocking( - MultiVMBaseSystemContracts::load_estimate_gas_blocking, - ) - .await - .context("failed loading system contracts for gas estimation")?, - operator_account, - validation_computational_gas_limit: u32::MAX, - _ty: PhantomData, - }) - } - /// Prepares environment for gas estimation. pub async fn to_env( &self, @@ -78,28 +64,6 @@ impl OneshotEnvParameters { } impl OneshotEnvParameters { - /// Creates env parameters for transaction / call execution. - /// - /// System contracts (mainly, bootloader) for these params tuned to provide better UX - /// experience (e.g. revert messages). - pub async fn for_execution( - chain_id: L2ChainId, - operator_account: AccountTreeId, - validation_computational_gas_limit: u32, - ) -> anyhow::Result { - Ok(Self { - chain_id, - base_system_contracts: tokio::task::spawn_blocking( - MultiVMBaseSystemContracts::load_eth_call_blocking, - ) - .await - .context("failed loading system contracts for calls")?, - operator_account, - validation_computational_gas_limit, - _ty: PhantomData, - }) - } - /// Prepares environment for a call. 
pub async fn to_call_env( &self, diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs index cb75f396b5d5..018e5abded6f 100644 --- a/core/lib/vm_executor/src/oneshot/mod.rs +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -40,7 +40,11 @@ use zksync_utils::{h256_to_u256, u256_to_h256}; pub use self::{ block::{BlockInfo, ResolvedBlockInfo}, - env::{CallOrExecute, EstimateGas, OneshotEnvParameters}, + contracts::{ + BaseSystemContractsProvider, CallOrExecute, ContractsKind, EstimateGas, + MultiVMBaseSystemContracts, + }, + env::OneshotEnvParameters, mock::MockOneshotExecutor, }; diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index 14ac37e59368..bdd574625888 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -119,9 +119,16 @@ impl SandboxExecutor { } pub(crate) async fn mock(executor: MockOneshotExecutor) -> Self { + Self::custom_mock(executor, SandboxExecutorOptions::mock().await) + } + + pub(crate) fn custom_mock( + executor: MockOneshotExecutor, + options: SandboxExecutorOptions, + ) -> Self { Self { engine: SandboxExecutorEngine::Mock(executor), - options: SandboxExecutorOptions::mock().await, + options, storage_caches: None, } } diff --git a/core/node/api_server/src/tx_sender/gas_estimation.rs b/core/node/api_server/src/tx_sender/gas_estimation.rs index 44e568ce4183..b4a05a0756b6 100644 --- a/core/node/api_server/src/tx_sender/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/gas_estimation.rs @@ -131,10 +131,7 @@ impl TxSender { if let Some(pivot) = initial_pivot { let iteration_started_at = Instant::now(); - let (result, _) = estimator - .step(pivot) - .await - .context("estimate_gas step failed")?; + let (result, _) = estimator.step(pivot).await?; Self::adjust_search_bounds(&mut lower_bound, &mut upper_bound, pivot, &result); tracing::trace!( @@ -151,10 +148,7 @@ impl TxSender { // or normal execution errors, so we just hope that increasing the // gas limit will make the transaction successful let iteration_started_at = Instant::now(); - let (result, _) = estimator - .step(mid) - .await - .context("estimate_gas step failed")?; + let (result, _) = estimator.step(mid).await?; Self::adjust_search_bounds(&mut lower_bound, &mut upper_bound, mid, &result); tracing::trace!( @@ -206,7 +200,11 @@ impl TxSender { tx.initiator_account(), tx.execute.value ); - return Err(SubmitTxError::InsufficientFundsForTransfer); + return Err(SubmitTxError::NotEnoughBalanceForFeeValue( + balance, + 0.into(), + tx.execute.value, + )); } } Ok(()) @@ -393,10 +391,7 @@ impl<'a> GasEstimator<'a> { // For L2 transactions, we estimate the amount of gas needed to cover for the pubdata by creating a transaction with infinite gas limit, // and getting how much pubdata it used. - let (result, _) = self - .unadjusted_step(self.max_gas_limit) - .await - .context("estimate_gas step failed")?; + let (result, _) = self.unadjusted_step(self.max_gas_limit).await?; // If the transaction has failed with such a large gas limit, we return an API error here right away, // since the inferred gas bounds would be unreliable in this case. 
result.check_api_call_result()?; @@ -430,7 +425,7 @@ impl<'a> GasEstimator<'a> { async fn step( &self, tx_gas_limit: u64, - ) -> anyhow::Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics)> { + ) -> Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics), SubmitTxError> { let gas_limit_with_overhead = tx_gas_limit + self.tx_overhead(tx_gas_limit); // We need to ensure that we never use a gas limit that is higher than the maximum allowed let forced_gas_limit = @@ -441,13 +436,16 @@ impl<'a> GasEstimator<'a> { pub(super) async fn unadjusted_step( &self, forced_gas_limit: u64, - ) -> anyhow::Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics)> { + ) -> Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics), SubmitTxError> { let mut tx = self.transaction.clone(); match &mut tx.common_data { ExecuteTransactionCommon::L1(l1_common_data) => { l1_common_data.gas_limit = forced_gas_limit.into(); - let required_funds = - l1_common_data.gas_limit * l1_common_data.max_fee_per_gas + tx.execute.value; + // Since `tx.execute.value` is supplied by the client and is not checked against the current balance (unlike for L2 transactions), + // we may hit an integer overflow. Ditto for protocol upgrade transactions below. + let required_funds = (l1_common_data.gas_limit * l1_common_data.max_fee_per_gas) + .checked_add(tx.execute.value) + .ok_or(SubmitTxError::MintedAmountOverflow)?; l1_common_data.to_mint = required_funds; } ExecuteTransactionCommon::L2(l2_common_data) => { @@ -455,8 +453,9 @@ impl<'a> GasEstimator<'a> { } ExecuteTransactionCommon::ProtocolUpgrade(common_data) => { common_data.gas_limit = forced_gas_limit.into(); - let required_funds = - common_data.gas_limit * common_data.max_fee_per_gas + tx.execute.value; + let required_funds = (common_data.gas_limit * common_data.max_fee_per_gas) + .checked_add(tx.execute.value) + .ok_or(SubmitTxError::MintedAmountOverflow)?; common_data.to_mint = required_funds; } } @@ -485,10 +484,7 @@ impl<'a> GasEstimator<'a> { suggested_gas_limit: u64, estimated_fee_scale_factor: f64, ) -> Result { - let (result, tx_metrics) = self - .step(suggested_gas_limit) - .await - .context("final estimate_gas step failed")?; + let (result, tx_metrics) = self.step(suggested_gas_limit).await?; result.into_api_call_result()?; self.sender .ensure_tx_executable(&self.transaction, &tx_metrics, false)?; diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 2dbc0d5a0dd6..38794fe71371 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -29,7 +29,9 @@ use zksync_types::{ MAX_NEW_FACTORY_DEPS, U256, }; use zksync_utils::h256_to_u256; -use zksync_vm_executor::oneshot::{CallOrExecute, EstimateGas, OneshotEnvParameters}; +use zksync_vm_executor::oneshot::{ + CallOrExecute, EstimateGas, MultiVMBaseSystemContracts, OneshotEnvParameters, +}; pub(super) use self::{gas_estimation::BinarySearchKind, result::SubmitTxError}; use self::{master_pool_sink::MasterPoolSink, result::ApiCallResult, tx_sink::TxSink}; @@ -102,15 +104,28 @@ impl SandboxExecutorOptions { operator_account: AccountTreeId, validation_computational_gas_limit: u32, ) -> anyhow::Result { + let estimate_gas_contracts = + tokio::task::spawn_blocking(MultiVMBaseSystemContracts::load_estimate_gas_blocking) + .await + .context("failed loading base contracts for gas estimation")?; + let call_contracts = + tokio::task::spawn_blocking(MultiVMBaseSystemContracts::load_eth_call_blocking) + .await + 
.context("failed loading base contracts for calls / tx execution")?; + Ok(Self { - estimate_gas: OneshotEnvParameters::for_gas_estimation(chain_id, operator_account) - .await?, - eth_call: OneshotEnvParameters::for_execution( + estimate_gas: OneshotEnvParameters::new( + Arc::new(estimate_gas_contracts), + chain_id, + operator_account, + u32::MAX, + ), + eth_call: OneshotEnvParameters::new( + Arc::new(call_contracts), chain_id, operator_account, validation_computational_gas_limit, - ) - .await?, + ), }) } diff --git a/core/node/api_server/src/tx_sender/result.rs b/core/node/api_server/src/tx_sender/result.rs index a49313f0dd62..e2a51ae8e9a8 100644 --- a/core/node/api_server/src/tx_sender/result.rs +++ b/core/node/api_server/src/tx_sender/result.rs @@ -24,8 +24,6 @@ pub enum SubmitTxError { GasLimitIsTooBig, #[error("{0}")] Unexecutable(String), - #[error("too many transactions")] - RateLimitExceeded, #[error("server shutting down")] ServerShuttingDown, #[error("failed to include transaction in the system. reason: {0}")] @@ -49,29 +47,23 @@ pub enum SubmitTxError { that caused this error. Error description: {0}" )] UnexpectedVMBehavior(String), - #[error("pubdata price limit is too low, ensure that the price limit is correct")] - UnrealisticPubdataPriceLimit, #[error( "too many factory dependencies in the transaction. {0} provided, while only {1} allowed" )] TooManyFactoryDependencies(usize, usize), - #[error("max fee per gas higher than 2^32")] - FeePerGasTooHigh, - #[error("max fee per pubdata byte higher than 2^32")] - FeePerPubdataByteTooHigh, - /// InsufficientFundsForTransfer is returned if the transaction sender doesn't - /// have enough funds for transfer. - #[error("insufficient balance for transfer")] - InsufficientFundsForTransfer, /// IntrinsicGas is returned if the transaction is specified to use less gas /// than required to start the invocation. #[error("intrinsic gas too low")] IntrinsicGas, - /// Error returned from main node - #[error("{0}")] - ProxyError(#[from] EnrichedClientError), #[error("not enough gas to publish compressed bytecodes")] FailedToPublishCompressedBytecodes, + /// Currently only triggered during gas estimation for L1 and protocol upgrade transactions. + #[error("integer overflow computing base token amount to mint")] + MintedAmountOverflow, + + /// Error returned from main node. + #[error("{0}")] + ProxyError(#[from] EnrichedClientError), /// Catch-all internal error (e.g., database error) that should not be exposed to the caller. 
#[error("internal error")] Internal(#[from] anyhow::Error), @@ -88,7 +80,6 @@ impl SubmitTxError { Self::ExecutionReverted(_, _) => "execution-reverted", Self::GasLimitIsTooBig => "gas-limit-is-too-big", Self::Unexecutable(_) => "unexecutable", - Self::RateLimitExceeded => "rate-limit-exceeded", Self::ServerShuttingDown => "shutting-down", Self::BootloaderFailure(_) => "bootloader-failure", Self::ValidationFailed(_) => "validation-failed", @@ -99,14 +90,11 @@ impl SubmitTxError { Self::MaxFeePerGasTooLow => "max-fee-per-gas-too-low", Self::MaxPriorityFeeGreaterThanMaxFee => "max-priority-fee-greater-than-max-fee", Self::UnexpectedVMBehavior(_) => "unexpected-vm-behavior", - Self::UnrealisticPubdataPriceLimit => "unrealistic-pubdata-price-limit", Self::TooManyFactoryDependencies(_, _) => "too-many-factory-dependencies", - Self::FeePerGasTooHigh => "gas-price-limit-too-high", - Self::FeePerPubdataByteTooHigh => "pubdata-price-limit-too-high", - Self::InsufficientFundsForTransfer => "insufficient-funds-for-transfer", Self::IntrinsicGas => "intrinsic-gas", - Self::ProxyError(_) => "proxy-error", Self::FailedToPublishCompressedBytecodes => "failed-to-publish-compressed-bytecodes", + Self::MintedAmountOverflow => "minted-amount-overflow", + Self::ProxyError(_) => "proxy-error", Self::Internal(_) => "internal", } } diff --git a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs index 3fd5fcb51881..4528d9cda12f 100644 --- a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs @@ -298,7 +298,8 @@ async fn insufficient_funds_error_for_transfer() { let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); - let tx = alice.create_transfer(1_000_000_000.into()); + let transferred_value = 1_000_000_000.into(); + let tx = alice.create_transfer(transferred_value); let fee_scale_factor = 1.0; // Without overrides, the transaction should fail because of insufficient balance. let err = tx_sender @@ -312,7 +313,11 @@ async fn insufficient_funds_error_for_transfer() { ) .await .unwrap_err(); - assert_matches!(err, SubmitTxError::InsufficientFundsForTransfer); + assert_matches!( + err, + SubmitTxError::NotEnoughBalanceForFeeValue(balance, fee, value) + if balance.is_zero() && fee.is_zero() && value == transferred_value + ); } async fn test_estimating_gas( diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index 7e99808dbc77..e296fe87faa2 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -262,7 +262,7 @@ impl DebugNamespace { let call = L2Tx::from_request( request.into(), MAX_ENCODED_TX_SIZE, - false, // Even with EVM emulation enabled, calls must specify `to` field + block_args.use_evm_emulator(), )?; let vm_permit = self diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index 4439fc257cfb..5206cd3bc2bb 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -81,7 +81,7 @@ impl EthNamespace { let tx = L2Tx::from_request( request.into(), self.state.api_config.max_tx_size, - false, // Even with EVM emulation enabled, calls must specify `to` field + block_args.use_evm_emulator(), )?; // It is assumed that the previous checks has already enforced that the `max_fee_per_gas` is at most u64. 
diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs index 3b05e235c6d4..2d642b9a04b8 100644 --- a/core/node/api_server/src/web3/testonly.rs +++ b/core/node/api_server/src/web3/testonly.rs @@ -13,7 +13,10 @@ use zksync_types::L2ChainId; use zksync_vm_executor::oneshot::MockOneshotExecutor; use super::{metrics::ApiTransportLabel, *}; -use crate::{execution_sandbox::SandboxExecutor, tx_sender::TxSenderConfig}; +use crate::{ + execution_sandbox::SandboxExecutor, + tx_sender::{SandboxExecutorOptions, TxSenderConfig}, +}; const TEST_TIMEOUT: Duration = Duration::from_secs(90); const POLL_INTERVAL: Duration = Duration::from_millis(50); @@ -103,6 +106,7 @@ pub struct TestServerBuilder { pool: ConnectionPool, api_config: InternalApiConfig, tx_executor: MockOneshotExecutor, + executor_options: Option, method_tracer: Arc, } @@ -113,6 +117,7 @@ impl TestServerBuilder { api_config, pool, tx_executor: MockOneshotExecutor::default(), + executor_options: None, method_tracer: Arc::default(), } } @@ -131,19 +136,17 @@ impl TestServerBuilder { self } + #[must_use] + pub fn with_executor_options(mut self, options: SandboxExecutorOptions) -> Self { + self.executor_options = Some(options); + self + } + /// Builds an HTTP server. pub async fn build_http(self, stop_receiver: watch::Receiver) -> ApiServerHandles { - spawn_server( - ApiTransportLabel::Http, - self.api_config, - self.pool, - None, - self.tx_executor, - self.method_tracer, - stop_receiver, - ) - .await - .0 + self.spawn_server(ApiTransportLabel::Http, None, stop_receiver) + .await + .0 } /// Builds a WS server. @@ -152,64 +155,73 @@ impl TestServerBuilder { websocket_requests_per_minute_limit: Option, stop_receiver: watch::Receiver, ) -> (ApiServerHandles, mpsc::UnboundedReceiver) { - spawn_server( + self.spawn_server( ApiTransportLabel::Ws, - self.api_config, - self.pool, websocket_requests_per_minute_limit, - self.tx_executor, - self.method_tracer, stop_receiver, ) .await } -} -async fn spawn_server( - transport: ApiTransportLabel, - api_config: InternalApiConfig, - pool: ConnectionPool, - websocket_requests_per_minute_limit: Option, - tx_executor: MockOneshotExecutor, - method_tracer: Arc, - stop_receiver: watch::Receiver, -) -> (ApiServerHandles, mpsc::UnboundedReceiver) { - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, vm_barrier) = - create_test_tx_sender(pool.clone(), api_config.l2_chain_id, tx_executor).await; - let (pub_sub_events_sender, pub_sub_events_receiver) = mpsc::unbounded_channel(); - - let mut namespaces = Namespace::DEFAULT.to_vec(); - namespaces.extend([Namespace::Debug, Namespace::Snapshots, Namespace::Unstable]); - let sealed_l2_block_handle = SealedL2BlockNumber::default(); - let bridge_addresses_handle = BridgeAddressesHandle::new(api_config.bridge_addresses.clone()); - - let server_builder = match transport { - ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0), - ApiTransportLabel::Ws => { - let mut builder = ApiBuilder::jsonrpsee_backend(api_config, pool) - .ws(0) - .with_subscriptions_limit(100); - if let Some(websocket_requests_per_minute_limit) = websocket_requests_per_minute_limit { - builder = builder - .with_websocket_requests_per_minute_limit(websocket_requests_per_minute_limit); + async fn spawn_server( + self, + transport: ApiTransportLabel, + websocket_requests_per_minute_limit: Option, + stop_receiver: watch::Receiver, + ) -> (ApiServerHandles, mpsc::UnboundedReceiver) { + let Self { + tx_executor, + 
executor_options, + pool, + api_config, + method_tracer, + } = self; + + let tx_executor = if let Some(options) = executor_options { + SandboxExecutor::custom_mock(tx_executor, options) + } else { + SandboxExecutor::mock(tx_executor).await + }; + let (tx_sender, vm_barrier) = + create_test_tx_sender(pool.clone(), api_config.l2_chain_id, tx_executor).await; + let (pub_sub_events_sender, pub_sub_events_receiver) = mpsc::unbounded_channel(); + + let mut namespaces = Namespace::DEFAULT.to_vec(); + namespaces.extend([Namespace::Debug, Namespace::Snapshots, Namespace::Unstable]); + let sealed_l2_block_handle = SealedL2BlockNumber::default(); + let bridge_addresses_handle = + BridgeAddressesHandle::new(api_config.bridge_addresses.clone()); + + let server_builder = match transport { + ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0), + ApiTransportLabel::Ws => { + let mut builder = ApiBuilder::jsonrpsee_backend(api_config, pool) + .ws(0) + .with_subscriptions_limit(100); + if let Some(websocket_requests_per_minute_limit) = + websocket_requests_per_minute_limit + { + builder = builder.with_websocket_requests_per_minute_limit( + websocket_requests_per_minute_limit, + ); + } + builder } - builder - } - }; - let server_handles = server_builder - .with_polling_interval(POLL_INTERVAL) - .with_tx_sender(tx_sender) - .with_vm_barrier(vm_barrier) - .with_pub_sub_events(pub_sub_events_sender) - .with_method_tracer(method_tracer) - .enable_api_namespaces(namespaces) - .with_sealed_l2_block_handle(sealed_l2_block_handle) - .with_bridge_addresses_handle(bridge_addresses_handle) - .build() - .expect("Unable to build API server") - .run(stop_receiver) - .await - .expect("Failed spawning JSON-RPC server"); - (server_handles, pub_sub_events_receiver) + }; + let server_handles = server_builder + .with_polling_interval(POLL_INTERVAL) + .with_tx_sender(tx_sender) + .with_vm_barrier(vm_barrier) + .with_pub_sub_events(pub_sub_events_sender) + .with_method_tracer(method_tracer) + .enable_api_namespaces(namespaces) + .with_sealed_l2_block_handle(sealed_l2_block_handle) + .with_bridge_addresses_handle(bridge_addresses_handle) + .build() + .expect("Unable to build API server") + .run(stop_receiver) + .await + .expect("Failed spawning JSON-RPC server"); + (server_handles, pub_sub_events_receiver) + } } diff --git a/core/node/api_server/src/web3/tests/filters.rs b/core/node/api_server/src/web3/tests/filters.rs index 7342ce7e979f..c865526815d1 100644 --- a/core/node/api_server/src/web3/tests/filters.rs +++ b/core/node/api_server/src/web3/tests/filters.rs @@ -23,7 +23,7 @@ impl HttpTest for BasicFilterChangesTest { if self.snapshot_recovery { StorageInitialization::empty_recovery() } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } @@ -109,7 +109,7 @@ impl HttpTest for LogFilterChangesTest { if self.snapshot_recovery { StorageInitialization::empty_recovery() } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index a8d90c281a75..c83279709a30 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -16,6 +16,7 @@ use zksync_config::{ }, GenesisConfig, }; +use zksync_contracts::BaseSystemContracts; use zksync_dal::{transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, CoreDal}; use zksync_multivm::interface::{ TransactionExecutionMetrics, TransactionExecutionResult, 
TxExecutionStatus, VmEvent, @@ -66,6 +67,7 @@ use zksync_web3_decl::{ use super::*; use crate::{ testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}, + tx_sender::SandboxExecutorOptions, web3::testonly::TestServerBuilder, }; @@ -143,13 +145,18 @@ async fn setting_response_size_limits() { trait HttpTest: Send + Sync { /// Prepares the storage before the server is started. The default implementation performs genesis. fn storage_initialization(&self) -> StorageInitialization { - StorageInitialization::Genesis + StorageInitialization::genesis() } fn transaction_executor(&self) -> MockOneshotExecutor { MockOneshotExecutor::default() } + /// Allows to override sandbox executor options. + fn executor_options(&self) -> Option { + None + } + fn method_tracer(&self) -> Arc { Arc::default() } @@ -166,7 +173,9 @@ trait HttpTest: Send + Sync { /// Storage initialization strategy. #[derive(Debug)] enum StorageInitialization { - Genesis, + Genesis { + evm_emulator: bool, + }, Recovery { logs: Vec, factory_deps: HashMap>, @@ -177,6 +186,16 @@ impl StorageInitialization { const SNAPSHOT_RECOVERY_BATCH: L1BatchNumber = L1BatchNumber(23); const SNAPSHOT_RECOVERY_BLOCK: L2BlockNumber = L2BlockNumber(23); + const fn genesis() -> Self { + Self::Genesis { + evm_emulator: false, + } + } + + const fn genesis_with_evm() -> Self { + Self::Genesis { evm_emulator: true } + } + fn empty_recovery() -> Self { Self::Recovery { logs: vec![], @@ -190,12 +209,29 @@ impl StorageInitialization { storage: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { match self { - Self::Genesis => { - let params = GenesisParams::load_genesis_params(GenesisConfig { + Self::Genesis { evm_emulator } => { + let mut config = GenesisConfig { l2_chain_id: network_config.zksync_network_id, ..mock_genesis_config() - }) + }; + let mut base_system_contracts = BaseSystemContracts::load_from_disk(); + if evm_emulator { + config.evm_emulator_hash = Some(config.default_aa_hash.unwrap()); + base_system_contracts.evm_emulator = + Some(base_system_contracts.default_aa.clone()); + } else { + assert!(config.evm_emulator_hash.is_none()); + } + + let params = GenesisParams::from_genesis_config( + config, + base_system_contracts, + // We cannot load system contracts with EVM emulator yet because these contracts are missing. + // This doesn't matter for tests because the EVM emulator won't be invoked. + get_system_smart_contracts(false), + ) .unwrap(); + if storage.blocks_dal().is_genesis_needed().await? 
{ insert_genesis_batch(storage, ¶ms).await?; } @@ -254,11 +290,13 @@ async fn test_http_server(test: impl HttpTest) { let genesis = GenesisConfig::for_tests(); let mut api_config = InternalApiConfig::new(&web3_config, &contracts_config, &genesis); api_config.filters_disabled = test.filters_disabled(); - let mut server_handles = TestServerBuilder::new(pool.clone(), api_config) + let mut server_builder = TestServerBuilder::new(pool.clone(), api_config) .with_tx_executor(test.transaction_executor()) - .with_method_tracer(test.method_tracer()) - .build_http(stop_receiver) - .await; + .with_method_tracer(test.method_tracer()); + if let Some(executor_options) = test.executor_options() { + server_builder = server_builder.with_executor_options(executor_options); + } + let mut server_handles = server_builder.build_http(stop_receiver).await; let local_addr = server_handles.wait_until_ready().await; let client = Client::http(format!("http://{local_addr}/").parse().unwrap()) diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index 1f843e06fabf..45128f579cda 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -9,16 +9,21 @@ use std::{ }; use api::state_override::{OverrideAccount, StateOverride}; +use test_casing::test_casing; +use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; use zksync_multivm::interface::{ - ExecutionResult, VmExecutionLogs, VmExecutionResultAndLogs, VmRevertReason, + ExecutionResult, OneshotEnv, VmExecutionLogs, VmExecutionResultAndLogs, VmRevertReason, }; use zksync_types::{ api::ApiStorageLog, fee_model::BatchFeeInput, get_intrinsic_constants, transaction_request::CallRequest, K256PrivateKey, L2ChainId, PackedEthSignature, - StorageLogKind, StorageLogWithPreviousValue, U256, + StorageLogKind, StorageLogWithPreviousValue, Transaction, U256, }; use zksync_utils::u256_to_h256; -use zksync_vm_executor::oneshot::MockOneshotExecutor; +use zksync_vm_executor::oneshot::{ + BaseSystemContractsProvider, ContractsKind, MockOneshotExecutor, OneshotEnvParameters, + ResolvedBlockInfo, +}; use zksync_web3_decl::namespaces::DebugNamespaceClient; use super::*; @@ -61,6 +66,59 @@ impl ExpectedFeeInput { } } +/// Mock base contracts provider. Necessary to use with EVM emulator because bytecode of the real emulator is not available yet. +#[derive(Debug)] +struct BaseContractsWithMockEvmEmulator(BaseSystemContracts); + +impl Default for BaseContractsWithMockEvmEmulator { + fn default() -> Self { + let mut contracts = BaseSystemContracts::load_from_disk(); + contracts.evm_emulator = Some(contracts.default_aa.clone()); + Self(contracts) + } +} + +#[async_trait] +impl BaseSystemContractsProvider for BaseContractsWithMockEvmEmulator { + async fn base_system_contracts( + &self, + block_info: &ResolvedBlockInfo, + ) -> anyhow::Result { + assert!(block_info.use_evm_emulator()); + Ok(self.0.clone()) + } +} + +fn executor_options_with_evm_emulator() -> SandboxExecutorOptions { + let base_contracts = Arc::::default(); + SandboxExecutorOptions { + estimate_gas: OneshotEnvParameters::new( + base_contracts.clone(), + L2ChainId::default(), + AccountTreeId::default(), + u32::MAX, + ), + eth_call: OneshotEnvParameters::new( + base_contracts, + L2ChainId::default(), + AccountTreeId::default(), + u32::MAX, + ), + } +} + +/// Fetches base contract hashes from the genesis block. 
+async fn genesis_contract_hashes(
+ connection: &mut Connection<'_, Core>,
+) -> anyhow::Result<BaseSystemContractsHashes> {
+ Ok(connection
+ .blocks_dal()
+ .get_l2_block_header(L2BlockNumber(0))
+ .await?
+ .context("no genesis block")?
+ .base_system_contracts_hashes)
+}
+
#[derive(Debug, Default)]
struct CallTest {
fee_input: ExpectedFeeInput,
}
@@ -161,19 +219,104 @@ impl HttpTest for CallTest {
store_custom_l2_block(&mut connection, &block_header, &[]).await?;
// Fee input is not scaled further as per `ApiFeeInputProvider` implementation
self.fee_input.expect_custom(block_header.batch_fee_input);
- let call_request = CallTest::call_request(b"block=3");
- let call_result = client.call(call_request.clone(), None, None).await?;
+ let call_request = Self::call_request(b"block=3");
+ let call_result = client.call(call_request, None, None).await?;
assert_eq!(call_result.0, b"output");
+ let call_request_without_target = CallRequest {
+ to: None,
+ ..Self::call_request(b"block=3")
+ };
+ let err = client
+ .call(call_request_without_target, None, None)
+ .await
+ .unwrap_err();
+ assert_null_to_address_error(&err);
+
Ok(())
}
}
+fn assert_null_to_address_error(error: &ClientError) {
+ if let ClientError::Call(error) = error {
+ assert_eq!(error.code(), 3);
+ assert!(error.message().contains("toAddressIsNull"), "{error:?}");
+ assert!(error.data().is_none(), "{error:?}");
+ } else {
+ panic!("Unexpected error: {error:?}");
+ }
+}
+
#[tokio::test]
async fn call_method_basics() {
test_http_server(CallTest::default()).await;
}
+fn evm_emulator_responses(tx: &Transaction, env: &OneshotEnv) -> ExecutionResult {
+ assert!(env
+ .system
+ .base_system_smart_contracts
+ .evm_emulator
+ .is_some());
+ match tx.execute.calldata.as_slice() {
+ b"no_target" => assert_eq!(tx.recipient_account(), None),
+ _ => assert!(tx.recipient_account().is_some()),
+ }
+ ExecutionResult::Success {
+ output: b"output".to_vec(),
+ }
+}
+
+#[derive(Debug)]
+struct CallTestWithEvmEmulator;
+
+#[async_trait]
+impl HttpTest for CallTestWithEvmEmulator {
+ fn storage_initialization(&self) -> StorageInitialization {
+ StorageInitialization::genesis_with_evm()
+ }
+
+ fn transaction_executor(&self) -> MockOneshotExecutor {
+ let mut executor = MockOneshotExecutor::default();
+ executor.set_call_responses(evm_emulator_responses);
+ executor
+ }
+
+ fn executor_options(&self) -> Option<SandboxExecutorOptions> {
+ Some(executor_options_with_evm_emulator())
+ }
+
+ async fn test(
+ &self,
+ client: &DynClient<L2>,
+ pool: &ConnectionPool<Core>,
+ ) -> anyhow::Result<()> {
+ // Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly.
+ let mut connection = pool.connection().await?;
+ let block_header = L2BlockHeader {
+ base_system_contracts_hashes: genesis_contract_hashes(&mut connection).await?,
+ ..create_l2_block(1)
+ };
+ store_custom_l2_block(&mut connection, &block_header, &[]).await?;
+
+ let call_result = client.call(CallTest::call_request(&[]), None, None).await?;
+ assert_eq!(call_result.0, b"output");
+
+ let call_request_without_target = CallRequest {
+ to: None,
+ ..CallTest::call_request(b"no_target")
+ };
+ let call_result = client.call(call_request_without_target, None, None).await?;
+ assert_eq!(call_result.0, b"output");
+ Ok(())
+ }
+}
+
+#[tokio::test]
+async fn call_method_with_evm_emulator() {
+ test_http_server(CallTestWithEvmEmulator).await;
+}
+
#[derive(Debug, Default)]
struct CallTestAfterSnapshotRecovery {
fee_input: ExpectedFeeInput,
@@ -253,7 +396,11 @@ impl SendRawTransactionTest {
value: 123_456.into(),
gas: (get_intrinsic_constants().l2_tx_intrinsic_gas * 2).into(),
gas_price: StateKeeperConfig::for_tests().minimal_l2_gas_price.into(),
- input: vec![1, 2, 3, 4].into(),
+ input: if include_to {
+ vec![1, 2, 3, 4].into()
+ } else {
+ b"no_target".to_vec().into()
+ },
..api::TransactionRequest::default()
};
let data = tx_request.get_rlp().unwrap();
@@ -288,7 +435,7 @@ impl HttpTest for SendRawTransactionTest {
factory_deps: HashMap::default(),
}
} else {
- StorageInitialization::Genesis
+ StorageInitialization::genesis()
}
}
@@ -344,16 +491,6 @@ async fn send_raw_transaction_after_snapshot_recovery() {
.await;
}
-fn assert_null_to_address_error(error: &ClientError) {
- if let ClientError::Call(error) = error {
- assert_eq!(error.code(), 3);
- assert!(error.message().contains("toAddressIsNull"), "{error:?}");
- assert!(error.data().is_none(), "{error:?}");
- } else {
- panic!("Unexpected error: {error:?}");
- }
-}
-
#[derive(Debug)]
struct SendRawTransactionWithoutToAddressTest;
@@ -388,6 +525,56 @@ async fn send_raw_transaction_fails_without_to_address() {
test_http_server(SendRawTransactionWithoutToAddressTest).await;
}
+#[derive(Debug)]
+struct SendRawTransactionTestWithEvmEmulator;
+
+#[async_trait]
+impl HttpTest for SendRawTransactionTestWithEvmEmulator {
+ fn storage_initialization(&self) -> StorageInitialization {
+ StorageInitialization::genesis_with_evm()
+ }
+
+ fn transaction_executor(&self) -> MockOneshotExecutor {
+ let mut executor = MockOneshotExecutor::default();
+ executor.set_tx_responses(evm_emulator_responses);
+ executor
+ }
+
+ fn executor_options(&self) -> Option<SandboxExecutorOptions> {
+ Some(executor_options_with_evm_emulator())
+ }
+
+ async fn test(
+ &self,
+ client: &DynClient<L2>,
+ pool: &ConnectionPool<Core>,
+ ) -> anyhow::Result<()> {
+ // Manually set sufficient balance for the transaction account.
+ let mut storage = pool.connection().await?;
+ storage
+ .storage_logs_dal()
+ .append_storage_logs(
+ L2BlockNumber(0),
+ &[SendRawTransactionTest::balance_storage_log()],
+ )
+ .await?;
+
+ let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(true);
+ let send_result = client.send_raw_transaction(tx_bytes.into()).await?;
+ assert_eq!(send_result, tx_hash);
+
+ let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(false);
+ let send_result = client.send_raw_transaction(tx_bytes.into()).await?;
+ assert_eq!(send_result, tx_hash);
+ Ok(())
+ }
+}
+
+#[tokio::test]
+async fn send_raw_transaction_with_evm_emulator() {
+ test_http_server(SendRawTransactionTestWithEvmEmulator).await;
+}
+
#[derive(Debug)]
struct SendTransactionWithDetailedOutputTest;
@@ -602,6 +789,16 @@ impl HttpTest for TraceCallTest {
let call_result = client.trace_call(call_request.clone(), None, None).await?;
Self::assert_debug_call(&call_request, &call_result.unwrap_default());
+ let call_request_without_target = CallRequest {
+ to: None,
+ ..CallTest::call_request(b"block=3")
+ };
+ let err = client
+ .call(call_request_without_target, None, None)
+ .await
+ .unwrap_err();
+ assert_null_to_address_error(&err);
+
Ok(())
}
}
@@ -678,16 +875,96 @@ async fn trace_call_after_snapshot_recovery() {
test_http_server(TraceCallTestAfterSnapshotRecovery::default()).await;
}
+#[derive(Debug)]
+struct TraceCallTestWithEvmEmulator;
+
+#[async_trait]
+impl HttpTest for TraceCallTestWithEvmEmulator {
+ fn storage_initialization(&self) -> StorageInitialization {
+ StorageInitialization::genesis_with_evm()
+ }
+
+ fn transaction_executor(&self) -> MockOneshotExecutor {
+ let mut executor = MockOneshotExecutor::default();
+ executor.set_call_responses(evm_emulator_responses);
+ executor
+ }
+
+ fn executor_options(&self) -> Option<SandboxExecutorOptions> {
+ Some(executor_options_with_evm_emulator())
+ }
+
+ async fn test(
+ &self,
+ client: &DynClient<L2>,
+ pool: &ConnectionPool<Core>,
+ ) -> anyhow::Result<()> {
+ // Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly.
+ let mut connection = pool.connection().await?;
+ let block_header = L2BlockHeader {
+ base_system_contracts_hashes: genesis_contract_hashes(&mut connection).await?,
+ ..create_l2_block(1)
+ };
+ store_custom_l2_block(&mut connection, &block_header, &[]).await?;
+
+ client
+ .trace_call(CallTest::call_request(&[]), None, None)
+ .await?;
+
+ let call_request_without_target = CallRequest {
+ to: None,
+ ..CallTest::call_request(b"no_target")
+ };
+ client
+ .trace_call(call_request_without_target, None, None)
+ .await?;
+ Ok(())
+ }
+}
+
+#[tokio::test]
+async fn trace_call_method_with_evm_emulator() {
+ test_http_server(TraceCallTestWithEvmEmulator).await;
+}
+
+#[derive(Debug, Clone, Copy)]
+enum EstimateMethod {
+ EthEstimateGas,
+ ZksEstimateFee,
+ ZksEstimateGasL1ToL2,
+}
+
+impl EstimateMethod {
+ const ALL: [Self; 3] = [
+ Self::EthEstimateGas,
+ Self::ZksEstimateFee,
+ Self::ZksEstimateGasL1ToL2,
+ ];
+
+ async fn query(self, client: &DynClient<L2>, req: CallRequest) -> Result<U256, ClientError> {
+ match self {
+ Self::EthEstimateGas => client.estimate_gas(req, None, None).await,
+ Self::ZksEstimateFee => client
+ .estimate_fee(req, None)
+ .await
+ .map(|fee| fee.gas_limit),
+ Self::ZksEstimateGasL1ToL2 => client.estimate_gas_l1_to_l2(req, None).await,
+ }
+ }
+}
+
#[derive(Debug)]
struct EstimateGasTest {
gas_limit_threshold: Arc<AtomicU32>,
+ method: EstimateMethod,
snapshot_recovery: bool,
}
impl EstimateGasTest {
- fn new(snapshot_recovery: bool) -> Self {
+ fn new(method: EstimateMethod, snapshot_recovery: bool) -> Self {
Self {
gas_limit_threshold: Arc::default(),
+ method,
snapshot_recovery,
}
}
@@ -708,9 +985,12 @@ impl HttpTest for EstimateGasTest {
L2BlockNumber(1)
};
let gas_limit_threshold = self.gas_limit_threshold.clone();
+ let should_set_nonce = !matches!(self.method, EstimateMethod::ZksEstimateGasL1ToL2);
tx_executor.set_tx_responses(move |tx, env| {
assert_eq!(tx.execute.calldata(), [] as [u8; 0]);
- assert_eq!(tx.nonce(), Some(Nonce(0)));
+ if should_set_nonce {
+ assert_eq!(tx.nonce(), Some(Nonce(0)));
+ }
assert_eq!(env.l1_batch.first_l2_block.number, pending_block_number.0);
let gas_limit_threshold = gas_limit_threshold.load(Ordering::SeqCst);
@@ -733,8 +1013,9 @@ impl HttpTest for EstimateGasTest {
let l2_transaction = create_l2_transaction(10, 100);
for threshold in [10_000, 50_000, 100_000, 1_000_000] {
self.gas_limit_threshold.store(threshold, Ordering::Relaxed);
- let output = client
- .estimate_gas(l2_transaction.clone().into(), None, None)
+ let output = self
+ .method
+ .query(client, l2_transaction.clone().into())
.await?;
assert!(
output >= U256::from(threshold),
@@ -759,19 +1040,17 @@ impl HttpTest for EstimateGasTest {
let mut call_request = CallRequest::from(l2_transaction);
call_request.from = Some(SendRawTransactionTest::private_key().address());
call_request.value = Some(1_000_000.into());
- client
- .estimate_gas(call_request.clone(), None, None)
- .await?;
+
+ self.method.query(client, call_request.clone()).await?;
call_request.value = Some(U256::max_value());
- let error = client
- .estimate_gas(call_request, None, None)
- .await
- .unwrap_err();
+ let error = self.method.query(client, call_request).await.unwrap_err();
if let ClientError::Call(error) = error {
let error_msg = error.message();
+ // L1 and L2 transactions have differing error messages in this case.
assert!(
- error_msg.to_lowercase().contains("insufficient"),
+ error_msg.to_lowercase().contains("insufficient")
+ || error_msg.to_lowercase().contains("overflow"),
"{error_msg}"
);
} else {
@@ -781,14 +1060,16 @@
}
}
+#[test_casing(3, EstimateMethod::ALL)]
#[tokio::test]
-async fn estimate_gas_basics() {
- test_http_server(EstimateGasTest::new(false)).await;
+async fn estimate_gas_basics(method: EstimateMethod) {
+ test_http_server(EstimateGasTest::new(method, false)).await;
}
+#[test_casing(3, EstimateMethod::ALL)]
#[tokio::test]
-async fn estimate_gas_after_snapshot_recovery() {
- test_http_server(EstimateGasTest::new(true)).await;
+async fn estimate_gas_after_snapshot_recovery(method: EstimateMethod) {
+ test_http_server(EstimateGasTest::new(method, true)).await;
}
#[derive(Debug)]
@@ -845,9 +1126,7 @@ impl HttpTest for EstimateGasWithStateOverrideTest {
if let ClientError::Call(error) = error {
let error_msg = error.message();
assert!(
- error_msg
- .to_lowercase()
- .contains("insufficient balance for transfer"),
+ error_msg.to_lowercase().contains("insufficient funds"),
"{error_msg}"
);
} else {
@@ -859,15 +1138,17 @@ impl HttpTest for EstimateGasWithStateOverrideTest {
#[tokio::test]
async fn estimate_gas_with_state_override() {
- let inner = EstimateGasTest::new(false);
+ let inner = EstimateGasTest::new(EstimateMethod::EthEstimateGas, false);
test_http_server(EstimateGasWithStateOverrideTest { inner }).await;
}
#[derive(Debug)]
-struct EstimateGasWithoutToAddessTest;
+struct EstimateGasWithoutToAddressTest {
+ method: EstimateMethod,
+}
#[async_trait]
-impl HttpTest for EstimateGasWithoutToAddessTest {
+impl HttpTest for EstimateGasWithoutToAddressTest {
async fn test(
&self,
client: &DynClient<L2>,
@@ -876,8 +1157,9 @@ impl HttpTest for EstimateGasWithoutToAddessTest {
let mut l2_transaction = create_l2_transaction(10, 100);
l2_transaction.execute.contract_address = None;
l2_transaction.common_data.signature = vec![]; // Remove invalidated signature so that it doesn't trip estimation logic
- let err = client
- .estimate_gas(l2_transaction.clone().into(), None, None)
+ let err = self
+ .method
+ .query(client, l2_transaction.into())
.await
.unwrap_err();
assert_null_to_address_error(&err);
Ok(())
}
}
+#[test_casing(3, EstimateMethod::ALL)]
+#[tokio::test]
+async fn estimate_gas_fails_without_to_address(method: EstimateMethod) {
+ test_http_server(EstimateGasWithoutToAddressTest { method }).await;
+}
+
+#[derive(Debug)]
+struct EstimateGasTestWithEvmEmulator {
+ method: EstimateMethod,
+}
+
+#[async_trait]
+impl HttpTest for EstimateGasTestWithEvmEmulator {
+ fn storage_initialization(&self) -> StorageInitialization {
+ StorageInitialization::genesis_with_evm()
+ }
+
+ fn transaction_executor(&self) -> MockOneshotExecutor {
+ let mut executor = MockOneshotExecutor::default();
+ executor.set_tx_responses(evm_emulator_responses);
+ executor
+ }
+
+ fn executor_options(&self) -> Option<SandboxExecutorOptions> {
+ Some(executor_options_with_evm_emulator())
+ }
+
+ async fn test(
+ &self,
+ client: &DynClient<L2>,
+ _pool: &ConnectionPool<Core>,
+ ) -> anyhow::Result<()> {
+ let call_request = CallRequest {
+ from: Some(Address::repeat_byte(1)),
+ to: Some(Address::repeat_byte(2)),
+ ..CallRequest::default()
+ };
+ self.method.query(client, call_request).await?;
+
+ let call_request = CallRequest {
+ from: Some(Address::repeat_byte(1)),
+ to: None,
+ data: Some(b"no_target".to_vec().into()),
+ ..CallRequest::default()
}; + self.method.query(client, call_request).await?; + Ok(()) + } +} + +#[test_casing(3, EstimateMethod::ALL)] #[tokio::test] -async fn estimate_gas_fails_without_to_address() { - test_http_server(EstimateGasWithoutToAddessTest).await; +async fn estimate_gas_with_evm_emulator(method: EstimateMethod) { + test_http_server(EstimateGasTestWithEvmEmulator { method }).await; } diff --git a/core/node/api_server/src/web3/tests/ws.rs b/core/node/api_server/src/web3/tests/ws.rs index 28b2e2beb554..008747a63bcc 100644 --- a/core/node/api_server/src/web3/tests/ws.rs +++ b/core/node/api_server/src/web3/tests/ws.rs @@ -147,7 +147,7 @@ async fn notifiers_start_after_snapshot_recovery() { trait WsTest: Send + Sync { /// Prepares the storage before the server is started. The default implementation performs genesis. fn storage_initialization(&self) -> StorageInitialization { - StorageInitialization::Genesis + StorageInitialization::genesis() } async fn test( @@ -234,7 +234,7 @@ impl WsTest for BasicSubscriptionsTest { if self.snapshot_recovery { StorageInitialization::empty_recovery() } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } @@ -403,7 +403,7 @@ impl WsTest for LogSubscriptionsTest { if self.snapshot_recovery { StorageInitialization::empty_recovery() } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs index 149e6b3ccb03..46b84c34061d 100644 --- a/core/node/consensus/src/vm.rs +++ b/core/node/consensus/src/vm.rs @@ -1,11 +1,15 @@ +use std::sync::Arc; + use anyhow::Context as _; use tokio::runtime::Handle; -use zksync_concurrency::{ctx, error::Wrap as _}; +use zksync_concurrency::{ctx, error::Wrap as _, scope}; use zksync_consensus_roles::attester; use zksync_state::PostgresStorage; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ethabi, fee::Fee, l2::L2Tx, AccountTreeId, L2ChainId, Nonce, U256}; -use zksync_vm_executor::oneshot::{CallOrExecute, MainOneshotExecutor, OneshotEnvParameters}; +use zksync_vm_executor::oneshot::{ + CallOrExecute, MainOneshotExecutor, MultiVMBaseSystemContracts, OneshotEnvParameters, +}; use zksync_vm_interface::{ executor::OneshotExecutor, ExecutionResult, OneshotTracingParams, TxExecutionArgs, }; @@ -23,16 +27,17 @@ pub(crate) struct VM { impl VM { /// Constructs a new `VM` instance. pub async fn new(pool: ConnectionPool) -> Self { + let base_system_contracts = + scope::wait_blocking(MultiVMBaseSystemContracts::load_eth_call_blocking).await; Self { pool, // L2 chain ID and fee account don't seem to matter for calls, hence the use of default values. - options: OneshotEnvParameters::for_execution( + options: OneshotEnvParameters::new( + Arc::new(base_system_contracts), L2ChainId::default(), AccountTreeId::default(), u32::MAX, - ) - .await - .expect("OneshotExecutorOptions"), + ), executor: MainOneshotExecutor::new(usize::MAX), } } From 93d2575e2a122d1395536da4403ccd570c85cf88 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Wed, 16 Oct 2024 11:42:26 +0300 Subject: [PATCH 3/9] feat(contract-verifier): add compiler 1.5.6 (#3104) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ add zksolc 1.5.6 ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. 
- [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- docker/contract-verifier/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 7ed18626a1b3..80938e4ef835 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -47,7 +47,7 @@ RUN mkdir -p /etc/zksolc-bin/vm-1.5.0-a167aa3 && \ chmod +x /etc/zksolc-bin/vm-1.5.0-a167aa3/zksolc # install zksolc 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 5); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 6); do \ mkdir -p /etc/zksolc-bin/$VERSION && \ wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-$VERSION -O /etc/zksolc-bin/$VERSION/zksolc && \ chmod +x /etc/zksolc-bin/$VERSION/zksolc; \ From 0edd7962429b3530ae751bd7cc947c97193dd0ca Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Wed, 16 Oct 2024 15:11:57 +0300 Subject: [PATCH 4/9] feat: Prover e2e test (#2975) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add workflow that runs proving for a genesis batch. Update dockerfiles and docker compose for GPU Add circuit prover to zkstack CLI. Fix HTTP URL for prover gateway. ## Why ❔ To detect possible runtime issues. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/ci-prover-e2e.yml | 127 ++++++++++++++++++ .github/workflows/ci.yml | 6 + .github/workflows/zk-environment-publish.yml | 6 +- .../batch_availability_checker | 40 ++++++ bin/prover_checkers/batch_l1_status_checker | 54 ++++++++ bin/prover_checkers/kill_prover | 12 ++ .../prover_jobs_status_checker | 42 ++++++ core/node/proof_data_handler/src/lib.rs | 2 +- docker-compose-gpu-runner-cuda-12-0.yml | 13 +- docker-compose-gpu-runner.yml | 7 +- ...rfile => 22.04_amd64_cuda_11_8.Dockerfile} | 23 ++-- ...rfile => 22.04_amd64_cuda_12_0.Dockerfile} | 21 +-- prover/crates/lib/prover_fri_types/src/lib.rs | 4 +- .../src/commands/chain/init/configs.rs | 12 ++ .../zkstack/src/commands/prover/args/init.rs | 62 +++++++-- .../commands/prover/args/init_bellman_cuda.rs | 33 +++-- .../zkstack/src/commands/prover/args/run.rs | 69 +++++++++- .../crates/zkstack/src/commands/prover/run.rs | 9 +- zkstack_cli/crates/zkstack/src/consts.rs | 2 + zkstack_cli/crates/zkstack/src/messages.rs | 2 + 20 files changed, 490 insertions(+), 56 deletions(-) create mode 100644 .github/workflows/ci-prover-e2e.yml create mode 100644 bin/prover_checkers/batch_availability_checker create mode 100755 bin/prover_checkers/batch_l1_status_checker create mode 100644 bin/prover_checkers/kill_prover create mode 100755 bin/prover_checkers/prover_jobs_status_checker rename docker/zk-environment/{20.04_amd64_cuda_11_8.Dockerfile => 22.04_amd64_cuda_11_8.Dockerfile} (95%) rename docker/zk-environment/{20.04_amd64_cuda_12_0.Dockerfile => 22.04_amd64_cuda_12_0.Dockerfile} (96%) diff --git a/.github/workflows/ci-prover-e2e.yml b/.github/workflows/ci-prover-e2e.yml new file mode 100644 index 000000000000..105ae1f1485d --- /dev/null +++ b/.github/workflows/ci-prover-e2e.yml @@ -0,0 +1,127 @@ +name: Workflow for testing prover component end-to-end +on: + workflow_call: + +jobs: + e2e-test: + runs-on: [ 
matterlabs-ci-gpu-l4-runner-prover-tests ] + env: + RUNNER_COMPOSE_FILE: "docker-compose-gpu-runner-cuda-12-0.yml" + + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + with: + submodules: "recursive" + fetch-depth: 0 + + - name: Setup environment + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env + + mkdir -p prover_logs + + - name: Start services + run: | + run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull + mkdir -p ./volumes/postgres ./volumes/reth/data + docker-compose -f ${RUNNER_COMPOSE_FILE} --profile runner up -d --wait + ci_run sccache --start-server + + - name: Init + run: | + ci_run git config --global --add safe.directory "*" + ci_run chmod -R +x ./bin + + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + + ci_run zkstack chain create \ + --chain-name proving_chain \ + --chain-id sequential \ + --prover-mode gpu \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode rollup \ + --base-token-address 0x0000000000000000000000000000000000000001 \ + --base-token-price-nominator 1 \ + --base-token-price-denominator 1 \ + --set-as-default true \ + --ignore-prerequisites + + ci_run zkstack ecosystem init --dev --verbose + ci_run zkstack prover init --dev --verbose + + echo "URL=$(grep "http_url" ./chains/proving_chain/configs/general.yaml | awk '{ print $2 }')" >> $GITHUB_ENV + - name: Build prover binaries + run: | + ci_run cargo build --release --workspace --manifest-path=prover/Cargo.toml + - name: Prepare prover subsystem + run: | + ci_run zkstack prover init-bellman-cuda --clone --verbose + ci_run zkstack prover setup-keys --mode=download --region=us --verbose + - name: Run server + run: | + ci_run zkstack server --uring --chain=proving_chain --components=api,tree,eth,state_keeper,commitment_generator,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip &>prover_logs/server.log & + - name: Run Gateway + run: | + ci_run zkstack prover run --component=gateway --docker=false &>prover_logs/gateway.log & + - name: Run Prover Job Monitor + run: | + ci_run zkstack prover run --component=prover-job-monitor --docker=false &>prover_logs/prover-job-monitor.log & + - name: Wait for batch to be passed through gateway + env: + DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain + BATCH_NUMBER: 1 + INTERVAL: 30 + TIMEOUT: 300 + run: | + PASSED_ENV_VARS="DATABASE_URL,BATCH_NUMBER,INTERVAL,TIMEOUT" \ + ci_run ./bin/prover_checkers/batch_availability_checker + - name: Run Witness Generator + run: | + ci_run zkstack prover run --component=witness-generator --round=all-rounds --docker=false &>prover_logs/witness-generator.log & + - name: Run Circuit Prover + run: | + ci_run zkstack prover run --component=circuit-prover --witness-vector-generator-count=10 --docker=false &>prover_logs/circuit_prover.log & + - name: Wait for prover jobs to finish + env: + DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain + BATCH_NUMBER: 1 + INTERVAL: 30 + TIMEOUT: 1200 + run: | + PASSED_ENV_VARS="DATABASE_URL,BATCH_NUMBER,INTERVAL,TIMEOUT" \ + ci_run 
./bin/prover_checkers/prover_jobs_status_checker + + - name: Kill prover & start compressor + run: | + sudo ./bin/prover_checkers/kill_prover + + ci_run zkstack prover run --component=compressor --docker=false &>prover_logs/compressor.log & + - name: Wait for batch to be executed on L1 + env: + DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain + BATCH_NUMBER: 1 + INTERVAL: 30 + TIMEOUT: 600 + run: | + PASSED_ENV_VARS="BATCH_NUMBER,DATABASE_URL,URL,INTERVAL,TIMEOUT" \ + ci_run ./bin/prover_checkers/batch_l1_status_checker + + - name: Upload logs + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 + if: always() + with: + name: prover_logs + path: prover_logs + + - name: Show sccache logs + if: always() + run: | + ci_run sccache --show-stats || true + ci_run cat /tmp/sccache_log.txt || true diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fd9dedf8af4e..47ae3c517517 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -94,6 +94,12 @@ jobs: name: CI for Prover Components uses: ./.github/workflows/ci-prover-reusable.yml + e2e-for-prover: + name: E2E Test for Prover Components + needs: changed_files + if: ${{(needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} + uses: ./.github/workflows/ci-prover-e2e.yml + ci-for-docs: needs: changed_files if: needs.changed_files.outputs.docs == 'true' diff --git a/.github/workflows/zk-environment-publish.yml b/.github/workflows/zk-environment-publish.yml index 73303d15cb30..b9321c8f5d6c 100644 --- a/.github/workflows/zk-environment-publish.yml +++ b/.github/workflows/zk-environment-publish.yml @@ -49,10 +49,10 @@ jobs: - docker/zk-environment/Dockerfile - .github/workflows/zk-environment-publish.yml zk_env_cuda_11_8: - - docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile + - docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile - .github/workflows/zk-environment-publish.yml zk_env_cuda_12: - - docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile + - docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile - .github/workflows/zk-environment-publish.yml get_short_sha: @@ -245,7 +245,7 @@ jobs: if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0 with: - file: docker/zk-environment/20.04_amd64_cuda_${{ matrix.cuda_version }}.Dockerfile + file: docker/zk-environment/22.04_amd64_cuda_${{ matrix.cuda_version }}.Dockerfile push: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/main' ) || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} tags: | us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zk-environment-cuda-${{ matrix.cuda_version }}:latest diff --git a/bin/prover_checkers/batch_availability_checker b/bin/prover_checkers/batch_availability_checker new file mode 100644 index 000000000000..ae7aade2f687 --- /dev/null +++ b/bin/prover_checkers/batch_availability_checker @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Configuration +# DATABASE_URL - The URL of the prover database to connect to +# BATCH_NUMBER - The batch number to check availability for +# INTERVAL - Time interval for polling in seconds +# TIMEOUT - Timeout of script in seconds + +# Start timer +START_TIME=$(date +%s) + +# Loop to query periodically +while 
true; do + # Calculate the elapsed time + CURRENT_TIME=$(date +%s) + ELAPSED_TIME=$((CURRENT_TIME - START_TIME)) + + # Check if the timeout has been reached + if [ $ELAPSED_TIME -ge $TIMEOUT ]; then + echo "Timeout reached. Failing CI..." + exit 1 # Exit with non-zero status to fail CI + fi + + # Run the SQL query and capture the result + RESULT=$(psql $DATABASE_URL -c "SELECT count(*) FROM witness_inputs_fri WHERE l1_batch_number = $BATCH_NUMBER;" -t -A) + + # Check if the result is 1 + if [ "$RESULT" -eq 1 ]; then + echo "Query result is 1. Success!" + exit 0 # Exit with zero status to succeed CI + else + echo "Batch is not available yet. Retrying in $INTERVAL seconds..." + fi + + # Wait for the next interval + sleep $INTERVAL +done diff --git a/bin/prover_checkers/batch_l1_status_checker b/bin/prover_checkers/batch_l1_status_checker new file mode 100755 index 000000000000..24f26e354eac --- /dev/null +++ b/bin/prover_checkers/batch_l1_status_checker @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Needs following configuration +# URL - URL of the API endpoint +# INTERVAL - Time interval for polling in seconds +# TIMEOUT - Timeout of script in seconds + +# Start timer +START_TIME=$(date +%s) + +echo "URL: $URL" + +# Loop to query periodically +while true; do + # Calculate the elapsed time + CURRENT_TIME=$(date +%s) + ELAPSED_TIME=$((CURRENT_TIME - START_TIME)) + + # Check if the timeout has been reached + if [ $ELAPSED_TIME -ge $TIMEOUT ]; then + echo "Timeout reached. Failing CI..." + exit 1 # Exit with non-zero status to fail CI + fi + + # Run the curl request and capture the response + RESPONSE=$(curl --silent --request POST \ + --url $URL \ + --header 'Content-Type: application/json' \ + --data '{ + "jsonrpc": "2.0", + "id": 1, + "method": "zks_getBlockDetails", + "params": [1] + }') + + # Parse the executedAt field using jq + EXECUTED_AT=$(echo $RESPONSE | jq -r '.result.executedAt') + + # Check if executedAt is not null + if [ "$EXECUTED_AT" != "null" ] && [ -n "$EXECUTED_AT" ]; then + echo "executedAt is not null: $EXECUTED_AT" + echo "true" + exit 0 # Exit with zero status to succeed CI + else + DATABASE_STATUS=$(psql $DATABASE_URL -c "SELECT status FROM proof_compression_jobs_fri WHERE l1_batch_number = $BATCH_NUMBER;" -t -A) + echo "executedAt is null, database status is $DATABASE_STATUS, retrying in $INTERVAL seconds..." + fi + + # Wait for the next interval + sleep $INTERVAL +done diff --git a/bin/prover_checkers/kill_prover b/bin/prover_checkers/kill_prover new file mode 100644 index 000000000000..2a65aea2d673 --- /dev/null +++ b/bin/prover_checkers/kill_prover @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Use pkill to find and kill processes using circuit prover +if ! pkill -f 'zksync_circuit_prover|zkstack prover run --component=circuit-prover'; then + echo "No processes are currently using the GPU." + exit 0 +fi + +echo "All GPU-related processes have been killed." 
diff --git a/bin/prover_checkers/prover_jobs_status_checker b/bin/prover_checkers/prover_jobs_status_checker
new file mode 100755
index 000000000000..6816d9a2d140
--- /dev/null
+++ b/bin/prover_checkers/prover_jobs_status_checker
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o pipefail
+
+# Configuration
+# DATABASE_URL - The URL of the prover database to connect to
+# BATCH_NUMBER - The batch number to check readiness for
+# INTERVAL - Time interval for polling in seconds
+# TIMEOUT - Timeout of script in seconds
+
+# Start timer
+START_TIME=$(date +%s)
+
+# Loop to query periodically
+while true; do
+ # Calculate the elapsed time
+ CURRENT_TIME=$(date +%s)
+ ELAPSED_TIME=$((CURRENT_TIME - START_TIME))
+
+ # Check if the timeout has been reached
+ if [ $ELAPSED_TIME -ge $TIMEOUT ]; then
+ echo "Timeout reached. Failing CI..."
+ exit 1 # Exit with non-zero status to fail CI
+ fi
+
+ # Run the SQL query and capture the result
+ RESULT=$(psql $DATABASE_URL -c "SELECT count(*) FROM proof_compression_jobs_fri WHERE l1_batch_number = $BATCH_NUMBER AND status = 'queued';" -t -A)
+
+ # Check if the result is 1
+ if [ "$RESULT" -eq 1 ]; then
+ echo "Query result is 1. Success!"
+ exit 0 # Exit with zero status to succeed CI
+ else
+ STATUS=$(psql $DATABASE_URL -c "SELECT COUNT(*), status FROM prover_jobs_fri WHERE l1_batch_number = $BATCH_NUMBER GROUP BY status;" -t -A)
+ echo "Current status is $STATUS"
+ echo "Retrying in $INTERVAL seconds..."
+ fi
+
+ # Wait for the next interval
+ sleep $INTERVAL
+done
diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs
index 618a786ea658..51780f03230d 100644
--- a/core/node/proof_data_handler/src/lib.rs
+++ b/core/node/proof_data_handler/src/lib.rs
@@ -30,7 +30,7 @@ pub async fn run_server(
mut stop_receiver: watch::Receiver<bool>,
) -> anyhow::Result<()> {
let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port));
- tracing::debug!("Starting proof data handler server on {bind_address}");
+ tracing::info!("Starting proof data handler server on {bind_address}");
let app = create_proof_processing_router(blob_store, connection_pool, config, commitment_mode);
let listener = tokio::net::TcpListener::bind(bind_address)
diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml
index 35a0faeb9620..c930fa376f5e 100644
--- a/docker-compose-gpu-runner-cuda-12-0.yml
+++ b/docker-compose-gpu-runner-cuda-12-0.yml
@@ -3,6 +3,8 @@ services:
reth:
restart: always
image: "ghcr.io/paradigmxyz/reth:v1.0.6"
+ ports:
+ - 127.0.0.1:8545:8545
volumes:
- type: bind
source: ./volumes/reth/data
@@ -12,11 +14,9 @@ services:
target: /chaindata
command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config
- ports:
- - 127.0.0.1:8545:8545
zk:
- image: ghcr.io/matter-labs/zk-environment:cuda-12-0-latest
+ image: ghcr.io/matter-labs/zk-environment:cuda-12_0-latest
depends_on:
- reth
- postgres
@@ -49,11 +49,18 @@ services:
- /dev/nvidia-uvm-tools:/dev/nvidia-uvm-tools
env_file:
- ./.env
+ extra_hosts:
+ - "host:host-gateway"
+ profiles:
+ - runner
+ network_mode: host
+ pid: host
deploy:
resources:
reservations:
devices:
- capabilities: [ gpu ]
+
postgres:
image: "postgres:14"
command: postgres -c 'max_connections=200'
diff --git a/docker-compose-gpu-runner.yml b/docker-compose-gpu-runner.yml
index f95ae0d5f544..32665eb7010a 100644
--- a/docker-compose-gpu-runner.yml
+++
b/docker-compose-gpu-runner.yml @@ -16,7 +16,7 @@ services: - 127.0.0.1:8545:8545 zk: - image: "ghcr.io/matter-labs/zk-environment:cuda-11-8-latest" + image: "ghcr.io/matter-labs/zk-environment:cuda-11_8-latest" container_name: zk depends_on: - reth @@ -40,6 +40,11 @@ services: - GITHUB_WORKSPACE=$GITHUB_WORKSPACE env_file: - ./.env + extra_hosts: + - "host:host-gateway" + profiles: + - runner + network_mode: host deploy: resources: reservations: diff --git a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile b/docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile similarity index 95% rename from docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile rename to docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile index 90f089ba8bd4..fe44d55acbbc 100644 --- a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile +++ b/docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:20.04@sha256:3246518d9735254519e1b2ff35f95686e4a5011c90c85344c1f38df7bae9dd37 as base +FROM ubuntu:22.04@sha256:3d1556a8a18cf5307b121e0a98e93f1ddf1f3f8e092f1fddfd941254785b95d7 as base # Link Docker Image with repository # https://docs.github.com/en/packages/learn-github-packages/connecting-a-repository-to-a-package#connecting-a-repository-to-a-container-image-using-the-command-line @@ -16,7 +16,7 @@ RUN apt-get update && apt-get install -y \ git \ openssl \ libssl-dev \ - gcc \ + gcc-10 \ g++ \ curl \ pkg-config \ @@ -31,19 +31,19 @@ RUN apt-get update && apt-get install -y \ wget \ bzip2 \ unzip \ - hub + hub \ + curl \ + gnutls-bin git \ + build-essential \ + clang \ + lldb \ + lld # Install dependencies for RocksDB. `liburing` is not available for Ubuntu 20.04, # so we use a PPA with the backport RUN add-apt-repository ppa:savoury1/virtualisation && \ apt-get update && \ apt-get install -y \ - curl \ - gnutls-bin git \ - build-essential \ - clang \ - lldb \ - lld \ liburing-dev \ libclang-dev @@ -83,6 +83,11 @@ RUN rustup default stable RUN cargo install --version=0.8.0 sqlx-cli RUN cargo install cargo-nextest +RUN git clone https://github.com/matter-labs/foundry-zksync +RUN cd foundry-zksync && cargo build --release --bins +RUN mv ./foundry-zksync/target/release/forge /usr/local/cargo/bin/ +RUN mv ./foundry-zksync/target/release/cast /usr/local/cargo/bin/ + # Copy compiler (both solc and zksolc) binaries # Obtain `solc` 0.8.20. 
RUN wget -c https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.20%2Bcommit.a1b79de6 \ diff --git a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile b/docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile similarity index 96% rename from docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile rename to docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile index b6b023a5b7f4..da041b121816 100644 --- a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile +++ b/docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:20.04 as base +FROM ubuntu:22.04@sha256:3d1556a8a18cf5307b121e0a98e93f1ddf1f3f8e092f1fddfd941254785b95d7 as base # Link Docker Image with repository # https://docs.github.com/en/packages/learn-github-packages/connecting-a-repository-to-a-package#connecting-a-repository-to-a-container-image-using-the-command-line @@ -16,7 +16,7 @@ RUN apt-get update && apt-get install -y \ git \ openssl \ libssl-dev \ - gcc \ + gcc-10 \ g++ \ curl \ pkg-config \ @@ -30,18 +30,18 @@ RUN apt-get update && apt-get install -y \ gnupg2 \ postgresql-client \ hub \ - unzip + unzip \ + gnutls-bin \ + build-essential \ + clang \ + lldb\ + lld # Install dependencies for RocksDB. `liburing` is not available for Ubuntu 20.04, # so we use a PPA with the backport RUN add-apt-repository ppa:savoury1/virtualisation && \ apt-get update && \ apt-get install -y \ - gnutls-bin \ - build-essential \ - clang \ - lldb\ - lld \ liburing-dev \ libclang-dev @@ -81,6 +81,11 @@ RUN rustup default stable RUN cargo install --version=0.8.0 sqlx-cli RUN cargo install cargo-nextest +RUN git clone https://github.com/matter-labs/foundry-zksync +RUN cd foundry-zksync && cargo build --release --bins +RUN mv ./foundry-zksync/target/release/forge /usr/local/cargo/bin/ +RUN mv ./foundry-zksync/target/release/cast /usr/local/cargo/bin/ + # Copy compiler (both solc and zksolc) binaries # Obtain `solc` 0.8.20. 
RUN wget -c https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.20%2Bcommit.a1b79de6 \
diff --git a/prover/crates/lib/prover_fri_types/src/lib.rs b/prover/crates/lib/prover_fri_types/src/lib.rs
index 4a8a1b3e4064..37e004d54ecc 100644
--- a/prover/crates/lib/prover_fri_types/src/lib.rs
+++ b/prover/crates/lib/prover_fri_types/src/lib.rs
@@ -28,8 +28,8 @@ pub mod keys;
pub mod queue;
// THESE VALUES SHOULD BE UPDATED ON ANY PROTOCOL UPGRADE OF PROVERS
-pub const PROVER_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version24;
-pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(2);
+pub const PROVER_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version25;
+pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(0);
pub const PROVER_PROTOCOL_SEMANTIC_VERSION: ProtocolSemanticVersion = ProtocolSemanticVersion {
minor: PROVER_PROTOCOL_VERSION,
patch: PROVER_PROTOCOL_PATCH,
diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs
index 37ee2e076ab9..82986d9b41ae 100644
--- a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs
@@ -57,6 +57,18 @@ pub async fn init_configs(
}
let mut general_config = chain_config.get_general_config()?;
+
+ if general_config.proof_data_handler_config.is_some() && general_config.prover_gateway.is_some()
+ {
+ let proof_data_handler_config = general_config.proof_data_handler_config.clone().unwrap();
+ let mut prover_gateway = general_config.prover_gateway.clone().unwrap();
+
+ prover_gateway.api_url =
+ format!("http://127.0.0.1:{}", proof_data_handler_config.http_port);
+
+ general_config.prover_gateway = Some(prover_gateway);
+ }
+
let mut consensus_config = general_config
.consensus_config
.context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?;
diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs
index 94fea1389d28..280b5b2e91d8 100644
--- a/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs
@@ -33,6 +33,9 @@ use crate::{
#[derive(Debug, Clone, Parser, Default)]
pub struct ProverInitArgs {
+ #[clap(long)]
+ pub dev: bool,
+
// Proof store object
#[clap(long)]
pub proof_store_dir: Option<String>,
@@ -228,6 +231,10 @@ impl ProverInitArgs {
) -> anyhow::Result {
logger::info(MSG_GETTING_PROOF_STORE_CONFIG);
+ if self.dev {
+ return Ok(self.handle_file_backed_config(Some(DEFAULT_PROOF_STORE_DIR.to_string())));
+ }
+
if self.proof_store_dir.is_some() {
return Ok(self.handle_file_backed_config(self.proof_store_dir.clone()));
}
@@ -277,6 +284,11 @@ impl ProverInitArgs {
shell: &Shell,
) -> anyhow::Result> {
logger::info(MSG_GETTING_PUBLIC_STORE_CONFIG);
+
+ if self.dev {
+ return Ok(None);
+ }
+
let shall_save_to_public_bucket = self
.shall_save_to_public_bucket
.unwrap_or_else(|| PromptConfirm::new(MSG_SAVE_TO_PUBLIC_BUCKET_PROMPT).ask());
@@ -345,6 +357,12 @@ impl ProverInitArgs {
&self,
default_path: &str,
) -> Option<CompressorKeysArgs> {
+ if self.dev {
+ return Some(CompressorKeysArgs {
+ path: Some(default_path.to_string()),
+ });
+ }
+
let download_key = self.clone().setup_compressor_keys.unwrap_or_else(|| {
PromptConfirm::new(MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT)
.default(false)
.ask()
});
@@ -363,6 +381,9 @@
}
fn fill_setup_keys_values_with_prompt(&self) -> Option<SetupKeysArgs> {
+ if self.dev {
+ return None;
+ }
let args
= self.setup_keys_args.clone();
if self.setup_keys.unwrap_or_else(|| {
@@ -475,6 +496,10 @@ impl ProverInitArgs {
}
fn fill_bellman_cuda_values_with_prompt(&self) -> Option<InitBellmanCudaArgs> {
+ if self.dev {
+ return None;
+ }
+
let args = self.bellman_cuda_config.clone();
if self.bellman_cuda.unwrap_or_else(|| {
PromptConfirm::new(MSG_INITIALIZE_BELLMAN_CUDA_PROMPT)
.default
@@ -488,6 +513,10 @@
}
fn get_cloud_type_with_prompt(&self) -> CloudConnectionMode {
+ if self.dev {
+ return CloudConnectionMode::Local;
+ }
+
let cloud_type = self.cloud_type.clone().unwrap_or_else(|| {
PromptSelect::new(
MSG_CLOUD_TYPE_PROMPT,
@@ -503,25 +532,32 @@ impl ProverInitArgs {
&self,
config: &ChainConfig,
) -> Option<ProverDatabaseConfig> {
- let setup_database = self
- .setup_database
- .unwrap_or_else(|| PromptConfirm::new("Do you want to setup the database?").ask());
+ let setup_database = self.dev
+ || self
+ .setup_database
+ .unwrap_or_else(|| PromptConfirm::new("Do you want to setup the database?").ask());
if setup_database {
let DBNames { prover_name, .. } = generate_db_names(config);
let chain_name = config.name.clone();
- let dont_drop = self.dont_drop.unwrap_or_else(|| {
- !PromptConfirm::new("Do you want to drop the database?")
- .default(true)
- .ask()
- });
+ let dont_drop = if !self.dev {
+ self.dont_drop.unwrap_or_else(|| {
+ !PromptConfirm::new("Do you want to drop the database?")
+ .default(true)
+ .ask()
+ })
+ } else {
+ false
+ };
- if self.use_default.unwrap_or_else(|| {
- PromptConfirm::new(MSG_USE_DEFAULT_DATABASES_HELP)
- .default(true)
- .ask()
- }) {
+ if self.dev
+ || self.use_default.unwrap_or_else(|| {
+ PromptConfirm::new(MSG_USE_DEFAULT_DATABASES_HELP)
+ .default(true)
+ .ask()
+ })
+ {
Some(ProverDatabaseConfig {
database_config: DatabaseConfig::new(DATABASE_PROVER_URL.clone(), prover_name),
dont_drop,
diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs
index ba204b0be9e9..98a5c78be2a6 100644
--- a/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs
@@ -10,7 +10,9 @@ use crate::messages::{
#[derive(Debug, Clone, Parser, Default, Serialize, Deserialize)]
pub struct InitBellmanCudaArgs {
- #[clap(long)]
+ #[clap(long, conflicts_with_all(["bellman_cuda_dir"]))]
+ pub clone: bool,
+ #[clap(long, conflicts_with_all(["clone"]))]
pub bellman_cuda_dir: Option<String>,
}
@@ -31,19 +33,26 @@ impl std::fmt::Display for BellmanCudaPathSelection {
impl InitBellmanCudaArgs {
pub fn fill_values_with_prompt(self) -> InitBellmanCudaArgs {
- let bellman_cuda_dir = self.bellman_cuda_dir.unwrap_or_else(|| {
- match PromptSelect::new(
- MSG_BELLMAN_CUDA_ORIGIN_SELECT,
- BellmanCudaPathSelection::iter(),
- )
- .ask()
- {
- BellmanCudaPathSelection::Clone => "".to_string(),
- BellmanCudaPathSelection::Path => Prompt::new(MSG_BELLMAN_CUDA_DIR_PROMPT).ask(),
- }
- });
+ let bellman_cuda_dir = if self.clone {
+ "".to_string()
+ } else {
+ self.bellman_cuda_dir.unwrap_or_else(|| {
+ match PromptSelect::new(
+ MSG_BELLMAN_CUDA_ORIGIN_SELECT,
+ BellmanCudaPathSelection::iter(),
+ )
+ .ask()
+ {
+ BellmanCudaPathSelection::Clone => "".to_string(),
+ BellmanCudaPathSelection::Path => {
+ Prompt::new(MSG_BELLMAN_CUDA_DIR_PROMPT).ask()
+ }
+ }
+ })
+ };
InitBellmanCudaArgs {
+ clone: self.clone,
bellman_cuda_dir: Some(bellman_cuda_dir),
}
}
diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs
b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs
index 59a82152f1ff..d7600ba2d31f 100644
--- a/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs
@@ -8,7 +8,8 @@ use strum::{EnumIter, IntoEnumIterator};
use crate::{
consts::{
- COMPRESSOR_BINARY_NAME, COMPRESSOR_DOCKER_IMAGE, PROVER_BINARY_NAME, PROVER_DOCKER_IMAGE,
+ CIRCUIT_PROVER_BINARY_NAME, CIRCUIT_PROVER_DOCKER_IMAGE, COMPRESSOR_BINARY_NAME,
+ COMPRESSOR_DOCKER_IMAGE, PROVER_BINARY_NAME, PROVER_DOCKER_IMAGE,
PROVER_GATEWAY_BINARY_NAME, PROVER_GATEWAY_DOCKER_IMAGE, PROVER_JOB_MONITOR_BINARY_NAME,
PROVER_JOB_MONITOR_DOCKER_IMAGE, WITNESS_GENERATOR_BINARY_NAME,
WITNESS_GENERATOR_DOCKER_IMAGE, WITNESS_VECTOR_GENERATOR_BINARY_NAME,
@@ -30,6 +31,8 @@ pub struct ProverRunArgs {
pub witness_vector_generator_args: WitnessVectorGeneratorArgs,
#[clap(flatten)]
pub fri_prover_args: FriProverRunArgs,
+ #[clap(flatten)]
+ pub circuit_prover_args: CircuitProverArgs,
#[clap(long)]
pub docker: Option<bool>,
}
@@ -46,6 +49,8 @@ pub enum ProverComponent {
WitnessVectorGenerator,
#[strum(to_string = "Prover")]
Prover,
+ #[strum(to_string = "CircuitProver")]
+ CircuitProver,
#[strum(to_string = "Compressor")]
Compressor,
#[strum(to_string = "ProverJobMonitor")]
@@ -59,6 +64,7 @@ impl ProverComponent {
Self::WitnessGenerator => WITNESS_GENERATOR_DOCKER_IMAGE,
Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE,
Self::Prover => PROVER_DOCKER_IMAGE,
+ Self::CircuitProver => CIRCUIT_PROVER_DOCKER_IMAGE,
Self::Compressor => COMPRESSOR_DOCKER_IMAGE,
Self::ProverJobMonitor => PROVER_JOB_MONITOR_DOCKER_IMAGE,
}
@@ -70,6 +76,7 @@ impl ProverComponent {
Self::WitnessGenerator => WITNESS_GENERATOR_BINARY_NAME,
Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_BINARY_NAME,
Self::Prover => PROVER_BINARY_NAME,
+ Self::CircuitProver => CIRCUIT_PROVER_BINARY_NAME,
Self::Compressor => COMPRESSOR_BINARY_NAME,
Self::ProverJobMonitor => PROVER_JOB_MONITOR_BINARY_NAME,
}
@@ -78,10 +85,10 @@ impl ProverComponent {
pub fn get_application_args(&self, in_docker: bool) -> anyhow::Result<Vec<String>> {
let mut application_args = vec![];
- if self == &Self::Prover || self == &Self::Compressor {
+ if self == &Self::Prover || self == &Self::Compressor || self == &Self::CircuitProver {
if in_docker {
application_args.push("--gpus=all".to_string());
- } else {
+ } else if self != &Self::CircuitProver {
application_args.push("--features=gpu".to_string());
}
}
@@ -160,6 +167,26 @@ impl ProverComponent {
));
};
}
+ Self::CircuitProver => {
+ if args.circuit_prover_args.max_allocation.is_some() {
+ additional_args.push(format!(
+ "--max-allocation={}",
+ args.circuit_prover_args.max_allocation.unwrap()
+ ));
+ };
+ if args
+ .circuit_prover_args
+ .witness_vector_generator_count
+ .is_some()
+ {
+ additional_args.push(format!(
+ "--witness-vector-generator-count={}",
+ args.circuit_prover_args
+ .witness_vector_generator_count
+ .unwrap()
+ ));
+ };
+ }
_ => {}
};
@@ -211,6 +238,37 @@ impl WitnessVectorGeneratorArgs {
}
}
+#[derive(Debug, Clone, Parser, Default)]
+pub struct CircuitProverArgs {
+ #[clap(long)]
+ pub witness_vector_generator_count: Option<usize>,
+ #[clap(long)]
+ pub max_allocation: Option<usize>,
+}
+
+impl CircuitProverArgs {
+ pub fn fill_values_with_prompt(
+ self,
+ component: ProverComponent,
+ ) -> anyhow::Result<CircuitProverArgs> {
+ if component != ProverComponent::CircuitProver {
+ return Ok(Self::default());
+ }
+
+ let witness_vector_generator_count =
+ self.witness_vector_generator_count.unwrap_or_else(||
{ + Prompt::new("Number of WVG jobs to run in parallel") + .default("1") + .ask() + }); + + Ok(CircuitProverArgs { + witness_vector_generator_count: Some(witness_vector_generator_count), + max_allocation: self.max_allocation, + }) + } +} + #[derive(Debug, Clone, Parser, Default)] pub struct FriProverRunArgs { /// Memory allocation limit in bytes (for prover component) @@ -232,6 +290,10 @@ impl ProverRunArgs { .witness_vector_generator_args .fill_values_with_prompt(component)?; + let circuit_prover_args = self + .circuit_prover_args + .fill_values_with_prompt(component)?; + let docker = self.docker.unwrap_or_else(|| { Prompt::new("Do you want to run Docker image for the component?") .default("false") @@ -243,6 +305,7 @@ impl ProverRunArgs { witness_generator_args, witness_vector_generator_args, fri_prover_args: self.fri_prover_args, + circuit_prover_args, docker: Some(docker), }) } diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs index ed2f5b41a86a..863816b9ae69 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs @@ -8,7 +8,8 @@ use xshell::{cmd, Shell}; use super::args::run::{ProverComponent, ProverRunArgs}; use crate::messages::{ MSG_BELLMAN_CUDA_DIR_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_MISSING_COMPONENT_ERR, - MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, + MSG_RUNNING_CIRCUIT_PROVER, MSG_RUNNING_CIRCUIT_PROVER_ERR, MSG_RUNNING_COMPRESSOR, + MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR, MSG_RUNNING_PROVER_JOB_MONITOR, MSG_RUNNING_PROVER_JOB_MONITOR_ERR, MSG_RUNNING_WITNESS_GENERATOR, MSG_RUNNING_WITNESS_GENERATOR_ERR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR, @@ -49,6 +50,12 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() } (MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR) } + ProverComponent::CircuitProver => { + if !in_docker { + check_prerequisites(shell, &GPU_PREREQUISITES, false); + } + (MSG_RUNNING_CIRCUIT_PROVER, MSG_RUNNING_CIRCUIT_PROVER_ERR) + } ProverComponent::Compressor => { if !in_docker { check_prerequisites(shell, &GPU_PREREQUISITES, false); diff --git a/zkstack_cli/crates/zkstack/src/consts.rs b/zkstack_cli/crates/zkstack/src/consts.rs index df27d2f02d2c..ba00af77b5a6 100644 --- a/zkstack_cli/crates/zkstack/src/consts.rs +++ b/zkstack_cli/crates/zkstack/src/consts.rs @@ -22,6 +22,7 @@ pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator:l pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-vector-generator:latest2.0"; pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri:latest2.0"; +pub const CIRCUIT_PROVER_DOCKER_IMAGE: &str = "matterlabs/circuit-prover-gpu:latest2.0"; pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor:latest2.0"; pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor:latest2.0"; @@ -29,6 +30,7 @@ pub const PROVER_GATEWAY_BINARY_NAME: &str = "zksync_prover_fri_gateway"; pub const WITNESS_GENERATOR_BINARY_NAME: &str = "zksync_witness_generator"; pub const WITNESS_VECTOR_GENERATOR_BINARY_NAME: &str = "zksync_witness_vector_generator"; pub const PROVER_BINARY_NAME: &str = "zksync_prover_fri"; +pub const CIRCUIT_PROVER_BINARY_NAME: &str = "zksync_circuit_prover"; pub const COMPRESSOR_BINARY_NAME: &str = "zksync_proof_fri_compressor"; pub 
const PROVER_JOB_MONITOR_BINARY_NAME: &str = "zksync_prover_job_monitor";
diff --git a/zkstack_cli/crates/zkstack/src/messages.rs b/zkstack_cli/crates/zkstack/src/messages.rs
index d1d86db83989..6d6a1ceb566f 100644
--- a/zkstack_cli/crates/zkstack/src/messages.rs
+++ b/zkstack_cli/crates/zkstack/src/messages.rs
@@ -351,6 +351,7 @@ pub(super) const MSG_RUNNING_PROVER_JOB_MONITOR: &str = "Running prover job moni
pub(super) const MSG_RUNNING_WITNESS_GENERATOR: &str = "Running witness generator";
pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR: &str = "Running witness vector generator";
pub(super) const MSG_RUNNING_PROVER: &str = "Running prover";
+pub(super) const MSG_RUNNING_CIRCUIT_PROVER: &str = "Running circuit prover";
pub(super) const MSG_RUNNING_COMPRESSOR: &str = "Running compressor";
pub(super) const MSG_RUN_COMPONENT_PROMPT: &str = "What component do you want to run?";
pub(super) const MSG_RUNNING_PROVER_GATEWAY_ERR: &str = "Failed to run prover gateway";
@@ -359,6 +360,7 @@
pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR: &str =
"Failed to run witness vector generator";
pub(super) const MSG_RUNNING_COMPRESSOR_ERR: &str = "Failed to run compressor";
pub(super) const MSG_RUNNING_PROVER_ERR: &str = "Failed to run prover";
+pub(super) const MSG_RUNNING_CIRCUIT_PROVER_ERR: &str = "Failed to run circuit prover";
pub(super) const MSG_PROOF_STORE_CONFIG_PROMPT: &str =
"Select where you would like to store the proofs";
pub(super) const MSG_PROOF_STORE_DIR_PROMPT: &str =
From 525ba9f22e3ad8aad0242b27daed4c1f67b293c1 Mon Sep 17 00:00:00 2001
From: Vlad Bochok <41153528+vladbochok@users.noreply.github.com>
Date: Wed, 16 Oct 2024 17:53:29 +0400
Subject: [PATCH 5/9] fix(protocol upgrade tool): Remove legacy from protocol
upgrade tool (#3064)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
## What ❔
Remove the calldata generated for `Governance.sol`. We don't use `Governance.sol` anymore and so this calldata is not needed.
## Why ❔
## Checklist
- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`.
---
infrastructure/protocol-upgrade/README.md | 18 +-
.../protocol-upgrade/src/transaction.ts | 410 +-----------------
2 files changed, 16 insertions(+), 412 deletions(-)
diff --git a/infrastructure/protocol-upgrade/README.md b/infrastructure/protocol-upgrade/README.md
index da5ee313dab8..c7998b961233 100644
--- a/infrastructure/protocol-upgrade/README.md
+++ b/infrastructure/protocol-upgrade/README.md
@@ -25,13 +25,15 @@ If not provided as arguments, the tool can retrieve certain values from environm
2. `l2rpc` - `API_WEB3_JSON_RPC_HTTP_URL`
3. `create2-address` - `CONTRACTS_CREATE2_FACTORY_ADDR`
4. `zksync-address` - `CONTRACTS_DIAMOND_PROXY_ADDR`
-5. `nonce` - Taken from the node via `l1rpc`
-6. `gas-price` - Taken from the node via `l1rpc`
-7. `environment` - By default, set to `localhost`. Always specify it explicitly. Possible values: `localhost`,
+5. `upgrade-address` - `CONTRACTS_DEFAULT_UPGRADE_ADDR`
+6. `l2-upgrader-address` - `CONTRACTS_L2_DEFAULT_UPGRADE_ADDR`
+7. `nonce` - Taken from the node via `l1rpc`
+8. `gas-price` - Taken from the node via `l1rpc`
+9. `environment` - By default, set to `localhost`. Always specify it explicitly. Possible values: `localhost`,
`testnet2`, `stage2`, `mainnet2`.
Each upgrade on different environments is performed separately since the contract addresses differ between environments. -8. `private-key` - If not specified, the default key from the default mnemonic will be used. Always specify it - explicitly. +10. `private-key` - If not specified, the default key from the default mnemonic will be used. Always specify it + explicitly. ### Create a Protocol Upgrade Proposal @@ -215,8 +217,7 @@ $ zk f yarn start transactions build-default \ --l2-upgrader-address \ --diamond-upgrade-proposal-id \ --l1rpc \ ---zksync-address \ ---use-new-governance +--zksync-address ``` To execute the `proposeTransparentUpgrade` transaction on L1, use the following command: @@ -228,7 +229,6 @@ $ zk f yarn start transactions propose-upgrade \ --gas-price \ --nonce \ --zksync-address \ ---new-governance \ --environment ``` @@ -241,7 +241,6 @@ $ zk f yarn start transactions execute-upgrade \ --gas-price \ --nonce \ --zksync-address \ ---new-governance \ --environment ``` @@ -254,6 +253,5 @@ $ zk f yarn start transactions cancel-upgrade \ --zksync-address \ --gas-price \ --nonce \ ---new-governance \ --environment ``` diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index dfea3a3bfc35..e7a3f32b3227 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -3,12 +3,10 @@ import { ForceDeployUpgraderFactory as ForceDeployUpgraderFactoryL2 } from 'l2-c import { DefaultUpgradeFactory as DefaultUpgradeFactoryL1, AdminFacetFactory, - GovernanceFactory, StateTransitionManagerFactory, ChainAdminFactory } from 'l1-contracts/typechain'; import { FacetCut } from 'l1-contracts/src.ts/diamondCut'; -import { IZkSyncFactory } from '../pre-boojum/IZkSyncFactory'; import { ComplexUpgraderFactory } from 'system-contracts/typechain'; import { getCommonDataFileName, @@ -89,7 +87,6 @@ export interface ProposedUpgrade { postUpgradeCalldata: BytesLike; upgradeTimestamp: ethers.BigNumber; newProtocolVersion: BigNumberish; - newAllowList: string; } function buildNoopL2UpgradeTx(): L2CanonicalTransaction { @@ -123,10 +120,8 @@ export function buildProposeUpgrade( bootloaderHash?: BytesLike, defaultAccountHash?: BytesLike, verifier?: string, - newAllowList?: string, l2ProtocolUpgradeTx?: L2CanonicalTransaction ): ProposedUpgrade { - newAllowList = newAllowList ?? ethers.constants.AddressZero; bootloaderHash = bootloaderHash ?? ethers.constants.HashZero; defaultAccountHash = defaultAccountHash ?? ethers.constants.HashZero; l1ContractsUpgradeCalldata = l1ContractsUpgradeCalldata ?? 
'0x'; @@ -142,8 +137,7 @@ export function buildProposeUpgrade( postUpgradeCalldata, upgradeTimestamp, factoryDeps: [], - newProtocolVersion, - newAllowList + newProtocolVersion }; } @@ -171,43 +165,6 @@ export function prepareDefaultCalldataForL2upgrade(forcedDeployments: ForceDeplo return complexUpgraderCalldata; } -interface GovernanceTx { - scheduleCalldata: string; - executeCalldata: string; - operation: any; -} - -function prepareGovernanceTxs(target: string, data: BytesLike): GovernanceTx { - const govCall = { - target: target, - value: 0, - data: data - }; - - const operation = { - calls: [govCall], - predecessor: ethers.constants.HashZero, - salt: ethers.constants.HashZero - }; - - const governance = new GovernanceFactory(); - - // Get transaction data of the `scheduleTransparent` - const scheduleCalldata = governance.interface.encodeFunctionData('scheduleTransparent', [ - operation, - 0 // delay - ]); - - // Get transaction data of the `execute` - const executeCalldata = governance.interface.encodeFunctionData('execute', [operation]); - - return { - scheduleCalldata, - executeCalldata, - operation - }; -} - function prepareChainAdminCalldata(target: string, data: BytesLike): string { const call = { target: target, @@ -221,14 +178,13 @@ function prepareChainAdminCalldata(target: string, data: BytesLike): string { return calldata; } -export function prepareTransparentUpgradeCalldataForNewGovernance( +export function prepareUpgradeCalldata( oldProtocolVersion, oldProtocolVersionDeadline, newProtocolVersion, initCalldata, upgradeAddress: string, facetCuts: FacetCut[], - stmAddress: string, zksyncAddress: string, prepareDirectOperation?: boolean, chainId?: string @@ -247,9 +203,6 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( newProtocolVersion ]); - const { scheduleCalldata: stmScheduleTransparentOperation, executeCalldata: stmExecuteOperation } = - prepareGovernanceTxs(stmAddress, stmUpgradeCalldata); - // Prepare calldata for upgrading diamond proxy let adminFacet = new AdminFacetFactory(); const diamondProxyUpgradeCalldata = adminFacet.interface.encodeFunctionData('upgradeChainFromVersion', [ @@ -257,30 +210,12 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( diamondCut ]); - const { - scheduleCalldata: scheduleTransparentOperation, - executeCalldata: executeOperation, - operation: governanceOperation - } = prepareGovernanceTxs(zksyncAddress, diamondProxyUpgradeCalldata); - - const newExecuteChainUpgradeCalldata = prepareChainAdminCalldata(zksyncAddress, diamondProxyUpgradeCalldata); - - const legacyScheduleTransparentOperation = adminFacet.interface.encodeFunctionData('executeUpgrade', [diamondCut]); - const { scheduleCalldata: legacyScheduleOperation, executeCalldata: legacyExecuteOperation } = prepareGovernanceTxs( - zksyncAddress, - legacyScheduleTransparentOperation - ); + const chainAdminUpgradeCalldata = prepareChainAdminCalldata(zksyncAddress, diamondProxyUpgradeCalldata); let result: any = { - stmScheduleTransparentOperation, - stmExecuteOperation, - scheduleTransparentOperation, - executeOperation, - newExecuteChainUpgradeCalldata, - diamondCut, - governanceOperation, - legacyScheduleOperation, - legacyExecuteOperation + stmUpgradeCalldata, + chainAdminUpgradeCalldata, + diamondCut }; if (prepareDirectOperation) { @@ -290,13 +225,9 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( const stmDirecUpgradeCalldata = stm.interface.encodeFunctionData('executeUpgrade', [chainId, diamondCut]); - const { 
scheduleCalldata: stmScheduleOperationDirect, executeCalldata: stmExecuteOperationDirect } = - prepareGovernanceTxs(stmAddress, stmDirecUpgradeCalldata); - result = { ...result, - stmScheduleOperationDirect, - stmExecuteOperationDirect + stmDirecUpgradeCalldata }; } @@ -305,14 +236,10 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( export function buildDefaultUpgradeTx( environment, - diamondUpgradeProposalId, upgradeAddress, - l2UpgraderAddress, oldProtocolVersion, oldProtocolVersionDeadline, upgradeTimestamp, - newAllowList, - stmAddress, zksyncAddress, postUpgradeCalldataFlag, prepareDirectOperation?, @@ -389,20 +316,18 @@ export function buildDefaultUpgradeTx( bootloaderHash, defaultAAHash, cryptoVerifierAddress, - newAllowList, l2UpgradeTx ); let l1upgradeCalldata = prepareDefaultCalldataForL1upgrade(proposeUpgradeTx); - let upgradeData = prepareTransparentUpgradeCalldataForNewGovernance( + let upgradeData = prepareUpgradeCalldata( oldProtocolVersion, oldProtocolVersionDeadline, packedNewProtocolVersion, l1upgradeCalldata, upgradeAddress, facetCuts, - stmAddress, zksyncAddress, prepareDirectOperation, chainId @@ -414,7 +339,6 @@ export function buildDefaultUpgradeTx( upgradeAddress, protocolVersionSemVer: newProtocolVersionSemVer, packedProtocolVersion: packedNewProtocolVersion, - diamondUpgradeProposalId, upgradeTimestamp, ...upgradeData }; @@ -423,31 +347,6 @@ export function buildDefaultUpgradeTx( console.log('Default upgrade transactions are generated'); } -async function sendTransaction( - calldata: BytesLike, - privateKey: string, - l1rpc: string, - to: string, - environment: string, - gasPrice: ethers.BigNumber, - nonce: number -) { - const wallet = getWallet(l1rpc, privateKey); - gasPrice = gasPrice ?? (await wallet.provider.getGasPrice()); - nonce = nonce ?? 
(await wallet.getTransactionCount()); - const tx = await wallet.sendTransaction({ - to, - data: calldata, - value: 0, - gasLimit: 10_000_000, - gasPrice, - nonce - }); - console.log('Transaction hash: ', tx.hash); - await tx.wait(); - console.log('Transaction is executed'); -} - export function getWallet(l1rpc, privateKey) { if (!l1rpc) { l1rpc = web3Url(); @@ -462,99 +361,6 @@ export function getWallet(l1rpc, privateKey) { ).connect(provider); } -async function sendPreparedTx( - privateKey: string, - l1rpc: string, - environment: string, - gasPrice: ethers.BigNumber, - nonce: number, - governanceAddr: string, - transactionsJsonField: string, - logText: string -) { - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - const calldata = transactions[transactionsJsonField]; - - console.log(`${logText} for protocolVersion ${transactions.protocolVersion}`); - await sendTransaction(calldata, privateKey, l1rpc, governanceAddr, environment, gasPrice, nonce); -} - -async function cancelUpgrade( - privateKey: string, - l1rpc: string, - zksyncAddress: string, - environment: string, - gasPrice: ethers.BigNumber, - nonce: number, - execute: boolean, - newGovernanceAddress: string -) { - if (newGovernanceAddress != null) { - let wallet = getWallet(l1rpc, privateKey); - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - - let governance = GovernanceFactory.connect(newGovernanceAddress, wallet); - const operation = transactions.governanceOperation; - - const operationId = await governance.hashOperation(operation); - - console.log(`Cancel upgrade operation with id: ${operationId}`); - if (execute) { - const tx = await governance.cancel(operationId); - await tx.wait(); - console.log('Operation canceled'); - } else { - const calldata = governance.interface.encodeFunctionData('cancel', [operationId]); - console.log(`Cancel upgrade calldata: ${calldata}`); - } - } else { - zksyncAddress = zksyncAddress ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR; - let wallet = getWallet(l1rpc, privateKey); - let zkSync = IZkSyncFactory.connect(zksyncAddress, wallet); - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - - const transparentUpgrade = transactions.transparentUpgrade; - const diamondUpgradeProposalId = transactions.diamondUpgradeProposalId; - - const proposalHash = await zkSync.upgradeProposalHash( - transparentUpgrade, - diamondUpgradeProposalId, - ethers.constants.HashZero - ); - - console.log(`Cancel upgrade with hash: ${proposalHash}`); - let cancelUpgradeCalldata = zkSync.interface.encodeFunctionData('cancelUpgradeProposal', [proposalHash]); - if (execute) { - await sendTransaction( - cancelUpgradeCalldata, - privateKey, - l1rpc, - zksyncAddress, - environment, - gasPrice, - nonce - ); - } else { - console.log(`Cancel upgrade calldata: ${cancelUpgradeCalldata}`); - } - } -} - -async function getNewDiamondUpgradeProposalId(l1rpc: string, zksyncAddress: string) { - zksyncAddress = zksyncAddress ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR; - // We don't care about the wallet here, we just need to make a get call. 
- let wallet = getWallet(l1rpc, undefined); - let zkSync = IZkSyncFactory.connect(zksyncAddress, wallet); - let proposalId = await zkSync.getCurrentProposalId(); - proposalId = proposalId.add(1); - console.log( - `New proposal id: ${proposalId} for ${zksyncAddress} network: ${JSON.stringify( - await wallet.provider.getNetwork() - )}` - ); - return proposalId; -} - export const command = new Command('transactions').description( 'prepare the transactions and their calldata for the upgrade' ); @@ -564,223 +370,23 @@ command .requiredOption('--upgrade-timestamp ') .option('--upgrade-address ') .option('--environment ') - .option('--new-allow-list ') - .option('--l2-upgrader-address ') - .option('--diamond-upgrade-proposal-id ') .option('--old-protocol-version ') .option('--old-protocol-version-deadline ') .option('--l1rpc ') .option('--zksync-address ') - .option('--state-transition-manager-address ') .option('--chain-id ') .option('--prepare-direct-operation ') - .option('--use-new-governance') .option('--post-upgrade-calldata') .action(async (options) => { - if (!options.useNewGovernance) { - // TODO(X): remove old governance functionality from the protocol upgrade tool - throw new Error('Old governance is not supported anymore'); - } - - let diamondUpgradeProposalId = options.diamondUpgradeProposalId; - if (!diamondUpgradeProposalId && !options.useNewGovernance) { - diamondUpgradeProposalId = await getNewDiamondUpgradeProposalId(options.l1rpc, options.zksyncAddress); - } - buildDefaultUpgradeTx( options.environment, - diamondUpgradeProposalId, options.upgradeAddress, - options.l2UpgraderAddress, options.oldProtocolVersion, options.oldProtocolVersionDeadline, options.upgradeTimestamp, - options.newAllowList, - options.stateTransitionManagerAddress, options.zksyncAddress, options.postUpgradeCalldata, options.prepareDirectOperation, options.chainId ); }); - -command - .command('propose-upgrade-stm') - .option('--environment ') - .option('--private-key ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmScheduleTransparentOperation', - 'Proposing upgrade for STM' - ); - }); - -command - .command('execute-upgrade-stm') - .option('--environment ') - .option('--private-key ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmExecuteOperation', - 'Executing upgrade for STM' - ); - }); - -command - .command('propose-upgrade') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'scheduleTransparentOperation', - 'Proposing 
"upgradeChainFromVersion" upgrade' - ); - }); - -command - .command('execute-upgrade') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'executeOperation', - 'Executing "upgradeChainFromVersion" upgrade' - ); - }); - -command - .command('propose-upgrade-direct') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmScheduleOperationDirect', - 'Executing direct upgrade via STM' - ); - }); - -command - .command('execute-upgrade-direct') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmExecuteOperationDirect', - 'Executing direct upgrade via STM' - ); - }); - -command - .command('cancel-upgrade') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--execute') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await cancelUpgrade( - options.privateKey, - options.l1rpc, - options.zksyncAddress, - options.environment, - options.gasPrice, - options.nonce, - options.execute, - options.newGovernance - ); - }); From 8790240f95211b586df6ac5a9a0c1d948b425aa7 Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Wed, 16 Oct 2024 21:26:22 +0700 Subject: [PATCH 6/9] fix(ci): Conditional to build contracts in new CI (#3106) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Do not build contracts if not needed in new CI ## Why ❔ Speed up CI ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- .github/workflows/ci.yml | 5 ++++ .../new-build-contract-verifier-template.yml | 25 ++++++------------- .github/workflows/new-build-core-template.yml | 25 ++++++------------- 3 files changed, 21 insertions(+), 34 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 47ae3c517517..2f29fe98f0e6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -42,6 +42,9 @@ jobs: - '!prover/extract-setup-data-keys.sh' - 'docker/prover*/**' - '.github/workflows/build-prover-template.yml' + - '.github/workflows/new-build-prover-template.yml' + - '.github/workflows/build-witness-generator-template.yml' + - '.github/workflows/new-build-witness-generator-template.yml' - '.github/workflows/ci-prover-reusable.yml' - 'docker-compose-runner-nightly.yml' - '!**/*.md' @@ -53,7 +56,9 @@ jobs: - 'docker/external-node/**' - 'docker/server/**' - '.github/workflows/build-core-template.yml' + - '.github/workflows/new-build-core-template.yml' - '.github/workflows/build-contract-verifier-template.yml' + - '.github/workflows/new-build-contract-verifier-template.yml' - '.github/workflows/ci-core-reusable.yml' - '.github/workflows/ci-core-lint-reusable.yml' - 'Cargo.toml' diff --git a/.github/workflows/new-build-contract-verifier-template.yml b/.github/workflows/new-build-contract-verifier-template.yml index b5286782fad7..9b23cda6f02a 100644 --- a/.github/workflows/new-build-contract-verifier-template.yml +++ b/.github/workflows/new-build-contract-verifier-template.yml @@ -74,36 +74,30 @@ jobs: tar -C ./contracts -zxf system-contracts.tar.gz - name: Install Apt dependencies + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config - name: Install Node + if: env.BUILD_CONTRACTS == 'true' uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version: 20 cache: 'npm' - name: Install Yarn + if: env.BUILD_CONTRACTS == 'true' run: npm install -g yarn - name: Setup rust + if: env.BUILD_CONTRACTS == 'true' uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 with: toolchain: nightly-2024-08-01 - - name: Install cargo-nextest from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: cargo-nextest - - - name: Install sqlx-cli from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: sqlx-cli - tag: 0.8.1 - - name: Install foundry-zksync + if: env.BUILD_CONTRACTS == 'true' run: | mkdir ./foundry-zksync curl -LO https://github.com/matter-labs/foundry-zksync/releases/download/nightly-15bec2f861b3b4c71e58f85e2b2c9dd722585aa8/foundry_nightly_linux_amd64.tar.gz @@ -112,6 +106,7 @@ jobs: echo "$PWD/foundry-zksync" >> $GITHUB_PATH - name: Pre-download compilers + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | # Download needed versions of vyper compiler @@ -129,18 +124,14 @@ jobs: chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" done - - name: init - shell: bash - run: | - mkdir -p ./volumes/postgres - docker compose up -d postgres - - name: Install zkstack + if: env.BUILD_CONTRACTS == 'true' run: | ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup zkstackup --local || true - name: build contracts + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | cp etc/tokens/{test,localhost}.json diff --git a/.github/workflows/new-build-core-template.yml 
b/.github/workflows/new-build-core-template.yml index e8a41a7e0646..c4aeb9180fda 100644 --- a/.github/workflows/new-build-core-template.yml +++ b/.github/workflows/new-build-core-template.yml @@ -79,36 +79,30 @@ jobs: tar -C ./contracts -zxf system-contracts.tar.gz - name: Install Apt dependencies + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config - name: Install Node + if: env.BUILD_CONTRACTS == 'true' uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version: 20 cache: 'npm' - name: Install Yarn + if: env.BUILD_CONTRACTS == 'true' run: npm install -g yarn - name: Setup rust + if: env.BUILD_CONTRACTS == 'true' uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 with: toolchain: nightly-2024-08-01 - - name: Install cargo-nextest from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: cargo-nextest - - - name: Install sqlx-cli from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: sqlx-cli - tag: 0.8.1 - - name: Install foundry-zksync + if: env.BUILD_CONTRACTS == 'true' run: | mkdir ./foundry-zksync curl -LO https://github.com/matter-labs/foundry-zksync/releases/download/nightly-15bec2f861b3b4c71e58f85e2b2c9dd722585aa8/foundry_nightly_linux_amd64.tar.gz @@ -117,6 +111,7 @@ jobs: echo "$PWD/foundry-zksync" >> $GITHUB_PATH - name: Pre-download compilers + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | # Download needed versions of vyper compiler @@ -134,18 +129,14 @@ jobs: chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" done - - name: init - shell: bash - run: | - mkdir -p ./volumes/postgres - docker compose up -d postgres - - name: Install zkstack + if: env.BUILD_CONTRACTS == 'true' run: | ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup zkstackup --local || true - name: build contracts + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | cp etc/tokens/{test,localhost}.json From 223e4dd59414904f2b26afffc4b72bb78266b783 Mon Sep 17 00:00:00 2001 From: Danil Date: Wed, 16 Oct 2024 16:53:02 +0200 Subject: [PATCH 7/9] fix(zkstack): Move installation always to .local/bin (#3108) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
--- zkstack_cli/zkstackup/install | 3 +-- zkstack_cli/zkstackup/zkstackup | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/zkstack_cli/zkstackup/install b/zkstack_cli/zkstackup/install index f20ba4dd545a..849f0699bc32 100755 --- a/zkstack_cli/zkstackup/install +++ b/zkstack_cli/zkstackup/install @@ -3,8 +3,7 @@ set -eo pipefail BIN_URL="https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/zkstackup" -HOME_DIR=${XDG_CONFIG_HOME:-$HOME} -BIN_DIR="$HOME_DIR/.local/bin" +BIN_DIR="$HOME/.local/bin" BIN_PATH="$BIN_DIR/zkstackup" main() { diff --git a/zkstack_cli/zkstackup/zkstackup b/zkstack_cli/zkstackup/zkstackup index 20a061620f9a..e91bbc17905c 100755 --- a/zkstack_cli/zkstackup/zkstackup +++ b/zkstack_cli/zkstackup/zkstackup @@ -1,8 +1,7 @@ #!/usr/bin/env bash set -eo pipefail -HOME_DIR=${XDG_CONFIG_HOME:-$HOME} -LOCAL_DIR=${LOCAL_DIR:-"$HOME_DIR/.local"} +LOCAL_DIR="$HOME/.local/" BIN_DIR="$LOCAL_DIR/bin" BINS=() From 30ddb292977340beab37a81f75c35480cbdd59d3 Mon Sep 17 00:00:00 2001 From: Danil Date: Wed, 16 Oct 2024 16:53:26 +0200 Subject: [PATCH 8/9] fix(call_tracer): Flat call tracer fixes for blocks (#3095) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Make flat call tracer more compatible ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --------- Signed-off-by: Danil --- ...16fe37110ebc3fb3981b2626a0bf2edd00e69.json | 40 +++++++++ ...9027b18d108a05f5855115ba36045e3b1850.json} | 12 ++- ...c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json | 28 ------- core/lib/dal/src/blocks_web3_dal.rs | 32 ++++++-- core/lib/dal/src/transactions_dal.rs | 23 ++++-- core/lib/types/src/api/mod.rs | 9 +- core/lib/types/src/debug_flat_call.rs | 17 ++++ .../api_server/src/web3/namespaces/debug.rs | 82 +++++++++---------- core/node/api_server/src/web3/tests/debug.rs | 29 +++---- 9 files changed, 163 insertions(+), 109 deletions(-) create mode 100644 core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json rename core/lib/dal/.sqlx/{query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json => query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json} (50%) delete mode 100644 core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json diff --git a/core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json b/core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json new file mode 100644 index 000000000000..189e28f565d4 --- /dev/null +++ b/core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n protocol_version,\n index_in_block,\n miniblocks.number AS \"miniblock_number!\",\n miniblocks.hash AS \"miniblocks_hash!\"\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "index_in_block", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "miniblock_number!", + 
"type_info": "Int8" + }, + { + "ordinal": 3, + "name": "miniblocks_hash!", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [ + true, + true, + false, + false + ] + }, + "hash": "0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69" +} diff --git a/core/lib/dal/.sqlx/query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json b/core/lib/dal/.sqlx/query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json similarity index 50% rename from core/lib/dal/.sqlx/query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json rename to core/lib/dal/.sqlx/query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json index 06d3461c3fa3..4f44879b6ec3 100644 --- a/core/lib/dal/.sqlx/query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json +++ b/core/lib/dal/.sqlx/query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json @@ -1,12 +1,17 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n protocol_version,\n hash\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { "ordinal": 0, "name": "protocol_version", "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "hash", + "type_info": "Bytea" } ], "parameters": { @@ -15,8 +20,9 @@ ] }, "nullable": [ - true + true, + false ] }, - "hash": "894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2" + "hash": "2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850" } diff --git a/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json b/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json deleted file mode 100644 index 3b8accb4fda2..000000000000 --- a/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version,\n index_in_block\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 1, - "name": "index_in_block", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Bytea" - ] - }, - "nullable": [ - true, - true - ] - }, - "hash": "96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d" -} diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 829e15b5710a..4cb577986380 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -5,6 +5,7 @@ use zksync_db_connection::{ use zksync_system_constants::EMPTY_UNCLES_HASH; use zksync_types::{ api, + debug_flat_call::CallTraceMeta, fee_model::BatchFeeInput, l2_to_l1_log::L2ToL1Log, web3::{BlockHeader, Bytes}, @@ -531,11 +532,12 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_traces_for_l2_block( &mut self, block_number: L2BlockNumber, - ) -> DalResult> { - let protocol_version = sqlx::query!( + ) -> DalResult> { + let row = sqlx::query!( r#" SELECT - protocol_version + protocol_version, + hash FROM miniblocks WHERE @@ -543,14 +545,20 @@ impl BlocksWeb3Dal<'_, '_> { "#, i64::from(block_number.0) ) - .try_map(|row| row.protocol_version.map(parse_protocol_version).transpose()) + .try_map(|row| { + row.protocol_version + 
.map(parse_protocol_version) + .transpose() + .map(|val| (val, H256::from_slice(&row.hash))) + }) .instrument("get_traces_for_l2_block#get_l2_block_protocol_version_id") .with_arg("l2_block_number", &block_number) .fetch_optional(self.storage) .await?; - let Some(protocol_version) = protocol_version else { + let Some((protocol_version, block_hash)) = row else { return Ok(Vec::new()); }; + let protocol_version = protocol_version.unwrap_or_else(ProtocolVersionId::last_potentially_undefined); @@ -577,9 +585,15 @@ impl BlocksWeb3Dal<'_, '_> { .await? .into_iter() .map(|call_trace| { - let hash = H256::from_slice(&call_trace.tx_hash); + let tx_hash = H256::from_slice(&call_trace.tx_hash); let index = call_trace.tx_index_in_block.unwrap_or_default() as usize; - (call_trace.into_call(protocol_version), hash, index) + let meta = CallTraceMeta { + index_in_block: index, + tx_hash, + block_number: block_number.0, + block_hash, + }; + (call_trace.into_call(protocol_version), meta) }) .collect()) } @@ -1105,9 +1119,9 @@ mod tests { .await .unwrap(); assert_eq!(traces.len(), 2); - for ((trace, hash, _index), tx_result) in traces.iter().zip(&tx_results) { + for ((trace, meta), tx_result) in traces.iter().zip(&tx_results) { let expected_trace = tx_result.call_trace().unwrap(); - assert_eq!(&tx_result.hash, hash); + assert_eq!(tx_result.hash, meta.tx_hash); assert_eq!(*trace, expected_trace); } } diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 67c965312bd4..5314e9799b33 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -10,9 +10,10 @@ use zksync_db_connection::{ utils::pg_interval_from_duration, }; use zksync_types::{ - block::L2BlockExecutionData, l1::L1Tx, l2::L2Tx, protocol_upgrade::ProtocolUpgradeTx, Address, - ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, L2BlockNumber, PriorityOpId, - ProtocolVersionId, Transaction, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, + block::L2BlockExecutionData, debug_flat_call::CallTraceMeta, l1::L1Tx, l2::L2Tx, + protocol_upgrade::ProtocolUpgradeTx, Address, ExecuteTransactionCommon, L1BatchNumber, + L1BlockNumber, L2BlockNumber, PriorityOpId, ProtocolVersionId, Transaction, H256, + PROTOCOL_UPGRADE_TX_TYPE, U256, }; use zksync_utils::u256_to_big_decimal; use zksync_vm_interface::{ @@ -2131,12 +2132,17 @@ impl TransactionsDal<'_, '_> { Ok(data) } - pub async fn get_call_trace(&mut self, tx_hash: H256) -> DalResult> { + pub async fn get_call_trace( + &mut self, + tx_hash: H256, + ) -> DalResult> { let row = sqlx::query!( r#" SELECT protocol_version, - index_in_block + index_in_block, + miniblocks.number AS "miniblock_number!", + miniblocks.hash AS "miniblocks_hash!" 
FROM transactions INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number @@ -2177,7 +2183,12 @@ impl TransactionsDal<'_, '_> { .map(|call_trace| { ( parse_call_trace(&call_trace.call_trace, protocol_version), - row.index_in_block.unwrap_or_default() as usize, + CallTraceMeta { + index_in_block: row.index_in_block.unwrap_or_default() as usize, + tx_hash, + block_number: row.miniblock_number as u32, + block_hash: H256::from_slice(&row.miniblocks_hash), + }, ) })) } diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 103b6de1fb38..1c7672264cb4 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -14,8 +14,9 @@ pub use crate::transaction_request::{ Eip712Meta, SerializationTransactionError, TransactionRequest, }; use crate::{ - debug_flat_call::DebugCallFlat, protocol_version::L1VerifierConfig, Address, L2BlockNumber, - ProtocolVersionId, + debug_flat_call::{DebugCallFlat, ResultDebugCallFlat}, + protocol_version::L1VerifierConfig, + Address, L2BlockNumber, ProtocolVersionId, }; pub mod en; @@ -763,11 +764,11 @@ pub enum BlockStatus { #[serde(untagged)] pub enum CallTracerBlockResult { CallTrace(Vec), - FlatCallTrace(Vec), + FlatCallTrace(Vec), } impl CallTracerBlockResult { - pub fn unwrap_flat(self) -> Vec { + pub fn unwrap_flat(self) -> Vec { match self { Self::CallTrace(_) => panic!("Result is a FlatCallTrace"), Self::FlatCallTrace(trace) => trace, diff --git a/core/lib/types/src/debug_flat_call.rs b/core/lib/types/src/debug_flat_call.rs index 89a008b5fb5f..5809026e521c 100644 --- a/core/lib/types/src/debug_flat_call.rs +++ b/core/lib/types/src/debug_flat_call.rs @@ -3,6 +3,13 @@ use zksync_basic_types::{web3::Bytes, U256}; use crate::{api::DebugCallType, Address, H256}; +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ResultDebugCallFlat { + pub tx_hash: H256, + pub result: Vec, +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct DebugCallFlat { @@ -12,6 +19,8 @@ pub struct DebugCallFlat { pub trace_address: Vec, pub transaction_position: usize, pub transaction_hash: H256, + pub block_number: u32, + pub block_hash: H256, pub r#type: DebugCallType, } @@ -32,3 +41,11 @@ pub struct CallResult { pub output: Bytes, pub gas_used: U256, } + +#[derive(Debug, Clone, PartialEq, Default)] +pub struct CallTraceMeta { + pub index_in_block: usize, + pub tx_hash: H256, + pub block_number: u32, + pub block_hash: H256, +} diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index e296fe87faa2..726f35ac29a9 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -7,7 +7,7 @@ use zksync_types::{ BlockId, BlockNumber, CallTracerBlockResult, CallTracerResult, DebugCall, DebugCallType, ResultDebugCall, SupportedTracers, TracerConfig, }, - debug_flat_call::{Action, CallResult, DebugCallFlat}, + debug_flat_call::{Action, CallResult, CallTraceMeta, DebugCallFlat, ResultDebugCallFlat}, l2::L2Tx, transaction_request::CallRequest, web3, H256, U256, @@ -31,8 +31,7 @@ impl DebugNamespace { pub(crate) fn map_call( call: Call, - index: usize, - transaction_hash: H256, + meta: CallTraceMeta, tracer_option: TracerConfig, ) -> CallTracerResult { match tracer_option.tracer { @@ -42,14 +41,13 @@ impl DebugNamespace { )), SupportedTracers::FlatCallTracer => { let mut calls = vec![]; - let mut traces = 
vec![index]; + let mut traces = vec![meta.index_in_block]; Self::flatten_call( call, &mut calls, &mut traces, tracer_option.tracer_config.only_top_call, - index, - transaction_hash, + &meta, ); CallTracerResult::FlatCallTrace(calls) } @@ -89,8 +87,7 @@ impl DebugNamespace { calls: &mut Vec, trace_address: &mut Vec, only_top_call: bool, - transaction_position: usize, - transaction_hash: H256, + meta: &CallTraceMeta, ) { let subtraces = call.calls.len(); let debug_type = match call.r#type { @@ -120,22 +117,17 @@ impl DebugNamespace { result, subtraces, trace_address: trace_address.clone(), // Clone the current trace address - transaction_position, - transaction_hash, + transaction_position: meta.index_in_block, + transaction_hash: meta.tx_hash, + block_number: meta.block_number, + block_hash: meta.block_hash, r#type: DebugCallType::Call, }); if !only_top_call { for (number, call) in call.calls.into_iter().enumerate() { trace_address.push(number); - Self::flatten_call( - call, - calls, - trace_address, - false, - transaction_position, - transaction_hash, - ); + Self::flatten_call(call, calls, trace_address, false, meta); trace_address.pop(); } } @@ -172,25 +165,31 @@ impl DebugNamespace { SupportedTracers::CallTracer => CallTracerBlockResult::CallTrace( call_traces .into_iter() - .map(|(call, _, _)| ResultDebugCall { + .map(|(call, _)| ResultDebugCall { result: Self::map_default_call(call, options.tracer_config.only_top_call), }) .collect(), ), SupportedTracers::FlatCallTracer => { - let mut flat_calls = vec![]; - for (call, tx_hash, tx_index) in call_traces { - let mut traces = vec![tx_index]; - Self::flatten_call( - call, - &mut flat_calls, - &mut traces, - options.tracer_config.only_top_call, - tx_index, - tx_hash, - ); - } - CallTracerBlockResult::FlatCallTrace(flat_calls) + let res = call_traces + .into_iter() + .map(|(call, meta)| { + let mut traces = vec![meta.index_in_block]; + let mut flat_calls = vec![]; + Self::flatten_call( + call, + &mut flat_calls, + &mut traces, + options.tracer_config.only_top_call, + &meta, + ); + ResultDebugCallFlat { + tx_hash: meta.tx_hash, + result: flat_calls, + } + }) + .collect(); + CallTracerBlockResult::FlatCallTrace(res) } }; Ok(result) @@ -207,13 +206,8 @@ impl DebugNamespace { .get_call_trace(tx_hash) .await .map_err(DalError::generalize)?; - Ok(call_trace.map(|(call_trace, index_in_block)| { - Self::map_call( - call_trace, - index_in_block, - tx_hash, - options.unwrap_or_default(), - ) + Ok(call_trace.map(|(call_trace, meta)| { + Self::map_call(call_trace, meta, options.unwrap_or_default()) })) } @@ -305,8 +299,6 @@ impl DebugNamespace { )) } }; - // It's a call request, it's safe to keep it zero - let hash = H256::zero(); let call = Call::new_high_level( call.common_data.fee.gas_limit.as_u64(), result.vm.statistics.gas_used, @@ -316,6 +308,12 @@ impl DebugNamespace { revert_reason, result.call_traces, ); - Ok(Self::map_call(call, 0, hash, options)) + let number = block_args.resolved_block_number(); + let meta = CallTraceMeta { + block_number: number.0, + // It's a call request, so it's safe to keep everything else as default + ..Default::default() + }; + Ok(Self::map_call(call, meta, options)) } } diff --git 
a/core/node/api_server/src/web3/tests/debug.rs b/core/node/api_server/src/web3/tests/debug.rs index 4f021b777aec..28a22511fa98 100644 --- a/core/node/api_server/src/web3/tests/debug.rs +++ b/core/node/api_server/src/web3/tests/debug.rs @@ -139,32 +139,27 @@ impl HttpTest for TraceBlockFlatTest { .await? .unwrap_flat(); - // A transaction with 2 nested calls will convert into 3 Flattened calls. - // Also in this test, all tx have the same # of nested calls - assert_eq!( - block_traces.len(), - tx_results.len() * (tx_results[0].call_traces.len() + 1) - ); + assert_eq!(block_traces.len(), tx_results.len()); + + let tx_traces = &block_traces.first().unwrap().result; // First tx has 2 nested calls, thus 2 sub-traces - assert_eq!(block_traces[0].subtraces, 2); - assert_eq!(block_traces[0].trace_address, [0]); + assert_eq!(tx_traces[0].subtraces, 2); + assert_eq!(tx_traces[0].trace_address, [0]); // Second flat call (first nested call) does not have nested calls - assert_eq!(block_traces[1].subtraces, 0); - assert_eq!(block_traces[1].trace_address, [0, 0]); + assert_eq!(tx_traces[1].subtraces, 0); + assert_eq!(tx_traces[1].trace_address, [0, 0]); - let top_level_call_indexes = [0, 3, 6]; + let top_level_call_indexes = [0, 1, 2]; let top_level_traces = top_level_call_indexes .iter() .map(|&i| block_traces[i].clone()); for (top_level_trace, tx_result) in top_level_traces.zip(&tx_results) { - assert_eq!(top_level_trace.action.from, Address::zero()); - assert_eq!(top_level_trace.action.to, BOOTLOADER_ADDRESS); - assert_eq!( - top_level_trace.action.gas, - tx_result.transaction.gas_limit() - ); + let trace = top_level_trace.result.first().unwrap(); + assert_eq!(trace.action.from, Address::zero()); + assert_eq!(trace.action.to, BOOTLOADER_ADDRESS); + assert_eq!(trace.action.gas, tx_result.transaction.gas_limit()); } // TODO: test inner calls } From 899ffc074ac21cf0c3c0bb1e0c876cfd3d8fda72 Mon Sep 17 00:00:00 2001 From: koloz193 Date: Wed, 16 Oct 2024 14:43:07 -0400 Subject: [PATCH 9/9] feat(upgrade): update tool to generate calldata for setting new chain creation params (#3117) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
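In short, the tool now also encodes a `setChainCreationParams` call on the StateTransitionManager, assembled from the new genesis parameters. A minimal sketch of that flow, assuming the tool's `l1-contracts` typechain bindings are available; the genesis values below are placeholders for illustration (in the tool they come from the new `--genesis-*` CLI options added in this patch):

```typescript
import { StateTransitionManagerFactory } from 'l1-contracts/typechain';

// Placeholder genesis parameters; real values are supplied via the new CLI options.
const genesisUpgradeAddress = '0x0000000000000000000000000000000000000001';
const chainCreationParams = {
    genesisUpgrade: genesisUpgradeAddress,
    genesisBatchHash: '0x' + '11'.repeat(32),
    genesisIndexRepeatedStorageChanges: 1,
    genesisBatchCommitment: '0x' + '22'.repeat(32),
    // For chain creation only `Add` facet cuts are included; init is delegated to the genesis upgrade.
    diamondCut: { facetCuts: [], initAddress: genesisUpgradeAddress, initCalldata: '0x' }
};

// ABI-encode the STM governance call, mirroring prepareUpgradeCalldata in the diff below.
const stm = new StateTransitionManagerFactory();
const stmSetChainCreationCalldata = stm.interface.encodeFunctionData('setChainCreationParams', [
    chainCreationParams
]);
console.log(stmSetChainCreationCalldata);
```
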
--- .../protocol-upgrade/src/transaction.ts | 58 ++++++++++++++++++- 1 file changed, 56 insertions(+), 2 deletions(-) diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index e7a3f32b3227..bd7df8ab456b 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -27,12 +27,26 @@ import * as path from 'path'; const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); +export enum Action { + Add = 0, + Replace = 1, + Remove = 2 +} + export interface DiamondCutData { facetCuts: FacetCut[]; initAddress: string; initCalldata: string; } +export interface ChainCreationParams { + genesisUpgrade: string; + genesisBatchHash: string; + genesisIndexRepeatedStorageChanges: number; + genesisBatchCommitment: string; + diamondCut: DiamondCutData; +} + export interface ForceDeployment { // The bytecode hash to put on an address bytecodeHash: BytesLike; @@ -186,6 +200,10 @@ export function prepareUpgradeCalldata( upgradeAddress: string, facetCuts: FacetCut[], zksyncAddress: string, + genesisUpgradeAddress: string, + genesisBatchHash: string, + genesisIndexRepeatedStorageChanges: number, + genesisBatchCommitment: string, prepareDirectOperation?: boolean, chainId?: string ) { @@ -194,6 +212,21 @@ export function prepareUpgradeCalldata( initAddress: upgradeAddress, initCalldata }; + + let chainCreationDiamondCut: DiamondCutData = { + facetCuts: facetCuts.filter((cut) => cut.action == Action.Add), + initAddress: genesisUpgradeAddress, + initCalldata: '0x' + }; + + let chainCreationParams: ChainCreationParams = { + genesisUpgrade: genesisUpgradeAddress, + genesisBatchHash, + genesisIndexRepeatedStorageChanges, + genesisBatchCommitment, + diamondCut: chainCreationDiamondCut + }; + // Prepare calldata for STM let stm = new StateTransitionManagerFactory(); const stmUpgradeCalldata = stm.interface.encodeFunctionData('setNewVersionUpgrade', [ @@ -203,6 +236,10 @@ export function prepareUpgradeCalldata( newProtocolVersion ]); + const stmSetChainCreationCalldata = stm.interface.encodeFunctionData('setChainCreationParams', [ + chainCreationParams + ]); + // Prepare calldata for upgrading diamond proxy let adminFacet = new AdminFacetFactory(); const diamondProxyUpgradeCalldata = adminFacet.interface.encodeFunctionData('upgradeChainFromVersion', [ @@ -215,7 +252,8 @@ export function prepareUpgradeCalldata( let result: any = { stmUpgradeCalldata, chainAdminUpgradeCalldata, - diamondCut + diamondCut, + stmSetChainCreationCalldata }; if (prepareDirectOperation) { @@ -242,6 +280,10 @@ export function buildDefaultUpgradeTx( upgradeTimestamp, zksyncAddress, postUpgradeCalldataFlag, + genesisUpgradeAddress, + genesisBatchHash, + genesisIndexRepeatedStorageChanges, + genesisBatchCommitment, prepareDirectOperation?, chainId? 
) { @@ -329,6 +371,10 @@ export function buildDefaultUpgradeTx( upgradeAddress, facetCuts, zksyncAddress, + genesisUpgradeAddress, + genesisBatchHash, + genesisIndexRepeatedStorageChanges, + genesisBatchCommitment, prepareDirectOperation, chainId ); @@ -376,7 +422,11 @@ command .option('--zksync-address ') .option('--chain-id ') .option('--prepare-direct-operation ') - .option('--post-upgrade-calldata') + .option('--post-upgrade-calldata ') + .option('--genesis-upgrade-address ') + .option('--genesis-batch-hash ') + .option('--genesis-index-repeated-storage-changes ') + .option('--genesis-batch-commitment ') .action(async (options) => { buildDefaultUpgradeTx( options.environment, @@ -386,6 +436,10 @@ command options.upgradeTimestamp, options.zksyncAddress, options.postUpgradeCalldata, + options.genesisUpgradeAddress, + options.genesisBatchHash, + options.genesisIndexRepeatedStorageChanges, + options.genesisBatchCommitment, options.prepareDirectOperation, options.chainId );
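
For reference, a hypothetical `build-default` invocation threading the new genesis options through the tool, in the style of the README commands above; every value is a placeholder and the option names mirror this diff:

```
$ zk f yarn start transactions build-default \
--upgrade-timestamp <timestamp> \
--upgrade-address <upgradeAddress> \
--l1rpc <l1rpc> \
--zksync-address <zksyncAddress> \
--genesis-upgrade-address <genesisUpgradeAddress> \
--genesis-batch-hash <genesisBatchHash> \
--genesis-index-repeated-storage-changes <index> \
--genesis-batch-commitment <genesisBatchCommitment> \
--environment <environment>
```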