From fd8f05815f9878dd0b3a04703a7b5a32da9bbfbd Mon Sep 17 00:00:00 2001 From: Nisheeth Barthwal Date: Tue, 5 Nov 2024 07:19:42 +0100 Subject: [PATCH] example refactor to employ the strategy approach --- Cargo.lock | 29 ++ Cargo.toml | 4 + crates/anvil/src/eth/backend/mem/mod.rs | 7 +- crates/cheatcodes/src/lib.rs | 1 + crates/cheatcodes/src/zk.rs | 293 +++++++++++++++++ crates/evm/core/src/backend/cow.rs | 4 +- crates/evm/core/src/backend/mod.rs | 410 +++++++++++++----------- crates/evm/core/src/backend/strategy.rs | 176 ++++++++++ crates/evm/core/src/utils.rs | 15 +- crates/evm/evm/Cargo.toml | 1 + crates/evm/evm/src/executors/mod.rs | 37 ++- crates/forge/Cargo.toml | 2 + crates/forge/src/multi_runner.rs | 18 +- crates/foundry-zksync/Cargo.toml | 30 ++ crates/foundry-zksync/src/lib.rs | 202 ++++++++++++ crates/strategy/Cargo.toml | 16 + crates/strategy/src/lib.rs | 25 ++ 17 files changed, 1049 insertions(+), 221 deletions(-) create mode 100644 crates/cheatcodes/src/zk.rs create mode 100644 crates/evm/core/src/backend/strategy.rs create mode 100644 crates/foundry-zksync/Cargo.toml create mode 100644 crates/foundry-zksync/src/lib.rs create mode 100644 crates/strategy/Cargo.toml create mode 100644 crates/strategy/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index cf76344a2..18d930e48 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4618,8 +4618,10 @@ dependencies = [ "foundry-evm", "foundry-evm-abi", "foundry-linking", + "foundry-strategy", "foundry-test-utils", "foundry-wallets", + "foundry-zksync", "foundry-zksync-compiler", "foundry-zksync-core", "futures 0.3.31", @@ -5272,6 +5274,7 @@ dependencies = [ "proptest", "revm", "revm-inspectors", + "serde_json", "thiserror", "tracing", ] @@ -5441,6 +5444,13 @@ dependencies = [ "syn 2.0.79", ] +[[package]] +name = "foundry-strategy" +version = "0.0.2" +dependencies = [ + "foundry-evm-core", +] + [[package]] name = "foundry-test-utils" version = "0.0.2" @@ -5498,6 +5508,25 @@ dependencies = [ "tracing", ] +[[package]] +name = "foundry-zksync" +version = "0.0.2" +dependencies = [ + "alloy-primitives", + "eyre", + "foundry-common", + "foundry-compilers", + "foundry-evm-core", + "foundry-evm-traces", + "foundry-strategy", + "foundry-zksync-core", + "revm", + "revm-inspectors", + "serde", + "serde_json", + "tracing", +] + [[package]] name = "foundry-zksync-compiler" version = "0.0.2" diff --git a/Cargo.toml b/Cargo.toml index 167503ca3..dc6a9d6e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,6 +22,8 @@ members = [ "crates/forge/", "crates/macros/", "crates/test-utils/", + "crates/foundry-zksync/", + "crates/strategy/", ] resolver = "2" @@ -168,6 +170,8 @@ foundry-macros = { path = "crates/macros" } foundry-test-utils = { path = "crates/test-utils" } foundry-wallets = { path = "crates/wallets" } foundry-linking = { path = "crates/linking" } +foundry-strategy = { path = "crates/strategy" } +foundry-zksync = { path = "crates/foundry-zksync" } foundry-zksync-core = { path = "crates/zksync/core" } foundry-zksync-compiler = { path = "crates/zksync/compiler" } foundry-zksync-inspectors = { path = "crates/zksync/inspectors" } diff --git a/crates/anvil/src/eth/backend/mem/mod.rs b/crates/anvil/src/eth/backend/mem/mod.rs index 0f28e28a5..151f1c0cf 100644 --- a/crates/anvil/src/eth/backend/mem/mod.rs +++ b/crates/anvil/src/eth/backend/mem/mod.rs @@ -857,15 +857,14 @@ impl Backend { } /// Creates an EVM instance with optionally injected precompiles. 
- fn new_evm_with_inspector_ref( + fn new_evm_with_inspector_ref<'a, DB>( &self, db: DB, env: EnvWithHandlerCfg, - inspector: I, - ) -> revm::Evm<'_, I, WrapDatabaseRef> + inspector: &'a mut dyn InspectorExt>, + ) -> revm::Evm<'a, &'a mut dyn InspectorExt>, WrapDatabaseRef> where DB: revm::DatabaseRef, - I: InspectorExt>, { let mut evm = new_evm_with_inspector_ref(db, env, inspector); if let Some(factory) = &self.precompile_factory { diff --git a/crates/cheatcodes/src/lib.rs b/crates/cheatcodes/src/lib.rs index 30c3938e3..33adb4f45 100644 --- a/crates/cheatcodes/src/lib.rs +++ b/crates/cheatcodes/src/lib.rs @@ -40,6 +40,7 @@ mod evm; mod fs; mod inspector; +mod zk; mod json; diff --git a/crates/cheatcodes/src/zk.rs b/crates/cheatcodes/src/zk.rs new file mode 100644 index 000000000..7715b8c8f --- /dev/null +++ b/crates/cheatcodes/src/zk.rs @@ -0,0 +1,293 @@ +use std::{cell::RefCell, rc::Rc}; + +use alloy_primitives::{Address, Bytes, B256, U256}; +use foundry_evm_core::backend::DatabaseExt; +use foundry_zksync_compiler::DualCompiledContracts; +use foundry_zksync_core::{ + convert::{ConvertH160, ConvertH256, ConvertRU256, ConvertU256}, + get_account_code_key, get_balance_key, get_nonce_key, +}; +use revm::{ + interpreter::{opcode, InstructionResult, Interpreter}, + primitives::{AccountInfo, Bytecode, Env, EvmStorageSlot, HashMap as rHashMap, KECCAK_EMPTY}, + EvmContext, InnerEvmContext, +}; +use zksync_types::{ + block::{pack_block_info, unpack_block_info}, + utils::{decompose_full_nonce, nonces_to_full_nonce}, + ACCOUNT_CODE_STORAGE_ADDRESS, CURRENT_VIRTUAL_BLOCK_INFO_POSITION, KNOWN_CODES_STORAGE_ADDRESS, + L2_BASE_TOKEN_ADDRESS, NONCE_HOLDER_ADDRESS, SYSTEM_CONTEXT_ADDRESS, +}; + +use crate::evm::journaled_account; + +pub trait NetworkCheatcode: std::fmt::Debug { + fn enabled(&self) -> bool; + fn select_evm(&mut self, data: &mut InnerEvmContext<&mut dyn DatabaseExt>); + fn select_custom( + &mut self, + data: &mut InnerEvmContext<&mut dyn DatabaseExt>, + new_env: Option<&Env>, + ); + fn handle_opcode( + &self, + interpreter: &mut Interpreter, + ecx: &mut EvmContext<&mut dyn DatabaseExt>, + ) -> bool; +} + +#[derive(Debug)] +pub struct Zk { + pub use_zk_vm: bool, + pub dual_compiled_contracts: DualCompiledContracts, +} + +impl NetworkCheatcode for Option>>> { + fn enabled(&self) -> bool { + self.as_ref().map_or(false, |n| n.borrow().enabled()) + } + + fn handle_opcode( + &self, + interpreter: &mut Interpreter, + ecx: &mut EvmContext<&mut dyn DatabaseExt>, + ) -> bool { + self.as_ref().map_or(false, |n| n.borrow().handle_opcode(interpreter, ecx)) + } + + fn select_evm(&mut self, data: &mut InnerEvmContext<&mut dyn DatabaseExt>) { + let _ = self.as_mut().map_or((), |n| n.borrow_mut().select_evm(data)); + } + + fn select_custom( + &mut self, + data: &mut InnerEvmContext<&mut dyn DatabaseExt>, + new_env: Option<&Env>, + ) { + let _ = self.as_mut().map_or((), |n| n.borrow_mut().select_custom(data, new_env)); + } +} + +impl NetworkCheatcode for Zk { + fn enabled(&self) -> bool { + self.use_zk_vm + } + + fn handle_opcode( + &self, + interpreter: &mut Interpreter, + ecx: &mut EvmContext<&mut dyn DatabaseExt>, + ) -> bool { + if self.enabled() { + let address = match interpreter.current_opcode() { + opcode::SELFBALANCE => interpreter.contract().target_address, + opcode::BALANCE => { + if interpreter.stack.is_empty() { + interpreter.instruction_result = InstructionResult::StackUnderflow; + return true; + } + + Address::from_word(B256::from(unsafe { interpreter.stack.pop_unsafe() })) + } + _ => return 
false, + }; + + // Safety: Length is checked above. + let balance = foundry_zksync_core::balance(address, ecx); + + // Skip the current BALANCE instruction since we've already handled it + match interpreter.stack.push(balance) { + Ok(_) => unsafe { + interpreter.instruction_pointer = interpreter.instruction_pointer.add(1); + }, + Err(e) => { + interpreter.instruction_result = e; + } + }; + + return true; + } + + return false; + } + + /// Switch to EVM and translate block info, balances, nonces and deployed codes for persistent + /// accounts + fn select_evm(&mut self, data: &mut InnerEvmContext<&mut dyn DatabaseExt>) { + if !self.use_zk_vm { + tracing::info!("already in EVM"); + return + } + + tracing::info!("switching to EVM"); + self.use_zk_vm = false; + + let system_account = SYSTEM_CONTEXT_ADDRESS.to_address(); + journaled_account(data, system_account).expect("failed to load account"); + let balance_account = L2_BASE_TOKEN_ADDRESS.to_address(); + journaled_account(data, balance_account).expect("failed to load account"); + let nonce_account = NONCE_HOLDER_ADDRESS.to_address(); + journaled_account(data, nonce_account).expect("failed to load account"); + let account_code_account = ACCOUNT_CODE_STORAGE_ADDRESS.to_address(); + journaled_account(data, account_code_account).expect("failed to load account"); + + // TODO we might need to store the deployment nonce under the contract storage + // to not lose it across VMs. + + let block_info_key = CURRENT_VIRTUAL_BLOCK_INFO_POSITION.to_ru256(); + let block_info = data.sload(system_account, block_info_key).unwrap_or_default(); + let (block_number, block_timestamp) = unpack_block_info(block_info.to_u256()); + data.env.block.number = U256::from(block_number); + data.env.block.timestamp = U256::from(block_timestamp); + + let test_contract = data.db.get_test_contract_address(); + for address in data.db.persistent_accounts().into_iter().chain([data.env.tx.caller]) { + info!(?address, "importing to evm state"); + + let balance_key = get_balance_key(address); + let nonce_key = get_nonce_key(address); + + let balance = data.sload(balance_account, balance_key).unwrap_or_default().data; + let full_nonce = data.sload(nonce_account, nonce_key).unwrap_or_default(); + let (tx_nonce, _deployment_nonce) = decompose_full_nonce(full_nonce.to_u256()); + let nonce = tx_nonce.as_u64(); + + let account_code_key = get_account_code_key(address); + let (code_hash, code) = data + .sload(account_code_account, account_code_key) + .ok() + .and_then(|zk_bytecode_hash| { + self.dual_compiled_contracts + .find_by_zk_bytecode_hash(zk_bytecode_hash.to_h256()) + .map(|contract| { + ( + contract.evm_bytecode_hash, + Some(Bytecode::new_raw(Bytes::from( + contract.evm_deployed_bytecode.clone(), + ))), + ) + }) + }) + .unwrap_or_else(|| (KECCAK_EMPTY, None)); + + let account = journaled_account(data, address).expect("failed to load account"); + let _ = std::mem::replace(&mut account.info.balance, balance); + let _ = std::mem::replace(&mut account.info.nonce, nonce); + + if test_contract.map(|addr| addr == address).unwrap_or_default() { + tracing::trace!(?address, "ignoring code translation for test contract"); + } else { + account.info.code_hash = code_hash; + account.info.code.clone_from(&code); + } + } + } + + /// Switch to ZK-VM and translate block info, balances, nonces and deployed codes for persistent + /// accounts + fn select_custom( + &mut self, + data: &mut InnerEvmContext<&mut dyn DatabaseExt>, + new_env: Option<&Env>, + ) { + if self.use_zk_vm { + tracing::info!("already 
in ZK-VM"); + return + } + + tracing::info!("switching to ZK-VM"); + self.use_zk_vm = true; + + let env = new_env.unwrap_or(data.env.as_ref()); + + let mut system_storage: rHashMap = Default::default(); + let block_info_key = CURRENT_VIRTUAL_BLOCK_INFO_POSITION.to_ru256(); + let block_info = + pack_block_info(env.block.number.as_limbs()[0], env.block.timestamp.as_limbs()[0]); + system_storage.insert(block_info_key, EvmStorageSlot::new(block_info.to_ru256())); + + let mut l2_eth_storage: rHashMap = Default::default(); + let mut nonce_storage: rHashMap = Default::default(); + let mut account_code_storage: rHashMap = Default::default(); + let mut known_codes_storage: rHashMap = Default::default(); + let mut deployed_codes: rHashMap = Default::default(); + + for address in data.db.persistent_accounts().into_iter().chain([data.env.tx.caller]) { + info!(?address, "importing to zk state"); + + let account = journaled_account(data, address).expect("failed to load account"); + let info = &account.info; + + let balance_key = get_balance_key(address); + l2_eth_storage.insert(balance_key, EvmStorageSlot::new(info.balance)); + + // TODO we need to find a proper way to handle deploy nonces instead of replicating + let full_nonce = nonces_to_full_nonce(info.nonce.into(), info.nonce.into()); + + let nonce_key = get_nonce_key(address); + nonce_storage.insert(nonce_key, EvmStorageSlot::new(full_nonce.to_ru256())); + + if let Some(contract) = self.dual_compiled_contracts.iter().find(|contract| { + info.code_hash != KECCAK_EMPTY && info.code_hash == contract.evm_bytecode_hash + }) { + account_code_storage.insert( + get_account_code_key(address), + EvmStorageSlot::new(contract.zk_bytecode_hash.to_ru256()), + ); + known_codes_storage + .insert(contract.zk_bytecode_hash.to_ru256(), EvmStorageSlot::new(U256::ZERO)); + + let code_hash = B256::from_slice(contract.zk_bytecode_hash.as_bytes()); + deployed_codes.insert( + address, + AccountInfo { + balance: info.balance, + nonce: info.nonce, + code_hash, + code: Some(Bytecode::new_raw(Bytes::from( + contract.zk_deployed_bytecode.clone(), + ))), + }, + ); + } else { + tracing::debug!(code_hash = ?info.code_hash, ?address, "no zk contract found") + } + } + + let system_addr = SYSTEM_CONTEXT_ADDRESS.to_address(); + let system_account = journaled_account(data, system_addr).expect("failed to load account"); + system_account.storage.extend(system_storage.clone()); + + let balance_addr = L2_BASE_TOKEN_ADDRESS.to_address(); + let balance_account = + journaled_account(data, balance_addr).expect("failed to load account"); + balance_account.storage.extend(l2_eth_storage.clone()); + + let nonce_addr = NONCE_HOLDER_ADDRESS.to_address(); + let nonce_account = journaled_account(data, nonce_addr).expect("failed to load account"); + nonce_account.storage.extend(nonce_storage.clone()); + + let account_code_addr = ACCOUNT_CODE_STORAGE_ADDRESS.to_address(); + let account_code_account = + journaled_account(data, account_code_addr).expect("failed to load account"); + account_code_account.storage.extend(account_code_storage.clone()); + + let known_codes_addr = KNOWN_CODES_STORAGE_ADDRESS.to_address(); + let known_codes_account = + journaled_account(data, known_codes_addr).expect("failed to load account"); + known_codes_account.storage.extend(known_codes_storage.clone()); + + let test_contract = data.db.get_test_contract_address(); + for (address, info) in deployed_codes { + let account = journaled_account(data, address).expect("failed to load account"); + let _ = std::mem::replace(&mut 
account.info.balance, info.balance); + let _ = std::mem::replace(&mut account.info.nonce, info.nonce); + if test_contract.map(|addr| addr == address).unwrap_or_default() { + tracing::trace!(?address, "ignoring code translation for test contract"); + } else { + account.info.code_hash = info.code_hash; + account.info.code.clone_from(&info.code); + } + } + } +} diff --git a/crates/evm/core/src/backend/cow.rs b/crates/evm/core/src/backend/cow.rs index 9389daddd..339ee5290 100644 --- a/crates/evm/core/src/backend/cow.rs +++ b/crates/evm/core/src/backend/cow.rs @@ -81,13 +81,13 @@ impl<'a> CowBackend<'a> { pub fn inspect<'b, I: InspectorExt<&'b mut Self>>( &'b mut self, env: &mut EnvWithHandlerCfg, - inspector: I, + mut inspector: I, ) -> eyre::Result { // this is a new call to inspect with a new env, so even if we've cloned the backend // already, we reset the initialized state self.is_initialized = false; self.spec_id = env.handler_cfg.spec_id; - let mut evm = crate::utils::new_evm_with_inspector(self, env.clone(), inspector); + let mut evm = crate::utils::new_evm_with_inspector(self, env.clone(), &mut inspector); let res = evm.transact().wrap_err("backend: failed while inspecting")?; diff --git a/crates/evm/core/src/backend/mod.rs b/crates/evm/core/src/backend/mod.rs index db33dea50..e2c187744 100644 --- a/crates/evm/core/src/backend/mod.rs +++ b/crates/evm/core/src/backend/mod.rs @@ -14,9 +14,6 @@ use alloy_serde::WithOtherFields; use eyre::Context; use foundry_common::{is_known_system_sender, SYSTEM_TRANSACTION_TYPE}; pub use foundry_fork_db::{cache::BlockchainDbMeta, BlockchainDb, SharedBackend}; -use foundry_zksync_core::{ - convert::ConvertH160, ACCOUNT_CODE_STORAGE_ADDRESS, L2_BASE_TOKEN_ADDRESS, NONCE_HOLDER_ADDRESS, -}; use itertools::Itertools; use revm::{ db::{CacheDB, DatabaseRef}, @@ -30,8 +27,10 @@ use revm::{ }; use std::{ collections::{BTreeMap, HashMap, HashSet}, + sync::{Arc, Mutex}, time::Instant, }; +use strategy::{BackendStrategy, BackendStrategyForkInfo, EvmBackendStrategy}; mod diagnostic; pub use diagnostic::RevertDiagnostic; @@ -51,8 +50,10 @@ pub use snapshot::{BackendSnapshot, RevertSnapshotAction, StateSnapshot}; mod fork_type; pub use fork_type::{CachedForkType, ForkType}; +pub mod strategy; + // A `revm::Database` that is used in forking mode -type ForkDB = CacheDB; +pub type ForkDB = CacheDB; /// Represents a numeric `ForkId` valid only for the existence of the `Backend`. /// @@ -444,6 +445,9 @@ struct _ObjectSafe(dyn DatabaseExt); #[derive(Clone, Debug)] #[must_use] pub struct Backend { + /// Custom backend strategy + pub strategy: Arc>, + /// The access point for managing forks forks: MultiFork, // The default in memory db @@ -473,13 +477,6 @@ pub struct Backend { inner: BackendInner, /// Keeps track of the fork type fork_url_type: CachedForkType, - /// TODO: Ensure this parameter is updated on `select_fork`. - /// - /// Keeps track if the backend is in ZK mode. - /// This is required to correctly merge storage when selecting another ZK fork. - /// The balance, nonce and code are stored under zkSync's respective system contract - /// storages. These need to be merged into the forked storage. - pub is_zk: bool, } impl Backend { @@ -491,6 +488,17 @@ impl Backend { Self::new(MultiFork::spawn(), fork) } + /// Creates a new Backend with a spawned multi fork thread. + /// + /// If `fork` is `Some` this will use a `fork` database, otherwise with an in-memory + /// database. 
+ pub fn spawn_with_strategy( + fork: Option, + strategy: Arc>, + ) -> Self { + Self::new_with_strategy(MultiFork::spawn(), fork, strategy) + } + /// Creates a new instance of `Backend` /// /// If `fork` is `Some` this will use a `fork` database, otherwise with an in-memory @@ -498,6 +506,20 @@ impl Backend { /// /// Prefer using [`spawn`](Self::spawn) instead. pub fn new(forks: MultiFork, fork: Option) -> Self { + Self::new_with_strategy(forks, fork, Arc::new(Mutex::new(EvmBackendStrategy))) + } + + /// Creates a new instance of `Backend` + /// + /// If `fork` is `Some` this will use a `fork` database, otherwise with an in-memory + /// database. + /// + /// Prefer using [`spawn`](Self::spawn) instead. + pub fn new_with_strategy( + forks: MultiFork, + fork: Option, + strategy: Arc>, + ) -> Self { trace!(target: "backend", forking_mode=?fork.is_some(), "creating executor backend"); // Note: this will take of registering the `fork` let inner = BackendInner { @@ -506,13 +528,13 @@ impl Backend { }; let mut backend = Self { + strategy, forks, mem_db: CacheDB::new(Default::default()), fork_init_journaled_state: inner.new_journaled_state(), active_fork_ids: None, inner, fork_url_type: Default::default(), - is_zk: false, }; if let Some(fork) = fork { @@ -546,13 +568,13 @@ impl Backend { /// Creates a new instance with a `BackendDatabase::InMemory` cache layer for the `CacheDB` pub fn clone_empty(&self) -> Self { Self { + strategy: self.strategy.clone(), forks: self.forks.clone(), mem_db: CacheDB::new(Default::default()), fork_init_journaled_state: self.inner.new_journaled_state(), active_fork_ids: None, inner: Default::default(), fork_url_type: Default::default(), - is_zk: false, } } @@ -652,41 +674,41 @@ impl Backend { self.inner.has_snapshot_failure = has_snapshot_failure } - /// When creating or switching forks, we update the AccountInfo of the contract - pub(crate) fn update_fork_db( - &self, - active_journaled_state: &mut JournaledState, - target_fork: &mut Fork, - merge_zk_db: bool, - ) { - self.update_fork_db_contracts( - self.inner.persistent_accounts.iter().copied(), - active_journaled_state, - target_fork, - merge_zk_db, - ) - } - - /// Merges the state of all `accounts` from the currently active db into the given `fork` - pub(crate) fn update_fork_db_contracts( - &self, - accounts: impl IntoIterator, - active_journaled_state: &mut JournaledState, - target_fork: &mut Fork, - merge_zk_db: bool, - ) { - if let Some(db) = self.active_fork_db() { - merge_account_data(accounts, db, active_journaled_state, target_fork, merge_zk_db) - } else { - merge_account_data( - accounts, - &self.mem_db, - active_journaled_state, - target_fork, - merge_zk_db, - ) - } - } + // /// When creating or switching forks, we update the AccountInfo of the contract + // pub(crate) fn update_fork_db( + // &self, + // active_journaled_state: &mut JournaledState, + // target_fork: &mut Fork, + // merge_zk_db: bool, + // ) { + // self.update_fork_db_contracts( + // self.inner.persistent_accounts.iter().copied(), + // active_journaled_state, + // target_fork, + // merge_zk_db, + // ) + // } + + // /// Merges the state of all `accounts` from the currently active db into the given `fork` + // pub(crate) fn update_fork_db_contracts( + // &self, + // accounts: impl IntoIterator, + // active_journaled_state: &mut JournaledState, + // target_fork: &mut Fork, + // merge_zk_db: bool, + // ) { + // if let Some(db) = self.active_fork_db() { + // merge_account_data(accounts, db, active_journaled_state, target_fork, merge_zk_db) + 
// } else { + // merge_account_data( + // accounts, + // &self.mem_db, + // active_journaled_state, + // target_fork, + // merge_zk_db, + // ) + // } + // } /// Returns the memory db used if not in forking mode pub fn mem_db(&self) -> &FoundryEvmInMemoryDB { @@ -778,7 +800,7 @@ impl Backend { /// Initializes settings we need to keep track of. /// /// We need to track these mainly to prevent issues when switching between different evms - pub(crate) fn initialize(&mut self, env: &EnvWithHandlerCfg) { + pub fn initialize(&mut self, env: &EnvWithHandlerCfg) { self.set_caller(env.tx.caller); self.set_spec_id(env.handler_cfg.spec_id); } @@ -796,16 +818,21 @@ impl Backend { pub fn inspect<'a, I: InspectorExt<&'a mut Self>>( &'a mut self, env: &mut EnvWithHandlerCfg, - inspector: I, + mut inspector: I, + extra: Option>, ) -> eyre::Result { self.initialize(env); - let mut evm = crate::utils::new_evm_with_inspector(self, env.clone(), inspector); - let res = evm.transact().wrap_err("backend: failed while inspecting")?; + let strategy = self.strategy.clone(); + let mut guard = strategy.lock().unwrap(); + guard.inspect(self, env, &mut inspector, extra) + // let mut evm = crate::utils::new_evm_with_inspector(self, env.clone(), inspector); + + // let res = evm.transact().wrap_err("backend: failed while inspecting")?; - env.env = evm.context.evm.inner.env; + // env.env = evm.context.evm.inner.env; - Ok(res) + // Ok(res) } /// Executes the configured test call of the `env` without committing state changes @@ -1117,20 +1144,19 @@ impl DatabaseExt for Backend { let fork_id = self.ensure_fork_id(id).cloned()?; let idx = self.inner.ensure_fork_index(&fork_id)?; - let is_current_zk_fork = if let Some(active_fork_id) = self.active_fork_id() { + let current_fork_type = if let Some(active_fork_id) = self.active_fork_id() { self.forks .get_fork_url(self.ensure_fork_id(active_fork_id).cloned()?)? - .map(|url| self.fork_url_type.get(&url).is_zk()) - .unwrap_or_default() + .map(|url| self.fork_url_type.get(&url)) + .unwrap_or(ForkType::Evm) } else { - self.is_zk + ForkType::Zk }; - let is_target_zk_fork = self + let target_fork_type = self .forks .get_fork_url(fork_id.clone())? 
- .map(|url| self.fork_url_type.get(&url).is_zk()) - .unwrap_or_default(); - let merge_zk_db = is_current_zk_fork && is_target_zk_fork; + .map(|url| self.fork_url_type.get(&url)) + .unwrap_or(ForkType::Evm); let fork_env = self .forks @@ -1199,7 +1225,17 @@ impl DatabaseExt for Backend { caller_account.into() }); - self.update_fork_db(active_journaled_state, &mut fork, merge_zk_db); + self.strategy.lock().unwrap().update_fork_db( + BackendStrategyForkInfo { + active_fork: self.active_fork(), + active_type: current_fork_type, + target_type: target_fork_type, + }, + &self.mem_db, + &self.inner, + active_journaled_state, + &mut fork, + ); // insert the fork back self.inner.set_fork(idx, fork); @@ -1248,7 +1284,11 @@ impl DatabaseExt for Backend { active.journaled_state.depth = journaled_state.depth; for addr in persistent_addrs { - merge_journaled_state_data(addr, journaled_state, &mut active.journaled_state); + strategy::merge_journaled_state_data( + addr, + journaled_state, + &mut active.journaled_state, + ); } // Ensure all previously loaded accounts are present in the journaled state to @@ -1261,7 +1301,7 @@ impl DatabaseExt for Backend { for (addr, acc) in journaled_state.state.iter() { if acc.is_created() { if acc.is_touched() { - merge_journaled_state_data( + strategy::merge_journaled_state_data( *addr, journaled_state, &mut active.journaled_state, @@ -1644,8 +1684,8 @@ pub enum BackendDatabaseSnapshot { /// Represents a fork #[derive(Clone, Debug)] pub struct Fork { - db: ForkDB, - journaled_state: JournaledState, + pub db: ForkDB, + pub journaled_state: JournaledState, } impl Fork { @@ -1828,7 +1868,7 @@ impl BackendInner { // we initialize a _new_ `ForkDB` but keep the state of persistent accounts let mut new_db = ForkDB::new(backend); for addr in self.persistent_accounts.iter().copied() { - merge_db_account_data(addr, &active.db, &mut new_db); + strategy::merge_db_account_data(addr, &active.db, &mut new_db); } active.db = new_db; } @@ -1914,121 +1954,121 @@ pub(crate) fn update_current_env_with_fork_env(current: &mut Env, fork: Env) { current.tx.chain_id = fork.tx.chain_id; } -/// Clones the data of the given `accounts` from the `active` database into the `fork_db` -/// This includes the data held in storage (`CacheDB`) and kept in the `JournaledState`. 
-pub(crate) fn merge_account_data( - accounts: impl IntoIterator, - active: &CacheDB, - active_journaled_state: &mut JournaledState, - target_fork: &mut Fork, - merge_zk_db: bool, -) { - for addr in accounts.into_iter() { - merge_db_account_data(addr, active, &mut target_fork.db); - if merge_zk_db { - merge_zk_account_data(addr, active, &mut target_fork.db); - } - merge_journaled_state_data(addr, active_journaled_state, &mut target_fork.journaled_state); - } - - // need to mock empty journal entries in case the current checkpoint is higher than the existing - // journal entries - while active_journaled_state.journal.len() > target_fork.journaled_state.journal.len() { - target_fork.journaled_state.journal.push(Default::default()); - } - - *active_journaled_state = target_fork.journaled_state.clone(); -} - -/// Clones the account data from the `active_journaled_state` into the `fork_journaled_state` -fn merge_journaled_state_data( - addr: Address, - active_journaled_state: &JournaledState, - fork_journaled_state: &mut JournaledState, -) { - if let Some(mut acc) = active_journaled_state.state.get(&addr).cloned() { - trace!(?addr, "updating journaled_state account data"); - if let Some(fork_account) = fork_journaled_state.state.get_mut(&addr) { - // This will merge the fork's tracked storage with active storage and update values - fork_account.storage.extend(std::mem::take(&mut acc.storage)); - // swap them so we can insert the account as whole in the next step - std::mem::swap(&mut fork_account.storage, &mut acc.storage); - } - fork_journaled_state.state.insert(addr, acc); - } -} - -/// Clones the account data from the `active` db into the `ForkDB` -fn merge_db_account_data( - addr: Address, - active: &CacheDB, - fork_db: &mut ForkDB, -) { - let mut acc = if let Some(acc) = active.accounts.get(&addr).cloned() { - acc - } else { - // Account does not exist - return; - }; - - if let Some(code) = active.contracts.get(&acc.info.code_hash).cloned() { - fork_db.contracts.insert(acc.info.code_hash, code); - } - - if let Some(fork_account) = fork_db.accounts.get_mut(&addr) { - // This will merge the fork's tracked storage with active storage and update values - fork_account.storage.extend(std::mem::take(&mut acc.storage)); - // swap them so we can insert the account as whole in the next step - std::mem::swap(&mut fork_account.storage, &mut acc.storage); - } - - fork_db.accounts.insert(addr, acc); -} - -/// Clones the zk account data from the `active` db into the `ForkDB` -fn merge_zk_account_data( - addr: Address, - active: &CacheDB, - fork_db: &mut ForkDB, -) { - let mut merge_system_contract_entry = |system_contract: Address, slot: U256| { - let mut acc = if let Some(acc) = active.accounts.get(&system_contract).cloned() { - acc - } else { - // Account does not exist - return; - }; - - let mut storage = Map::::default(); - if let Some(value) = acc.storage.get(&slot) { - storage.insert(slot, *value); - } - - if let Some(fork_account) = fork_db.accounts.get_mut(&system_contract) { - // This will merge the fork's tracked storage with active storage and update values - fork_account.storage.extend(storage); - // swap them so we can insert the account as whole in the next step - std::mem::swap(&mut fork_account.storage, &mut acc.storage); - } else { - std::mem::swap(&mut storage, &mut acc.storage) - } - - fork_db.accounts.insert(system_contract, acc); - }; - - merge_system_contract_entry( - L2_BASE_TOKEN_ADDRESS.to_address(), - foundry_zksync_core::get_balance_key(addr), - ); - 
merge_system_contract_entry( - ACCOUNT_CODE_STORAGE_ADDRESS.to_address(), - foundry_zksync_core::get_account_code_key(addr), - ); - merge_system_contract_entry( - NONCE_HOLDER_ADDRESS.to_address(), - foundry_zksync_core::get_nonce_key(addr), - ); -} +// /// Clones the data of the given `accounts` from the `active` database into the `fork_db` +// /// This includes the data held in storage (`CacheDB`) and kept in the `JournaledState`. +// pub(crate) fn merge_account_data( +// accounts: impl IntoIterator, +// active: &CacheDB, +// active_journaled_state: &mut JournaledState, +// target_fork: &mut Fork, +// merge_zk_db: bool, +// ) { +// for addr in accounts.into_iter() { +// merge_db_account_data(addr, active, &mut target_fork.db); +// if merge_zk_db { +// merge_zk_account_data(addr, active, &mut target_fork.db); +// } +// merge_journaled_state_data(addr, active_journaled_state, &mut +// target_fork.journaled_state); } + +// // need to mock empty journal entries in case the current checkpoint is higher than the +// existing // journal entries +// while active_journaled_state.journal.len() > target_fork.journaled_state.journal.len() { +// target_fork.journaled_state.journal.push(Default::default()); +// } + +// *active_journaled_state = target_fork.journaled_state.clone(); +// } + +// /// Clones the account data from the `active_journaled_state` into the `fork_journaled_state` +// fn merge_journaled_state_data( +// addr: Address, +// active_journaled_state: &JournaledState, +// fork_journaled_state: &mut JournaledState, +// ) { +// if let Some(mut acc) = active_journaled_state.state.get(&addr).cloned() { +// trace!(?addr, "updating journaled_state account data"); +// if let Some(fork_account) = fork_journaled_state.state.get_mut(&addr) { +// // This will merge the fork's tracked storage with active storage and update values +// fork_account.storage.extend(std::mem::take(&mut acc.storage)); +// // swap them so we can insert the account as whole in the next step +// std::mem::swap(&mut fork_account.storage, &mut acc.storage); +// } +// fork_journaled_state.state.insert(addr, acc); +// } +// } + +// /// Clones the account data from the `active` db into the `ForkDB` +// fn merge_db_account_data( +// addr: Address, +// active: &CacheDB, +// fork_db: &mut ForkDB, +// ) { +// let mut acc = if let Some(acc) = active.accounts.get(&addr).cloned() { +// acc +// } else { +// // Account does not exist +// return; +// }; + +// if let Some(code) = active.contracts.get(&acc.info.code_hash).cloned() { +// fork_db.contracts.insert(acc.info.code_hash, code); +// } + +// if let Some(fork_account) = fork_db.accounts.get_mut(&addr) { +// // This will merge the fork's tracked storage with active storage and update values +// fork_account.storage.extend(std::mem::take(&mut acc.storage)); +// // swap them so we can insert the account as whole in the next step +// std::mem::swap(&mut fork_account.storage, &mut acc.storage); +// } + +// fork_db.accounts.insert(addr, acc); +// } + +// /// Clones the zk account data from the `active` db into the `ForkDB` +// fn merge_zk_account_data( +// addr: Address, +// active: &CacheDB, +// fork_db: &mut ForkDB, +// ) { +// let mut merge_system_contract_entry = |system_contract: Address, slot: U256| { +// let mut acc = if let Some(acc) = active.accounts.get(&system_contract).cloned() { +// acc +// } else { +// // Account does not exist +// return; +// }; + +// let mut storage = Map::::default(); +// if let Some(value) = acc.storage.get(&slot) { +// storage.insert(slot, *value); +// } 
+ +// if let Some(fork_account) = fork_db.accounts.get_mut(&system_contract) { +// // This will merge the fork's tracked storage with active storage and update values +// fork_account.storage.extend(storage); +// // swap them so we can insert the account as whole in the next step +// std::mem::swap(&mut fork_account.storage, &mut acc.storage); +// } else { +// std::mem::swap(&mut storage, &mut acc.storage) +// } + +// fork_db.accounts.insert(system_contract, acc); +// }; + +// merge_system_contract_entry( +// L2_BASE_TOKEN_ADDRESS.to_address(), +// foundry_zksync_core::get_balance_key(addr), +// ); +// merge_system_contract_entry( +// ACCOUNT_CODE_STORAGE_ADDRESS.to_address(), +// foundry_zksync_core::get_account_code_key(addr), +// ); +// merge_system_contract_entry( +// NONCE_HOLDER_ADDRESS.to_address(), +// foundry_zksync_core::get_nonce_key(addr), +// ); +// } /// Returns true of the address is a contract fn is_contract_in_state(journaled_state: &JournaledState, acc: Address) -> bool { @@ -2059,7 +2099,7 @@ fn commit_transaction>( fork: &mut Fork, fork_id: &ForkId, persistent_accounts: &HashSet
, - inspector: I, + mut inspector: I, ) -> eyre::Result<()> { configure_tx_env(&mut env.env, &tx.inner); @@ -2070,7 +2110,7 @@ fn commit_transaction>( let depth = journaled_state.depth; let db = Backend::new_with_fork(fork_id, fork, journaled_state); - let mut evm = crate::utils::new_evm_with_inspector(db, env, inspector); + let mut evm = crate::utils::new_evm_with_inspector(db, env, &mut inspector); // Adjust inner EVM depth to ensure that inspectors receive accurate data. evm.context.evm.inner.journaled_state.depth = depth + 1; evm.transact().wrap_err("backend: failed committing transaction")? diff --git a/crates/evm/core/src/backend/strategy.rs b/crates/evm/core/src/backend/strategy.rs new file mode 100644 index 000000000..2aa5eeddd --- /dev/null +++ b/crates/evm/core/src/backend/strategy.rs @@ -0,0 +1,176 @@ +use crate::InspectorExt; + +use super::{Backend, BackendInner, Fork, ForkDB, ForkType, FoundryEvmInMemoryDB}; +use alloy_primitives::Address; +use eyre::Context; +use revm::{ + db::CacheDB, + primitives::{EnvWithHandlerCfg, ResultAndState}, + DatabaseRef, JournaledState, +}; + +pub struct BackendStrategyForkInfo<'a> { + pub active_fork: Option<&'a Fork>, + pub active_type: ForkType, + pub target_type: ForkType, +} + +pub trait BackendStrategy: std::fmt::Debug + Send + Sync { + /// When creating or switching forks, we update the AccountInfo of the contract + fn update_fork_db( + &self, + fork_info: BackendStrategyForkInfo<'_>, + mem_db: &FoundryEvmInMemoryDB, + backend_inner: &BackendInner, + active_journaled_state: &mut JournaledState, + target_fork: &mut Fork, + ); + + /// Executes the configured test call of the `env` without committing state changes. + /// + /// Note: in case there are any cheatcodes executed that modify the environment, this will + /// update the given `env` with the new values. 
+ #[instrument(name = "inspect", level = "debug", skip_all)] + fn inspect<'a>( + &mut self, + backend: &'a mut Backend, + env: &mut EnvWithHandlerCfg, + inspector: &mut dyn InspectorExt<&'a mut Backend>, + _extra: Option>, + ) -> eyre::Result { + backend.initialize(env); + let mut evm = crate::utils::new_evm_with_inspector(backend, env.clone(), inspector); + + let res = evm.transact().wrap_err("backend: failed while inspecting")?; + + env.env = evm.context.evm.inner.env; + + Ok(res) + } +} + +struct _ObjectSafe(dyn BackendStrategy); + +#[derive(Debug)] +pub struct EvmBackendStrategy; + +impl BackendStrategy for EvmBackendStrategy { + fn update_fork_db( + &self, + fork_info: BackendStrategyForkInfo<'_>, + mem_db: &FoundryEvmInMemoryDB, + backend_inner: &BackendInner, + active_journaled_state: &mut JournaledState, + target_fork: &mut Fork, + ) { + self.update_fork_db_contracts( + fork_info, + mem_db, + backend_inner, + active_journaled_state, + target_fork, + ) + } +} + +impl EvmBackendStrategy { + /// Merges the state of all `accounts` from the currently active db into the given `fork` + pub(crate) fn update_fork_db_contracts( + &self, + fork_info: BackendStrategyForkInfo<'_>, + mem_db: &FoundryEvmInMemoryDB, + backend_inner: &BackendInner, + active_journaled_state: &mut JournaledState, + target_fork: &mut Fork, + ) { + let accounts = backend_inner.persistent_accounts.iter().copied(); + if let Some(db) = fork_info.active_fork.map(|f| &f.db) { + EvmBackendMergeStrategy::merge_account_data( + accounts, + db, + active_journaled_state, + target_fork, + ) + } else { + EvmBackendMergeStrategy::merge_account_data( + accounts, + mem_db, + active_journaled_state, + target_fork, + ) + } + } +} +pub struct EvmBackendMergeStrategy; +impl EvmBackendMergeStrategy { + /// Clones the data of the given `accounts` from the `active` database into the `fork_db` + /// This includes the data held in storage (`CacheDB`) and kept in the `JournaledState`. 
+ pub fn merge_account_data( + accounts: impl IntoIterator, + active: &CacheDB, + active_journaled_state: &mut JournaledState, + target_fork: &mut Fork, + ) { + for addr in accounts.into_iter() { + merge_db_account_data(addr, active, &mut target_fork.db); + merge_journaled_state_data( + addr, + active_journaled_state, + &mut target_fork.journaled_state, + ); + } + + // need to mock empty journal entries in case the current checkpoint is higher than the + // existing journal entries + while active_journaled_state.journal.len() > target_fork.journaled_state.journal.len() { + target_fork.journaled_state.journal.push(Default::default()); + } + + *active_journaled_state = target_fork.journaled_state.clone(); + } +} + +/// Clones the account data from the `active_journaled_state` into the `fork_journaled_state` +pub fn merge_journaled_state_data( + addr: Address, + active_journaled_state: &JournaledState, + fork_journaled_state: &mut JournaledState, +) { + if let Some(mut acc) = active_journaled_state.state.get(&addr).cloned() { + trace!(?addr, "updating journaled_state account data"); + if let Some(fork_account) = fork_journaled_state.state.get_mut(&addr) { + // This will merge the fork's tracked storage with active storage and update values + fork_account.storage.extend(std::mem::take(&mut acc.storage)); + // swap them so we can insert the account as whole in the next step + std::mem::swap(&mut fork_account.storage, &mut acc.storage); + } + fork_journaled_state.state.insert(addr, acc); + } +} + +/// Clones the account data from the `active` db into the `ForkDB` +pub fn merge_db_account_data( + addr: Address, + active: &CacheDB, + fork_db: &mut ForkDB, +) { + let mut acc = if let Some(acc) = active.accounts.get(&addr).cloned() { + acc + } else { + // Account does not exist + return; + }; + + if let Some(code) = active.contracts.get(&acc.info.code_hash).cloned() { + fork_db.contracts.insert(acc.info.code_hash, code); + } + + if let Some(fork_account) = fork_db.accounts.get_mut(&addr) { + // This will merge the fork's tracked storage with active storage and update values + fork_account.storage.extend(std::mem::take(&mut acc.storage)); + // swap them so we can insert the account as whole in the next step + std::mem::swap(&mut fork_account.storage, &mut acc.storage); + } + + fork_db.accounts.insert(addr, acc); +} diff --git a/crates/evm/core/src/utils.rs b/crates/evm/core/src/utils.rs index f39c65964..ec09a274e 100644 --- a/crates/evm/core/src/utils.rs +++ b/crates/evm/core/src/utils.rs @@ -246,14 +246,14 @@ pub fn alphanet_handler_register>( } /// Creates a new EVM with the given inspector. -pub fn new_evm_with_inspector<'a, DB, I>( +pub fn new_evm_with_inspector<'a, DB>( db: DB, env: revm::primitives::EnvWithHandlerCfg, - inspector: I, -) -> revm::Evm<'a, I, DB> + inspector: &mut dyn InspectorExt, +) -> revm::Evm<'a, &mut dyn InspectorExt, DB> where DB: revm::Database, - I: InspectorExt, + // I: InspectorExt, { let revm::primitives::EnvWithHandlerCfg { env, handler_cfg } = env; @@ -283,14 +283,13 @@ where } /// Creates a new EVM with the given inspector and wraps the database in a `WrapDatabaseRef`. 
-pub fn new_evm_with_inspector_ref<'a, DB, I>( +pub fn new_evm_with_inspector_ref<'a, DB>( db: DB, env: revm::primitives::EnvWithHandlerCfg, - inspector: I, -) -> revm::Evm<'a, I, WrapDatabaseRef> + inspector: &mut dyn InspectorExt>, +) -> revm::Evm<'a, &mut dyn InspectorExt>, WrapDatabaseRef> where DB: revm::DatabaseRef, - I: InspectorExt>, { new_evm_with_inspector(WrapDatabaseRef(db), env, inspector) } diff --git a/crates/evm/evm/Cargo.toml b/crates/evm/evm/Cargo.toml index 65966dfa5..9488f42d0 100644 --- a/crates/evm/evm/Cargo.toml +++ b/crates/evm/evm/Cargo.toml @@ -47,6 +47,7 @@ revm = { workspace = true, default-features = false, features = [ revm-inspectors.workspace = true eyre.workspace = true +serde_json.workspace = true parking_lot.workspace = true proptest.workspace = true thiserror.workspace = true diff --git a/crates/evm/evm/src/executors/mod.rs b/crates/evm/evm/src/executors/mod.rs index ba8c50360..5e1e9b7c9 100644 --- a/crates/evm/evm/src/executors/mod.rs +++ b/crates/evm/evm/src/executors/mod.rs @@ -448,22 +448,27 @@ impl Executor { pub fn transact_with_env(&mut self, mut env: EnvWithHandlerCfg) -> eyre::Result { let mut inspector = self.inspector.clone(); let backend = &mut self.backend; - let result_and_state = match self.zk_tx.take() { - None => backend.inspect(&mut env, &mut inspector)?, - Some(zk_tx) => { - // apply fork-related env instead of cheatcode handler - // since it won't be run inside zkvm - env.block = self.env.block.clone(); - env.tx.gas_price = self.env.tx.gas_price; - backend.inspect_ref_zk( - &mut env, - // this will persist the added factory deps, - // no need to commit them later - &mut self.zk_persisted_factory_deps, - Some(zk_tx.factory_deps), - )? - } - }; + let strategy = backend.strategy.clone(); + let extra = self.zk_tx.take().map(|zk_tx| serde_json::to_vec(&zk_tx).unwrap()); + let result_and_state = + strategy.lock().unwrap().inspect(backend, &mut env, &mut inspector, extra)?; + + // let result_and_state = match self.zk_tx.take() { + // None => backend.inspect(&mut env, &mut inspector, &[])?, + // Some(zk_tx) => { + // // apply fork-related env instead of cheatcode handler + // // since it won't be run inside zkvm + // env.block = self.env.block.clone(); + // env.tx.gas_price = self.env.tx.gas_price; + // backend.inspect_ref_zk( + // &mut env, + // // this will persist the added factory deps, + // // no need to commit them later + // &mut self.zk_persisted_factory_deps, + // Some(zk_tx.factory_deps), + // )? 
+ // } + // }; let mut result = convert_executed_result( env, inspector, diff --git a/crates/forge/Cargo.toml b/crates/forge/Cargo.toml index 42ec8cdca..958df9fa6 100644 --- a/crates/forge/Cargo.toml +++ b/crates/forge/Cargo.toml @@ -36,6 +36,7 @@ foundry-evm.workspace = true foundry-evm-abi.workspace = true foundry-wallets.workspace = true foundry-linking.workspace = true +foundry-strategy.workspace = true foundry-zksync-core.workspace = true foundry-zksync-compiler.workspace = true @@ -61,6 +62,7 @@ forge-script.workspace = true forge-sol-macro-gen.workspace = true foundry-cli.workspace = true foundry-debugger.workspace = true +foundry-zksync.workspace = true alloy-chains.workspace = true alloy-consensus.workspace = true diff --git a/crates/forge/src/multi_runner.rs b/crates/forge/src/multi_runner.rs index 0761b9054..00d338859 100644 --- a/crates/forge/src/multi_runner.rs +++ b/crates/forge/src/multi_runner.rs @@ -26,6 +26,8 @@ use foundry_evm::{ traces::{InternalTraceMode, TraceMode}, }; use foundry_linking::{LinkOutput, Linker}; +use foundry_strategy::{EvmRunnerStrategy, RunnerStrategy}; +use foundry_zksync::ZkRunnerStrategy; use foundry_zksync_compiler::DualCompiledContracts; use rayon::prelude::*; use revm::primitives::SpecId; @@ -88,7 +90,7 @@ pub struct MultiContractRunner { /// Dual compiled contracts pub dual_compiled_contracts: DualCompiledContracts, /// Use zk runner. - pub use_zk: bool, + pub strategy: Box, } impl MultiContractRunner { @@ -181,8 +183,7 @@ impl MultiContractRunner { trace!("running all tests"); // The DB backend that serves all the data. - let mut db = Backend::spawn(self.fork.take()); - db.is_zk = self.use_zk; + let db = Backend::spawn_with_strategy(self.fork.take(), self.strategy.backend_strategy()); let find_timer = Instant::now(); let contracts = self.matching_contracts(filter).collect::>(); @@ -255,7 +256,7 @@ impl MultiContractRunner { None, Some(artifact_id.version.clone()), self.dual_compiled_contracts.clone(), - self.use_zk, + self.strategy.name() == "zk", // use_zk ); let trace_mode = TraceMode::default() @@ -272,7 +273,7 @@ impl MultiContractRunner { .enable_isolation(self.isolation) .alphanet(self.alphanet) }) - .use_zk_vm(self.use_zk) + .use_zk_vm(self.strategy.name() == "zk") // use_zk .spec(self.evm_spec) .gas_limit(self.evm_opts.gas_limit()) .legacy_assertions(self.config.legacy_assertions) @@ -416,6 +417,11 @@ impl MultiContractRunnerBuilder { dual_compiled_contracts: DualCompiledContracts, ) -> Result { let use_zk = zk_output.is_some(); + let strategy: Box = if use_zk { + Box::new(ZkRunnerStrategy::default()) + } else { + Box::new(EvmRunnerStrategy::default()) + }; let mut known_contracts = ContractsByArtifact::default(); let output = output.with_stripped_file_prefixes(root); let linker = Linker::new(root, output.artifact_ids().collect()); @@ -515,7 +521,7 @@ impl MultiContractRunnerBuilder { libs_to_deploy, libraries, dual_compiled_contracts, - use_zk, + strategy, }) } } diff --git a/crates/foundry-zksync/Cargo.toml b/crates/foundry-zksync/Cargo.toml new file mode 100644 index 000000000..080f57139 --- /dev/null +++ b/crates/foundry-zksync/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "foundry-zksync" + +version.workspace = true +edition.workspace = true +rust-version.workspace = true +authors.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +foundry-common.workspace = true +foundry-compilers.workspace = true +foundry-evm-traces.workspace = true 
+foundry-evm-core.workspace = true +foundry-strategy.workspace = true +foundry-zksync-core.workspace = true +revm-inspectors.workspace = true + +alloy-primitives.workspace = true + +eyre.workspace = true +revm.workspace = true +tracing.workspace = true +serde.workspace = true +serde_json.workspace = true diff --git a/crates/foundry-zksync/src/lib.rs b/crates/foundry-zksync/src/lib.rs new file mode 100644 index 000000000..52fc2a633 --- /dev/null +++ b/crates/foundry-zksync/src/lib.rs @@ -0,0 +1,202 @@ +use std::sync::{Arc, Mutex}; + +use alloy_primitives::{Address, U256}; +use foundry_evm_core::{ + backend::{ + strategy::{ + merge_db_account_data, merge_journaled_state_data, BackendStrategy, + BackendStrategyForkInfo, + }, + Backend, BackendInner, Fork, ForkDB, FoundryEvmInMemoryDB, + }, + InspectorExt, +}; +use foundry_strategy::RunnerStrategy; +use foundry_zksync_core::{ + convert::ConvertH160, ACCOUNT_CODE_STORAGE_ADDRESS, H256, L2_BASE_TOKEN_ADDRESS, + NONCE_HOLDER_ADDRESS, +}; +use revm::{ + db::CacheDB, + primitives::{EnvWithHandlerCfg, HashMap, ResultAndState}, + DatabaseRef, JournaledState, +}; + +#[derive(Debug)] +pub struct ZkBackendStrategy { + persisted_factory_deps: HashMap>, +} + +impl BackendStrategy for ZkBackendStrategy { + /// When creating or switching forks, we update the AccountInfo of the contract. + fn update_fork_db( + &self, + fork_info: BackendStrategyForkInfo<'_>, + mem_db: &FoundryEvmInMemoryDB, + backend_inner: &BackendInner, + active_journaled_state: &mut JournaledState, + target_fork: &mut Fork, + ) { + self.update_fork_db_contracts( + fork_info, + mem_db, + backend_inner, + active_journaled_state, + target_fork, + ) + } + + fn inspect<'a>( + &mut self, + backend: &'a mut Backend, + env: &mut EnvWithHandlerCfg, + _inspector: &mut dyn InspectorExt<&'a mut Backend>, + extra: Option>, + ) -> eyre::Result { + backend.initialize(env); + + let factory_deps = serde_json::from_slice(&extra.unwrap()).unwrap(); + foundry_zksync_core::vm::transact( + Some(&mut self.persisted_factory_deps), + factory_deps, + env, + backend, + ) + } +} + +impl ZkBackendStrategy { + /// Merges the state of all `accounts` from the currently active db into the given `fork` + pub(crate) fn update_fork_db_contracts( + &self, + fork_info: BackendStrategyForkInfo<'_>, + mem_db: &FoundryEvmInMemoryDB, + backend_inner: &BackendInner, + active_journaled_state: &mut JournaledState, + target_fork: &mut Fork, + ) { + let require_zk_storage_merge = + fork_info.active_type.is_zk() && fork_info.target_type.is_zk(); + let accounts = backend_inner.persistent_accounts.iter().copied(); + if let Some(db) = fork_info.active_fork.map(|f| &f.db) { + ZkBackendMergeStrategy::merge_account_data( + accounts, + db, + active_journaled_state, + target_fork, + require_zk_storage_merge, + ) + } else { + ZkBackendMergeStrategy::merge_account_data( + accounts, + mem_db, + active_journaled_state, + target_fork, + require_zk_storage_merge, + ) + } + } +} + +pub struct ZkBackendMergeStrategy; +impl ZkBackendMergeStrategy { + /// Clones the data of the given `accounts` from the `active` database into the `fork_db` + /// This includes the data held in storage (`CacheDB`) and kept in the `JournaledState`. 
+ pub fn merge_account_data( + accounts: impl IntoIterator, + active: &CacheDB, + active_journaled_state: &mut JournaledState, + target_fork: &mut Fork, + _require_zk_storage_merge: bool, + ) { + for addr in accounts.into_iter() { + merge_db_account_data(addr, active, &mut target_fork.db); + + // We do not care about EVM interoperatability now, so always update zk storage + // if require_zk_storage_merge { + // merge_zk_storage_account_data(addr, active, &mut target_fork.db); + // } + merge_zk_storage_account_data(addr, active, &mut target_fork.db); + merge_journaled_state_data( + addr, + active_journaled_state, + &mut target_fork.journaled_state, + ); + } + + // need to mock empty journal entries in case the current checkpoint is higher than the + // existing journal entries + while active_journaled_state.journal.len() > target_fork.journaled_state.journal.len() { + target_fork.journaled_state.journal.push(Default::default()); + } + + *active_journaled_state = target_fork.journaled_state.clone(); + } +} + +/// Clones the zk account data from the `active` db into the `ForkDB` +fn merge_zk_storage_account_data( + addr: Address, + active: &CacheDB, + fork_db: &mut ForkDB, +) { + let mut merge_system_contract_entry = |system_contract: Address, slot: U256| { + let mut acc = if let Some(acc) = active.accounts.get(&system_contract).cloned() { + acc + } else { + // Account does not exist + return; + }; + + let mut storage = HashMap::::default(); + if let Some(value) = acc.storage.get(&slot) { + storage.insert(slot, *value); + } + + if let Some(fork_account) = fork_db.accounts.get_mut(&system_contract) { + // This will merge the fork's tracked storage with active storage and update values + fork_account.storage.extend(storage); + // swap them so we can insert the account as whole in the next step + std::mem::swap(&mut fork_account.storage, &mut acc.storage); + } else { + std::mem::swap(&mut storage, &mut acc.storage) + } + + fork_db.accounts.insert(system_contract, acc); + }; + + merge_system_contract_entry( + L2_BASE_TOKEN_ADDRESS.to_address(), + foundry_zksync_core::get_balance_key(addr), + ); + merge_system_contract_entry( + ACCOUNT_CODE_STORAGE_ADDRESS.to_address(), + foundry_zksync_core::get_account_code_key(addr), + ); + merge_system_contract_entry( + NONCE_HOLDER_ADDRESS.to_address(), + foundry_zksync_core::get_nonce_key(addr), + ); +} + +pub struct ZkRunnerStrategy { + pub backend: Arc>, +} +impl Default for ZkRunnerStrategy { + fn default() -> Self { + Self { + backend: Arc::new(Mutex::new(ZkBackendStrategy { + persisted_factory_deps: Default::default(), + })), + } + } +} +impl RunnerStrategy for ZkRunnerStrategy { + fn name(&self) -> &'static str { + "zk" + } + + fn backend_strategy(&self) -> Arc> { + self.backend.clone() + } +} diff --git a/crates/strategy/Cargo.toml b/crates/strategy/Cargo.toml new file mode 100644 index 000000000..8b28fdcdc --- /dev/null +++ b/crates/strategy/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "foundry-strategy" + +version.workspace = true +edition.workspace = true +rust-version.workspace = true +authors.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +foundry-evm-core.workspace = true \ No newline at end of file diff --git a/crates/strategy/src/lib.rs b/crates/strategy/src/lib.rs new file mode 100644 index 000000000..56736c401 --- /dev/null +++ b/crates/strategy/src/lib.rs @@ -0,0 +1,25 @@ +use std::sync::{Arc, Mutex}; + +use 
foundry_evm_core::backend::strategy::{BackendStrategy, EvmBackendStrategy}; +pub trait RunnerStrategy: Send + Sync { + fn name(&self) -> &'static str; + fn backend_strategy(&self) -> Arc<Mutex<dyn BackendStrategy>>; +} + +pub struct EvmRunnerStrategy { + pub backend: Arc<Mutex<dyn BackendStrategy>>, +} +impl Default for EvmRunnerStrategy { + fn default() -> Self { + Self { backend: Arc::new(Mutex::new(EvmBackendStrategy)) } + } +} +impl RunnerStrategy for EvmRunnerStrategy { + fn name(&self) -> &'static str { + "evm" + } + + fn backend_strategy(&self) -> Arc<Mutex<dyn BackendStrategy>> { + self.backend.clone() + } +}
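
Usage sketch (illustrative, not part of the diff): the wiring introduced here goes runner strategy -> backend strategy -> Backend. The snippet below shows how a downstream crate could plug in a custom `RunnerStrategy` that reuses the stock `EvmBackendStrategy`, assuming the trait signatures introduced above; the generic parameters are reconstructed on a best-effort basis (this rendering drops most angle brackets), and `MyRunnerStrategy`, `build_backend`, and the "my-strategy" name are hypothetical.

use std::sync::{Arc, Mutex};

use foundry_evm_core::backend::{
    strategy::{BackendStrategy, EvmBackendStrategy},
    Backend,
};
use foundry_strategy::RunnerStrategy;

pub struct MyRunnerStrategy {
    backend: Arc<Mutex<dyn BackendStrategy>>,
}

impl Default for MyRunnerStrategy {
    fn default() -> Self {
        // Reuse the stock EVM backend strategy; a custom `BackendStrategy`
        // (like `ZkBackendStrategy` above) could be swapped in here instead.
        Self { backend: Arc::new(Mutex::new(EvmBackendStrategy)) }
    }
}

impl RunnerStrategy for MyRunnerStrategy {
    fn name(&self) -> &'static str {
        "my-strategy"
    }

    fn backend_strategy(&self) -> Arc<Mutex<dyn BackendStrategy>> {
        self.backend.clone()
    }
}

fn build_backend(strategy: &dyn RunnerStrategy) -> Backend {
    // Mirrors the multi_runner.rs change above: the runner hands its backend
    // strategy to the Backend at construction time.
    Backend::spawn_with_strategy(None, strategy.backend_strategy())
}

Sharing the strategy as `Arc<Mutex<dyn BackendStrategy>>` is what lets `Backend::inspect` clone the handle and lock it while still passing itself as `&mut Backend` into the strategy's `inspect`.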
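The other new seam is the opaque `extra: Option<Vec<u8>>` argument on `BackendStrategy::inspect`: the executor serializes its pending zk transaction with `serde_json::to_vec`, `ZkBackendStrategy` decodes it with `serde_json::from_slice`, and the default EVM implementation ignores it. A standalone sketch of that handshake, with `ZkTxPayload` as a hypothetical stand-in for the real payload type (which this patch does not show):

use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct ZkTxPayload {
    factory_deps: Vec<Vec<u8>>,
}

fn encode_extra(payload: &ZkTxPayload) -> Option<Vec<u8>> {
    // Caller side: mirrors `self.zk_tx.take().map(|zk_tx| serde_json::to_vec(&zk_tx).unwrap())`.
    Some(serde_json::to_vec(payload).expect("payload serializes"))
}

fn decode_extra(extra: Option<Vec<u8>>) -> Option<ZkTxPayload> {
    // Strategy side: mirrors `serde_json::from_slice(&extra.unwrap()).unwrap()`, but
    // returns None instead of panicking when no payload was forwarded.
    extra.map(|bytes| serde_json::from_slice(&bytes).expect("payload round-trips"))
}

fn main() {
    let extra = encode_extra(&ZkTxPayload { factory_deps: vec![vec![0u8; 4]] });
    println!("{:?}", decode_extra(extra));
}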