From fae30ab5864a31ab0dbd7fc411b22b7f80ed8e94 Mon Sep 17 00:00:00 2001 From: Nisheeth Barthwal Date: Mon, 30 Oct 2023 17:42:19 +0100 Subject: [PATCH 1/6] chore: refactor node namespaces (#197) --- SUPPORTED_APIS.md | 126 +-- src/configuration_api.rs | 231 ---- src/evm.rs | 675 ------------ src/hardhat.rs | 518 --------- src/http_fork_source.rs | 8 +- src/lib.rs | 3 +- src/main.rs | 56 +- src/namespaces/config.rs | 93 ++ src/{ => namespaces}/debug.rs | 0 src/namespaces/evm.rs | 70 ++ src/namespaces/hardhat.rs | 89 ++ src/namespaces/mod.rs | 17 + src/namespaces/net.rs | 16 + src/net.rs | 38 - src/node/config.rs | 189 ++++ src/node/debug.rs | 562 ++++++++++ src/{node.rs => node/eth.rs} | 1927 +++------------------------------ src/node/evm.rs | 68 ++ src/node/hardhat.rs | 68 ++ src/node/in_memory.rs | 1641 ++++++++++++++++++++++++++++ src/node/in_memory_ext.rs | 901 +++++++++++++++ src/node/mod.rs | 13 + src/node/net.rs | 23 + src/{ => node}/zks.rs | 195 +--- src/testing.rs | 4 +- 25 files changed, 4030 insertions(+), 3501 deletions(-) delete mode 100644 src/configuration_api.rs delete mode 100644 src/evm.rs delete mode 100644 src/hardhat.rs create mode 100644 src/namespaces/config.rs rename src/{ => namespaces}/debug.rs (100%) create mode 100644 src/namespaces/evm.rs create mode 100644 src/namespaces/hardhat.rs create mode 100644 src/namespaces/mod.rs create mode 100644 src/namespaces/net.rs delete mode 100644 src/net.rs create mode 100644 src/node/config.rs create mode 100644 src/node/debug.rs rename src/{node.rs => node/eth.rs} (59%) create mode 100644 src/node/evm.rs create mode 100644 src/node/hardhat.rs create mode 100644 src/node/in_memory.rs create mode 100644 src/node/in_memory_ext.rs create mode 100644 src/node/mod.rs create mode 100644 src/node/net.rs rename src/{ => node}/zks.rs (87%) diff --git a/SUPPORTED_APIS.md b/SUPPORTED_APIS.md index 58fad3c5..16b408ba 100644 --- a/SUPPORTED_APIS.md +++ b/SUPPORTED_APIS.md @@ -134,7 +134,7 @@ The `status` options 
are: ### `config_getShowCalls` -[source](src/configuration_api.rs) +[source](src/node/config.rs) Gets the current value of `show_calls` that's originally set with `--show-calls` option @@ -157,7 +157,7 @@ curl --request POST \ ### `config_getCurrentTimestamp` -[source](src/configuration_api.rs) +[source](src/node/config.rs) Gets the value of `current_timestamp` for the node @@ -180,7 +180,7 @@ curl --request POST \ ### `config_setShowCalls` -[source](src/configuration_api.rs) +[source](src/node/config.rs) Updates `show_calls` to print more detailed call traces @@ -203,7 +203,7 @@ curl --request POST \ ### `config_setShowStorageLogs` -[source](src/configuration_api.rs) +[source](src/node/config.rs) Updates `show_storage_logs` to print storage log reads/writes @@ -226,7 +226,7 @@ curl --request POST \ ### `config_setShowVmDetails` -[source](src/configuration_api.rs) +[source](src/node/config.rs) Updates `show_vm_details` to print more detailed results from vm execution @@ -249,7 +249,7 @@ curl --request POST \ ### `config_setShowGasDetails` -[source](src/configuration_api.rs) +[source](src/node/config.rs) Updates `show_gas_details` to print more details about gas estimation and usage @@ -272,7 +272,7 @@ curl --request POST \ ### `config_setResolveHashes` -[source](src/configuration_api.rs) +[source](src/node/config.rs) Updates `resolve-hashes` to call OpenChain for human-readable ABI names in call traces @@ -295,7 +295,7 @@ curl --request POST \ ### `config_setLogLevel` -[source](src/configuration_api.rs) +[source](src/node/config.rs) Sets the logging level for the node and only displays the node logs. @@ -318,7 +318,7 @@ curl --request POST \ ### `config_setLogging` -[source](src/configuration_api.rs) +[source](src/node/config.rs) Sets the fine-tuned logging levels for the node and any of its dependencies. The directive format is comma-separated `module=level` for any number of modules. 
@@ -344,7 +344,7 @@ curl --request POST \ ### `debug_traceCall` -[source](src/debug.rs) +[source](src/node/debug.rs) The `debug_traceCall` is similar to `eth_call` but returns call traces for each call. @@ -388,7 +388,7 @@ curl --request POST \ ### `debug_traceTransaction` -[source](src/debug.rs) +[source](src/node/debug.rs) Returns call traces for the transaction with given hash. @@ -425,7 +425,7 @@ curl --request POST \ ### `debug_traceBlockByHash` -[source](src/debug.rs) +[source](src/node/debug.rs) Returns call traces for each transaction within a given block. @@ -462,7 +462,7 @@ curl --request POST \ ### `debug_traceBlockByNumber` -[source](src/debug.rs) +[source](src/node/debug.rs) Returns call traces for each transaction within a given block. @@ -501,7 +501,7 @@ curl --request POST \ ### `net_version` -[source](src/net.rs) +[source](src/node/net.rs) Returns the current network id @@ -524,7 +524,7 @@ curl --request POST \ ### `net_peerCount` -[source](src/net.rs) +[source](src/node/net.rs) Returns the number of connected peers @@ -547,7 +547,7 @@ curl --request POST \ ### `net_listening` -[source](src/net.rs) +[source](src/node/net.rs) Returns `true` if the node is listening for connections @@ -572,7 +572,7 @@ curl --request POST \ ### `eth_accounts` -[source](src/node.rs) +[source](src/node/eth.rs) Returns a list of addresses owned by client @@ -595,7 +595,7 @@ curl --request POST \ ### `eth_chainId` -[source](src/node.rs) +[source](src/node/eth.rs) Returns the current chain id @@ -618,7 +618,7 @@ curl --request POST \ ### `eth_estimateGas` -[source](src/node.rs) +[source](src/node/eth.rs) Generates and returns an estimate of how much gas is necessary to allow the transaction to complete @@ -654,7 +654,7 @@ curl --request POST \ ### `eth_feeHistory` -[source](src/node.rs) +[source](src/node/eth.rs) Returns the fee history for a given range of blocks @@ -679,7 +679,7 @@ curl --request POST \ ### `eth_gasPrice` -[source](src/node.rs) +[source](src/node/eth.rs) 
Returns the current price per gas in wei @@ -702,7 +702,7 @@ curl --request POST \ ### `eth_getBalance` -[source](src/node.rs) +[source](src/node/eth.rs) Returns the balance of the account of given address @@ -732,7 +732,7 @@ curl --request POST \ ### `eth_getBlockByHash` -[source](src/node.rs) +[source](src/node/eth.rs) Returns information about a block by block hash @@ -762,7 +762,7 @@ curl --request POST \ ### `eth_getBlockByNumber` -[source](src/node.rs) +[source](src/node/eth.rs) Returns information about a block by block number @@ -792,7 +792,7 @@ curl --request POST \ ### `eth_getBlockTransactionCountByHash` -[source](src/node.rs) +[source](src/node/eth.rs) Number of transactions in a block from a block matching the given block hash @@ -820,7 +820,7 @@ curl --request POST \ ### `eth_getBlockTransactionCountByNumber` -[source](src/node.rs) +[source](src/node/eth.rs) Number of transactions in a block from a block matching the given block number @@ -849,7 +849,7 @@ curl --request POST \ ### `eth_getFilterChanges` -[source](src/node.rs) +[source](src/node/eth.rs) Polling method for a filter, which returns an array of logs, block hashes, or transaction hashes, depending on the filter type, which occurred since last poll @@ -877,7 +877,7 @@ curl --request POST \ ### `eth_newBlockFilter` -[source](src/node.rs) +[source](src/node/eth.rs) Creates a filter in the node, to notify when a new block arrives @@ -904,7 +904,7 @@ curl --request POST \ ### `eth_newFilter` -[source](src/node.rs) +[source](src/node/eth.rs) Creates a filter object, based on filter options, to notify when the state changes (logs) @@ -937,7 +937,7 @@ curl --request POST \ ### `eth_newPendingTransactionFilter` -[source](src/node.rs) +[source](src/node/eth.rs) Creates a filter in the node, to notify when new pending transactions arrive @@ -964,7 +964,7 @@ curl --request POST \ ### `eth_uninstallFilter` -[source](src/node.rs) +[source](src/node/eth.rs) Uninstalls a filter with given id @@ -992,7 
+992,7 @@ curl --request POST \ ### `eth_getFilterLogs` -[source](src/node.rs) +[source](src/node/eth.rs) Returns an array of all logs matching filter with given id @@ -1020,7 +1020,7 @@ curl --request POST \ ### `eth_getLogs` -[source](src/node.rs) +[source](src/node/eth.rs) Returns an array of all logs matching a filter @@ -1053,7 +1053,7 @@ curl --request POST \ ### `eth_getCode` -[source](src/node.rs) +[source](src/node/eth.rs) Returns code at a given address @@ -1083,7 +1083,7 @@ curl --request POST \ ### `eth_getTransactionByHash` -[source](src/node.rs) +[source](src/node/eth.rs) Returns the information about a transaction requested by transaction hash @@ -1111,7 +1111,7 @@ curl --request POST \ ### `eth_getTransactionCount` -[source](src/node.rs) +[source](src/node/eth.rs) Returns the number of transactions sent from an address @@ -1141,7 +1141,7 @@ curl --request POST \ ### `eth_getTransactionReceipt` -[source](src/node.rs) +[source](src/node/eth.rs) Returns the transaction receipt for a given transaction hash @@ -1169,7 +1169,7 @@ curl --request POST \ ### `eth_blockNumber` -[source](src/node.rs) +[source](src/node/eth.rs) Returns the number of most recent block @@ -1192,7 +1192,7 @@ curl --request POST \ ### `eth_call` -[source](src/node.rs) +[source](src/node/eth.rs) Executes a new message call immediately without creating a transaction on the block chain @@ -1230,7 +1230,7 @@ curl --request POST \ ### `eth_sendRawTransaction` -[source](src/node.rs) +[source](src/node/eth.rs) Creates new message call transaction or a contract creation for signed transactions @@ -1254,7 +1254,7 @@ curl --request POST \ ### `eth_syncing` -[source](src/node.rs) +[source](src/node/eth.rs) Returns syncing status of the node. This will always return `false`. @@ -1278,7 +1278,7 @@ curl --request POST \ ### `eth_getStorageAt` -[source](src/node.rs) +[source](src/node/eth.rs) Returns the value from a storage position at a given address. 
@@ -1308,7 +1308,7 @@ curl --request POST \ ### `eth_getTransactionByBlockHashAndIndex` -[source](src/node.rs) +[source](src/node/eth.rs) Returns information about a transaction by block hash and transaction index position @@ -1337,7 +1337,7 @@ curl --request POST \ ### `eth_getTransactionByBlockNumberAndIndex` -[source](src/node.rs) +[source](src/node/eth.rs) Returns information about a transaction by block number and transaction index position @@ -1366,7 +1366,7 @@ curl --request POST \ ### `eth_protocolVersion` -[source](src/node.rs) +[source](src/node/eth.rs) Returns the current ethereum protocol version. @@ -1395,7 +1395,7 @@ curl --request POST \ ### `hardhat_setBalance` -[source](src/hardhat.rs) +[source](src/node/hardhat.rs) Sets the balance of the given address to the given balance. @@ -1427,7 +1427,7 @@ curl --request POST \ ### `hardhat_setNonce` -[source](src/hardhat.rs) +[source](src/node/hardhat.rs) Modifies an account's nonce by overwriting it. The new nonce must be greater than the existing nonce. @@ -1456,7 +1456,7 @@ curl --request POST \ ### `hardhat_mine` -[source](src/hardhat.rs) +[source](src/node/hardhat.rs) Sometimes you may want to advance the latest block number of the network by a large number of blocks. One way to do this would be to call the evm_mine RPC method multiple times, but this is too slow if you want to mine thousands of blocks. @@ -1486,7 +1486,7 @@ curl --request POST \ ``` ### `hardhat_impersonateAccount` -[source](src/hardhat.rs) +[source](src/node/hardhat.rs) Begin impersonating account- subsequent transactions sent to the node will be committed as if they were initiated by the supplied address. @@ -1512,7 +1512,7 @@ curl --request POST \ ### `hardhat_stopImpersonatingAccount` -[source](src/hardhat.rs) +[source](src/node/hardhat.rs) Stop impersonating account, should be used after calling `hardhat_impersonateAccount`. 
Since we only impersonate one account at a time, the `address` argument is ignored and the current @@ -1540,7 +1540,7 @@ curl --request POST \ ### `hardhat_setCode` -[source](src/hardhat.rs) +[source](src/node/hardhat.rs) Sets the code for a given address. @@ -1574,7 +1574,7 @@ curl --request POST \ ### `evm_mine` -[source](src/evm.rs) +[source](src/node/evm.rs) Mines an empty block @@ -1594,7 +1594,7 @@ curl --request POST \ ### `evm_increaseTime` -[source](src/evm.rs) +[source](src/node/evm.rs) Increase the current timestamp for the node @@ -1617,7 +1617,7 @@ curl --request POST \ ### `evm_setNextBlockTimestamp` -[source](src/evm.rs) +[source](src/node/evm.rs) Sets the timestamp of the next block but doesn't mine one.. @@ -1640,7 +1640,7 @@ curl --request POST \ ### `evm_setTime` -[source](src/evm.rs) +[source](src/node/evm.rs) Set the current timestamp for the node. Warning: This will allow you to move _backwards_ in time, which may cause new blocks to appear to be mined before old blocks. This will result in an invalid state. @@ -1664,7 +1664,7 @@ curl --request POST \ ### `evm_snapshot` -[source](src/evm.rs) +[source](src/node/evm.rs) Snapshot the state of the blockchain at the current block. @@ -1687,7 +1687,7 @@ curl --request POST \ ### `evm_revert` -[source](src/evm.rs) +[source](src/node/evm.rs) Revert the state of the blockchain to a previous snapshot @@ -1712,7 +1712,7 @@ curl --request POST \ ### `zks_estimateFee` -[source](src/zks.rs) +[source](src/node/zks.rs) Generates and returns an estimate of how much gas is necessary to allow the transaction to complete @@ -1748,7 +1748,7 @@ curl --request POST \ ### `zks_getTokenPrice` -[source](src/zks.rs) +[source](src/node/zks.rs) Returns the token price given an Address @@ -1771,7 +1771,7 @@ curl --request POST \ ### `zks_getTransactionDetails` -[source](src/zks.rs) +[source](src/node/zks.rs) Returns data from a specific transaction given by the transaction hash. 
@@ -1794,7 +1794,7 @@ curl --request POST \ ### `zks_getBlockDetails` -[source](src/zks.rs) +[source](src/node/zks.rs) Returns additional zkSync-specific information about the L2 block. @@ -1817,7 +1817,7 @@ curl --request POST \ ### `zks_getBridgeContracts` -[source](src/zks.rs) +[source](src/node/zks.rs) Returns L1/L2 addresses of default bridges. @@ -1844,7 +1844,7 @@ curl --request POST \ ### `zks_getRawBlockTransactions` -[source](src/zks.rs) +[source](src/node/zks.rs) Returns data of transactions in a block. diff --git a/src/configuration_api.rs b/src/configuration_api.rs deleted file mode 100644 index 868c7b95..00000000 --- a/src/configuration_api.rs +++ /dev/null @@ -1,231 +0,0 @@ -// Built-in uses -use std::sync::{Arc, RwLock}; - -// External uses -use jsonrpc_core::Result; -use jsonrpc_derive::rpc; -use zksync_core::api_server::web3::backend_jsonrpc::error::into_jsrpc_error; -use zksync_web3_decl::error::Web3Error; - -// Workspace uses - -// Local uses -use crate::{ - node::InMemoryNodeInner, - node::ShowCalls, - node::ShowVMDetails, - node::{ShowGasDetails, ShowStorageLogs}, - observability::LogLevel, -}; - -pub struct ConfigurationApiNamespace { - node: Arc>>, -} - -impl ConfigurationApiNamespace { - pub fn new(node: Arc>>) -> Self { - Self { node } - } -} - -#[rpc] -pub trait ConfigurationApiNamespaceT { - /// Get the InMemoryNodeInner's show_calls property as a string - /// - /// # Returns - /// The current `show_calls` value for the InMemoryNodeInner. - #[rpc(name = "config_getShowCalls", returns = "String")] - fn config_get_show_calls(&self) -> Result; - - /// Get the InMemoryNodeInner's current_timestamp property - /// - /// # Returns - /// The current `current_timestamp` value for the InMemoryNodeInner. 
- #[rpc(name = "config_getCurrentTimestamp", returns = "u64")] - fn config_get_current_timestamp(&self) -> Result; - - /// Set show_calls for the InMemoryNodeInner - /// - /// # Parameters - /// - `value`: A ShowCalls enum to update show_calls to - /// - /// # Returns - /// The updated/current `show_calls` value for the InMemoryNodeInner. - #[rpc(name = "config_setShowCalls", returns = "String")] - fn config_set_show_calls(&self, value: String) -> Result; - - /// Set show_storage_logs for the InMemoryNodeInner - /// - /// # Parameters - /// - `value`: A ShowStorageLogs enum to update show_storage_logs to - /// - /// # Returns - /// The updated/current `show_storage_logs` value for the InMemoryNodeInner. - #[rpc(name = "config_setShowStorageLogs", returns = "String")] - fn config_set_show_storage_logs(&self, value: String) -> Result; - - /// Set show_vm_details for the InMemoryNodeInner - /// - /// # Parameters - /// - `value`: A ShowVMDetails enum to update show_vm_details to - /// - /// # Returns - /// The updated/current `show_vm_details` value for the InMemoryNodeInner. - #[rpc(name = "config_setShowVmDetails", returns = "String")] - fn config_set_show_vm_details(&self, value: String) -> Result; - - /// Set show_gas_details for the InMemoryNodeInner - /// - /// # Parameters - /// - `value`: A ShowGasDetails enum to update show_gas_details to - /// - /// # Returns - /// The updated/current `show_gas_details` value for the InMemoryNodeInner. - #[rpc(name = "config_setShowGasDetails", returns = "String")] - fn config_set_show_gas_details(&self, value: String) -> Result; - - /// Set resolve_hashes for the InMemoryNodeInner - /// - /// # Parameters - /// - `value`: A bool to update resolve_hashes to - /// - /// # Returns - /// The updated `resolve_hashes` value for the InMemoryNodeInner. 
- #[rpc(name = "config_setResolveHashes", returns = "bool")] - fn config_set_resolve_hashes(&self, value: bool) -> Result; - - /// Set the logging for the InMemoryNodeInner - /// - /// # Parameters - /// - `level`: The log level to set. One of: ["trace", "debug", "info", "warn", "error"] - /// - /// # Returns - /// `true` if the operation succeeded, `false` otherwise. - #[rpc(name = "config_setLogLevel", returns = "bool")] - fn config_set_log_level(&self, level: LogLevel) -> Result; - - /// Set the logging for the InMemoryNodeInner - /// - /// # Parameters - /// - `level`: The logging directive to set. Example: - /// * "my_crate=debug" - /// * "my_crate::module=trace" - /// * "my_crate=debug,other_crate=warn" - /// - /// # Returns - /// `true` if the operation succeeded, `false` otherwise. - #[rpc(name = "config_setLogging", returns = "bool")] - fn config_set_logging(&self, directive: String) -> Result; -} - -impl ConfigurationApiNamespaceT - for ConfigurationApiNamespace -{ - fn config_get_show_calls(&self) -> Result { - let reader = self.node.read().unwrap(); - Ok(reader.show_calls.to_string()) - } - - fn config_get_current_timestamp(&self) -> Result { - let reader = self.node.read().unwrap(); - Ok(reader.current_timestamp) - } - - fn config_set_show_calls(&self, value: String) -> Result { - let show_calls = match value.parse::() { - Ok(value) => value, - Err(_) => { - let reader = self.node.read().unwrap(); - return Ok(reader.show_calls.to_string()); - } - }; - - let mut inner = self.node.write().unwrap(); - inner.show_calls = show_calls; - Ok(inner.show_calls.to_string()) - } - - fn config_set_show_storage_logs(&self, value: String) -> Result { - let show_storage_logs = match value.parse::() { - Ok(value) => value, - Err(_) => { - let reader = self.node.read().unwrap(); - return Ok(reader.show_storage_logs.to_string()); - } - }; - - let mut inner = self.node.write().unwrap(); - inner.show_storage_logs = show_storage_logs; - 
Ok(inner.show_storage_logs.to_string()) - } - - fn config_set_show_vm_details(&self, value: String) -> Result { - let show_vm_details = match value.parse::() { - Ok(value) => value, - Err(_) => { - let reader = self.node.read().unwrap(); - return Ok(reader.show_vm_details.to_string()); - } - }; - - let mut inner = self.node.write().unwrap(); - inner.show_vm_details = show_vm_details; - Ok(inner.show_vm_details.to_string()) - } - - fn config_set_show_gas_details(&self, value: String) -> Result { - let show_gas_details = match value.parse::() { - Ok(value) => value, - Err(_) => { - let reader = self.node.read().unwrap(); - return Ok(reader.show_gas_details.to_string()); - } - }; - - let mut inner = self.node.write().unwrap(); - inner.show_gas_details = show_gas_details; - Ok(inner.show_gas_details.to_string()) - } - - fn config_set_resolve_hashes(&self, value: bool) -> Result { - let mut inner = self.node.write().unwrap(); - inner.resolve_hashes = value; - Ok(inner.resolve_hashes) - } - - fn config_set_log_level(&self, level: LogLevel) -> Result { - if let Some(observability) = &self - .node - .read() - .map_err(|_| into_jsrpc_error(Web3Error::InternalError))? - .observability - { - match observability.set_log_level(level.clone()) { - Ok(_) => tracing::info!("set log level to '{}'", level), - Err(err) => { - tracing::error!("failed setting log level {:?}", err); - return Ok(false); - } - } - } - Ok(true) - } - - fn config_set_logging(&self, directive: String) -> Result { - if let Some(observability) = &self - .node - .read() - .map_err(|_| into_jsrpc_error(Web3Error::InternalError))? 
- .observability - { - match observability.set_logging(&directive) { - Ok(_) => tracing::info!("set logging to '{}'", directive), - Err(err) => { - tracing::error!("failed setting logging to '{}': {:?}", directive, err); - return Ok(false); - } - } - } - Ok(true) - } -} diff --git a/src/evm.rs b/src/evm.rs deleted file mode 100644 index ed3efc78..00000000 --- a/src/evm.rs +++ /dev/null @@ -1,675 +0,0 @@ -use std::sync::{Arc, RwLock}; - -use crate::{ - fork::ForkSource, - node::{InMemoryNodeInner, Snapshot}, - utils::mine_empty_blocks, -}; -use jsonrpc_core::{BoxFuture, Result}; -use jsonrpc_derive::rpc; -use zksync_basic_types::U64; -use zksync_core::api_server::web3::backend_jsonrpc::error::into_jsrpc_error; -use zksync_web3_decl::error::Web3Error; - -/// The maximum number of [Snapshot]s to store. Each snapshot represents the node state -/// and can be used to revert the node to an earlier point in time. -const MAX_SNAPSHOTS: u8 = 100; - -/// Implementation of EvmNamespace -pub struct EvmNamespaceImpl { - node: Arc>>, - /// List of snapshots of the [InMemoryNodeInner]. This is bounded at runtime by [MAX_SNAPSHOTS]. - snapshots: Arc>>, -} - -impl EvmNamespaceImpl { - /// Creates a new `Evm` instance with the given `node`. - pub fn new(node: Arc>>) -> Self { - Self { - node, - snapshots: Default::default(), - } - } -} - -#[rpc] -pub trait EvmNamespaceT { - /// Increase the current timestamp for the node - /// - /// # Parameters - /// - `time_delta`: The number of seconds to increase time by - /// - /// # Returns - /// The applied time delta to `current_timestamp` value for the InMemoryNodeInner. - #[rpc(name = "evm_increaseTime")] - fn increase_time(&self, time_delta_seconds: u64) -> BoxFuture>; - - /// Force a single block to be mined. - /// - /// Will mine an empty block (containing zero transactions) - /// - /// # Returns - /// The string "0x0". - #[rpc(name = "evm_mine")] - fn evm_mine(&self) -> BoxFuture>; - - /// Set the current timestamp for the node. 
The timestamp must be in future. - /// - /// # Parameters - /// - `timestamp`: The timestamp to set the time to - /// - /// # Returns - /// The new timestamp value for the InMemoryNodeInner. - #[rpc(name = "evm_setNextBlockTimestamp")] - fn set_next_block_timestamp(&self, timestamp: u64) -> BoxFuture>; - - /// Set the current timestamp for the node. - /// Warning: This will allow you to move backwards in time, which may cause new blocks to appear to be - /// mined before old blocks. This will result in an invalid state. - /// - /// # Parameters - /// - `time`: The timestamp to set the time to - /// - /// # Returns - /// The difference between the `current_timestamp` and the new timestamp for the InMemoryNodeInner. - #[rpc(name = "evm_setTime")] - fn set_time(&self, time: u64) -> BoxFuture>; - - /// Snapshot the state of the blockchain at the current block. Takes no parameters. Returns the id of the snapshot - /// that was created. A snapshot can only be reverted once. After a successful evm_revert, the same snapshot id cannot - /// be used again. Consider creating a new snapshot after each evm_revert if you need to revert to the same - /// point multiple times. - /// - /// # Returns - /// The `U64` identifier for this snapshot. - #[rpc(name = "evm_snapshot")] - fn snapshot(&self) -> BoxFuture>; - - /// Revert the state of the blockchain to a previous snapshot. Takes a single parameter, - /// which is the snapshot id to revert to. This deletes the given snapshot, as well as any snapshots - /// taken after (e.g.: reverting to id 0x1 will delete snapshots with ids 0x1, 0x2, etc.) - /// - /// # Parameters - /// - `snapshot_id`: The snapshot id to revert. - /// - /// # Returns - /// `true` if a snapshot was reverted, otherwise `false`. 
- #[rpc(name = "evm_revert")] - fn revert_snapshot(&self, snapshot_id: U64) -> BoxFuture>; -} - -impl EvmNamespaceT - for EvmNamespaceImpl -{ - fn increase_time(&self, time_delta_seconds: u64) -> BoxFuture> { - let inner = Arc::clone(&self.node); - - Box::pin(async move { - if time_delta_seconds == 0 { - return Ok(time_delta_seconds); - } - - let time_delta = time_delta_seconds.saturating_mul(1000); - match inner.write() { - Ok(mut inner_guard) => { - inner_guard.current_timestamp = - inner_guard.current_timestamp.saturating_add(time_delta); - Ok(time_delta_seconds) - } - Err(_) => Err(into_jsrpc_error(Web3Error::InternalError)), - } - }) - } - - fn set_next_block_timestamp(&self, timestamp: u64) -> BoxFuture> { - let inner = Arc::clone(&self.node); - - Box::pin(async move { - match inner.write() { - Ok(mut inner_guard) => { - if timestamp < inner_guard.current_timestamp { - Err(into_jsrpc_error(Web3Error::InternalError)) - } else { - inner_guard.current_timestamp = timestamp; - Ok(timestamp) - } - } - Err(_) => Err(into_jsrpc_error(Web3Error::InternalError)), - } - }) - } - - fn set_time(&self, time: u64) -> BoxFuture> { - let inner = Arc::clone(&self.node); - - Box::pin(async move { - match inner.write() { - Ok(mut inner_guard) => { - let time_diff = - (time as i128).saturating_sub(inner_guard.current_timestamp as i128); - inner_guard.current_timestamp = time; - Ok(time_diff) - } - Err(_) => Err(into_jsrpc_error(Web3Error::InternalError)), - } - }) - } - - fn evm_mine(&self) -> BoxFuture> { - let inner = Arc::clone(&self.node); - Box::pin(async move { - match inner.write() { - Ok(mut inner) => { - mine_empty_blocks(&mut inner, 1, 1000); - tracing::info!("👷 Mined block #{}", inner.current_miniblock); - Ok("0x0".to_string()) - } - Err(_) => Err(into_jsrpc_error(Web3Error::InternalError)), - } - }) - } - - fn snapshot(&self) -> BoxFuture> { - let inner = Arc::clone(&self.node); - let snapshots = Arc::clone(&self.snapshots); - - Box::pin(async move { - // validate 
max snapshots - snapshots - .read() - .map_err(|err| { - tracing::error!("failed acquiring read lock for snapshot: {:?}", err); - into_jsrpc_error(Web3Error::InternalError) - }) - .and_then(|snapshots| { - if snapshots.len() >= MAX_SNAPSHOTS as usize { - tracing::error!("maximum number of '{}' snapshots exceeded", MAX_SNAPSHOTS); - Err(into_jsrpc_error(Web3Error::InternalError)) - } else { - Ok(()) - } - })?; - - // snapshot the node - let snapshot = inner - .read() - .map_err(|err| { - format!("failed acquiring read lock to node for snapshot: {:?}", err) - }) - .and_then(|reader| reader.snapshot()) - .map_err(|err| { - tracing::error!("failed creating snapshot: {:?}", err); - into_jsrpc_error(Web3Error::InternalError) - })?; - snapshots - .write() - .map(|mut snapshots| { - snapshots.push(snapshot); - tracing::info!("Created snapshot '{}'", snapshots.len()); - snapshots.len() - }) - .map_err(|err| { - tracing::error!("failed storing snapshot: {:?}", err); - into_jsrpc_error(Web3Error::InternalError) - }) - .map(U64::from) - }) - } - - fn revert_snapshot(&self, snapshot_id: U64) -> BoxFuture> { - let inner = Arc::clone(&self.node); - let snapshots = Arc::clone(&self.snapshots); - - Box::pin(async move { - let mut snapshots = snapshots.write().map_err(|err| { - tracing::error!("failed acquiring read lock for snapshots: {:?}", err); - into_jsrpc_error(Web3Error::InternalError) - })?; - let snapshot_id_index = snapshot_id.as_usize().saturating_sub(1); - if snapshot_id_index >= snapshots.len() { - tracing::error!("no snapshot exists for the id '{}'", snapshot_id); - return Err(into_jsrpc_error(Web3Error::InternalError)); - } - - // remove all snapshots following the index and use the first snapshot for restore - let selected_snapshot = snapshots - .drain(snapshot_id_index..) 
- .next() - .expect("unexpected failure, value must exist"); - - inner - .write() - .map_err(|err| format!("failed acquiring read lock for snapshots: {:?}", err)) - .and_then(|mut writer| { - tracing::info!("Reverting node to snapshot '{snapshot_id:?}'"); - writer.restore_snapshot(selected_snapshot).map(|_| true) - }) - .or_else(|err| { - tracing::error!( - "failed restoring snapshot for id '{}': {}", - snapshot_id, - err - ); - Ok(false) - }) - }) - } -} - -#[cfg(test)] -mod tests { - use crate::{http_fork_source::HttpForkSource, node::InMemoryNode}; - use zksync_core::api_server::web3::backend_jsonrpc::namespaces::eth::EthNamespaceT; - - use super::*; - - #[tokio::test] - async fn test_increase_time_zero_value() { - let node = InMemoryNode::::default(); - let evm = EvmNamespaceImpl::new(node.get_inner()); - - let increase_value_seconds = 0u64; - let timestamp_before = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading timestamp"); - let expected_response = increase_value_seconds; - - let actual_response = evm - .increase_time(increase_value_seconds) - .await - .expect("failed increasing timestamp"); - let timestamp_after = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading timestamp"); - - assert_eq!(expected_response, actual_response, "erroneous response"); - assert_eq!( - increase_value_seconds.saturating_mul(1000u64), - timestamp_after.saturating_sub(timestamp_before), - "timestamp did not increase by the specified amount", - ); - } - - #[tokio::test] - async fn test_increase_time_max_value() { - let node = InMemoryNode::::default(); - let evm = EvmNamespaceImpl::new(node.get_inner()); - - let increase_value_seconds = u64::MAX; - let timestamp_before = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading timestamp"); - assert_ne!(0, timestamp_before, "initial timestamp must be non zero",); - let expected_response = 
increase_value_seconds; - - let actual_response = evm - .increase_time(increase_value_seconds) - .await - .expect("failed increasing timestamp"); - let timestamp_after = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading timestamp"); - - assert_eq!(expected_response, actual_response, "erroneous response"); - assert_eq!( - u64::MAX, - timestamp_after, - "timestamp did not saturate upon increase", - ); - } - - #[tokio::test] - async fn test_increase_time() { - let node = InMemoryNode::::default(); - let evm = EvmNamespaceImpl::new(node.get_inner()); - - let increase_value_seconds = 100u64; - let timestamp_before = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading timestamp"); - let expected_response = increase_value_seconds; - - let actual_response = evm - .increase_time(increase_value_seconds) - .await - .expect("failed increasing timestamp"); - let timestamp_after = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading timestamp"); - - assert_eq!(expected_response, actual_response, "erroneous response"); - assert_eq!( - increase_value_seconds.saturating_mul(1000u64), - timestamp_after.saturating_sub(timestamp_before), - "timestamp did not increase by the specified amount", - ); - } - - #[tokio::test] - async fn test_set_next_block_timestamp_future() { - let node = InMemoryNode::::default(); - let evm = EvmNamespaceImpl::new(node.get_inner()); - - let new_timestamp = 10_000u64; - let timestamp_before = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading timestamp"); - assert_ne!( - timestamp_before, new_timestamp, - "timestamps must be different" - ); - let expected_response = new_timestamp; - - let actual_response = evm - .set_next_block_timestamp(new_timestamp) - .await - .expect("failed setting timestamp"); - let timestamp_after = node - .get_inner() - .read() - .map(|inner| 
inner.current_timestamp) - .expect("failed reading timestamp"); - - assert_eq!(expected_response, actual_response, "erroneous response"); - assert_eq!( - new_timestamp, timestamp_after, - "timestamp was not set correctly", - ); - } - - #[tokio::test] - async fn test_set_next_block_timestamp_past_fails() { - let node = InMemoryNode::::default(); - let evm = EvmNamespaceImpl::new(node.get_inner()); - - let timestamp_before = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading timestamp"); - - let new_timestamp = timestamp_before + 500; - evm.set_next_block_timestamp(new_timestamp) - .await - .expect("failed setting timestamp"); - - let result = evm.set_next_block_timestamp(timestamp_before).await; - - assert!(result.is_err(), "expected an error for timestamp in past"); - } - - #[tokio::test] - async fn test_set_next_block_timestamp_same_value() { - let node = InMemoryNode::::default(); - let evm = EvmNamespaceImpl::new(node.get_inner()); - - let new_timestamp = 1000u64; - let timestamp_before = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading timestamp"); - assert_eq!(timestamp_before, new_timestamp, "timestamps must be same"); - let expected_response = new_timestamp; - - let actual_response = evm - .set_next_block_timestamp(new_timestamp) - .await - .expect("failed setting timestamp"); - let timestamp_after = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading timestamp"); - - assert_eq!(expected_response, actual_response, "erroneous response"); - assert_eq!( - timestamp_before, timestamp_after, - "timestamp must not change", - ); - } - - #[tokio::test] - async fn test_set_time_future() { - let node = InMemoryNode::::default(); - let evm = EvmNamespaceImpl::new(node.get_inner()); - - let new_time = 10_000u64; - let timestamp_before = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading 
timestamp"); - assert_ne!(timestamp_before, new_time, "timestamps must be different"); - let expected_response = 9000; - - let actual_response = evm - .set_time(new_time) - .await - .expect("failed setting timestamp"); - let timestamp_after = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading timestamp"); - - assert_eq!(expected_response, actual_response, "erroneous response"); - assert_eq!(new_time, timestamp_after, "timestamp was not set correctly",); - } - - #[tokio::test] - async fn test_set_time_past() { - let node = InMemoryNode::::default(); - let evm = EvmNamespaceImpl::new(node.get_inner()); - - let new_time = 10u64; - let timestamp_before = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading timestamp"); - assert_ne!(timestamp_before, new_time, "timestamps must be different"); - let expected_response = -990; - - let actual_response = evm - .set_time(new_time) - .await - .expect("failed setting timestamp"); - let timestamp_after = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading timestamp"); - - assert_eq!(expected_response, actual_response, "erroneous response"); - assert_eq!(new_time, timestamp_after, "timestamp was not set correctly",); - } - - #[tokio::test] - async fn test_set_time_same_value() { - let node = InMemoryNode::::default(); - let evm = EvmNamespaceImpl::new(node.get_inner()); - - let new_time = 1000u64; - let timestamp_before = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading timestamp"); - assert_eq!(timestamp_before, new_time, "timestamps must be same"); - let expected_response = 0; - - let actual_response = evm - .set_time(new_time) - .await - .expect("failed setting timestamp"); - let timestamp_after = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .expect("failed reading timestamp"); - - assert_eq!(expected_response, 
actual_response, "erroneous response"); - assert_eq!( - timestamp_before, timestamp_after, - "timestamp must not change", - ); - } - - #[tokio::test] - async fn test_set_time_edges() { - let node = InMemoryNode::::default(); - let evm = EvmNamespaceImpl::new(node.get_inner()); - - for new_time in [0, u64::MAX] { - let timestamp_before = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .unwrap_or_else(|_| panic!("case {}: failed reading timestamp", new_time)); - assert_ne!( - timestamp_before, new_time, - "case {new_time}: timestamps must be different" - ); - let expected_response = (new_time as i128).saturating_sub(timestamp_before as i128); - - let actual_response = evm - .set_time(new_time) - .await - .expect("failed setting timestamp"); - let timestamp_after = node - .get_inner() - .read() - .map(|inner| inner.current_timestamp) - .unwrap_or_else(|_| panic!("case {}: failed reading timestamp", new_time)); - - assert_eq!( - expected_response, actual_response, - "case {new_time}: erroneous response" - ); - assert_eq!( - new_time, timestamp_after, - "case {new_time}: timestamp was not set correctly", - ); - } - } - - #[tokio::test] - async fn test_evm_mine() { - let node = InMemoryNode::::default(); - let evm = EvmNamespaceImpl::new(node.get_inner()); - - let start_block = node - .get_block_by_number(zksync_types::api::BlockNumber::Latest, false) - .await - .unwrap() - .expect("block exists"); - let result = evm.evm_mine().await.expect("evm_mine"); - assert_eq!(&result, "0x0"); - - let current_block = node - .get_block_by_number(zksync_types::api::BlockNumber::Latest, false) - .await - .unwrap() - .expect("block exists"); - - assert_eq!(start_block.number + 1, current_block.number); - assert_eq!(start_block.timestamp + 1, current_block.timestamp); - - let result = evm.evm_mine().await.expect("evm_mine"); - assert_eq!(&result, "0x0"); - - let current_block = node - .get_block_by_number(zksync_types::api::BlockNumber::Latest, false) - .await - 
.unwrap() - .expect("block exists"); - - assert_eq!(start_block.number + 2, current_block.number); - assert_eq!(start_block.timestamp + 2, current_block.timestamp); - } - - #[tokio::test] - async fn test_evm_snapshot_creates_incrementing_ids() { - let node = InMemoryNode::::default(); - let evm = EvmNamespaceImpl::new(node.get_inner()); - - let snapshot_id_1 = evm.snapshot().await.expect("failed creating snapshot 1"); - let snapshot_id_2 = evm.snapshot().await.expect("failed creating snapshot 2"); - - assert_eq!(snapshot_id_1, U64::from(1)); - assert_eq!(snapshot_id_2, U64::from(2)); - } - - #[tokio::test] - async fn test_evm_revert_snapshot_restores_state() { - let node = InMemoryNode::::default(); - let evm = EvmNamespaceImpl::new(node.get_inner()); - - let initial_block = node - .get_block_number() - .await - .expect("failed fetching block number"); - let snapshot_id = evm.snapshot().await.expect("failed creating snapshot"); - evm.evm_mine().await.expect("evm_mine"); - let current_block = node - .get_block_number() - .await - .expect("failed fetching block number"); - assert_eq!(current_block, initial_block + 1); - - let reverted = evm - .revert_snapshot(snapshot_id) - .await - .expect("failed reverting snapshot"); - assert!(reverted); - - let restored_block = node - .get_block_number() - .await - .expect("failed fetching block number"); - assert_eq!(restored_block, initial_block); - } - - #[tokio::test] - async fn test_evm_revert_snapshot_removes_all_snapshots_following_the_reverted_one() { - let node = InMemoryNode::::default(); - let evm = EvmNamespaceImpl::new(node.get_inner()); - - let _snapshot_id_1 = evm.snapshot().await.expect("failed creating snapshot"); - let snapshot_id_2 = evm.snapshot().await.expect("failed creating snapshot"); - let _snapshot_id_3 = evm.snapshot().await.expect("failed creating snapshot"); - assert_eq!(3, evm.snapshots.read().unwrap().len()); - - let reverted = evm - .revert_snapshot(snapshot_id_2) - .await - .expect("failed 
reverting snapshot"); - assert!(reverted); - - assert_eq!(1, evm.snapshots.read().unwrap().len()); - } - - #[tokio::test] - async fn test_evm_revert_snapshot_fails_for_invalid_snapshot_id() { - let node = InMemoryNode::::default(); - let evm = EvmNamespaceImpl::new(node.get_inner()); - - let result = evm.revert_snapshot(U64::from(100)).await; - assert!(result.is_err()); - } -} diff --git a/src/hardhat.rs b/src/hardhat.rs deleted file mode 100644 index dc99774e..00000000 --- a/src/hardhat.rs +++ /dev/null @@ -1,518 +0,0 @@ -use std::sync::{Arc, RwLock}; - -use crate::{ - fork::ForkSource, - node::InMemoryNodeInner, - utils::{bytecode_to_factory_dep, mine_empty_blocks, IntoBoxedFuture}, -}; -use jsonrpc_core::{BoxFuture, Result}; -use jsonrpc_derive::rpc; -use zksync_basic_types::{Address, U256, U64}; -use zksync_core::api_server::web3::backend_jsonrpc::error::into_jsrpc_error; -use zksync_state::ReadStorage; -use zksync_types::{ - get_code_key, get_nonce_key, - utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; -use zksync_web3_decl::error::Web3Error; - -/// Implementation of HardhatNamespaceImpl -pub struct HardhatNamespaceImpl { - node: Arc>>, -} - -impl HardhatNamespaceImpl { - /// Creates a new `Hardhat` instance with the given `node`. - pub fn new(node: Arc>>) -> Self { - Self { node } - } -} - -#[rpc] -pub trait HardhatNamespaceT { - /// Sets the balance of the given address to the given balance. - /// - /// # Arguments - /// - /// * `address` - The `Address` whose balance will be edited - /// * `balance` - The new balance to set for the given address, in wei - /// - /// # Returns - /// - /// A `BoxFuture` containing a `Result` with a `bool` representing the success of the operation. - #[rpc(name = "hardhat_setBalance")] - fn set_balance(&self, address: Address, balance: U256) -> BoxFuture>; - - /// Modifies an account's nonce by overwriting it. 
- /// - /// # Arguments - /// - /// * `address` - The `Address` whose nonce is to be changed - /// * `nonce` - The new nonce - /// - /// # Returns - /// - /// A `BoxFuture` containing a `Result` with a `bool` representing the success of the operation. - #[rpc(name = "hardhat_setNonce")] - fn set_nonce(&self, address: Address, balance: U256) -> BoxFuture>; - - /// Sometimes you may want to advance the latest block number of the network by a large number of blocks. - /// One way to do this would be to call the evm_mine RPC method multiple times, but this is too slow if you want to mine thousands of blocks. - /// The hardhat_mine method can mine any number of blocks at once, in constant time. (It exhibits the same performance no matter how many blocks are mined.) - /// - /// # Arguments - /// - /// * `num_blocks` - The number of blocks to mine, defaults to 1 - /// * `interval` - The interval between the timestamps of each block, in seconds, and it also defaults to 1 - /// - /// # Returns - /// - /// A `BoxFuture` containing a `Result` with a `bool` representing the success of the operation. - #[rpc(name = "hardhat_mine")] - fn hardhat_mine( - &self, - num_blocks: Option, - interval: Option, - ) -> BoxFuture>; - - /// Hardhat Network allows you to send transactions impersonating specific account and contract addresses. - /// To impersonate an account use this method, passing the address to impersonate as its parameter. - /// After calling this method, any transactions with this sender will be executed without verification. - /// Multiple addresses can be impersonated at once. - /// - /// # Arguments - /// - /// * `address` - The address to impersonate - /// - /// # Returns - /// - /// A `BoxFuture` containing a `Result` with a `bool` representing the success of the operation. 
- #[rpc(name = "hardhat_impersonateAccount")] - fn impersonate_account(&self, address: Address) -> BoxFuture>; - - /// Use this method to stop impersonating an account after having previously used `hardhat_impersonateAccount` - /// The method returns `true` if the account was being impersonated and `false` otherwise. - /// - /// # Arguments - /// - /// * `address` - The address to stop impersonating. - /// - /// # Returns - /// - /// A `BoxFuture` containing a `Result` with a `bool` representing the success of the operation. - #[rpc(name = "hardhat_stopImpersonatingAccount")] - fn stop_impersonating_account(&self, address: Address) -> BoxFuture>; - - /// Modifies the bytecode stored at an account's address. - /// - /// # Arguments - /// - /// * `address` - The address where the given code should be stored. - /// * `code` - The code to be stored. - /// - /// # Returns - /// - /// A `BoxFuture` containing a `Result` with a `bool` representing the success of the operation. - #[rpc(name = "hardhat_setCode")] - fn set_code(&self, address: Address, code: Vec) -> BoxFuture>; -} - -impl HardhatNamespaceT - for HardhatNamespaceImpl -{ - fn set_balance( - &self, - address: Address, - balance: U256, - ) -> jsonrpc_core::BoxFuture> { - let inner = Arc::clone(&self.node); - - Box::pin(async move { - match inner.write() { - Ok(mut inner_guard) => { - let balance_key = storage_key_for_eth_balance(&address); - inner_guard - .fork_storage - .set_value(balance_key, u256_to_h256(balance)); - tracing::info!( - "👷 Balance for address {:?} has been manually set to {} Wei", - address, - balance - ); - Ok(true) - } - Err(_) => { - let web3_error = Web3Error::InternalError; - Err(into_jsrpc_error(web3_error)) - } - } - }) - } - - fn set_nonce( - &self, - address: Address, - nonce: U256, - ) -> jsonrpc_core::BoxFuture> { - let inner = Arc::clone(&self.node); - Box::pin(async move { - match inner.write() { - Ok(mut inner_guard) => { - let nonce_key = get_nonce_key(&address); - let full_nonce 
= inner_guard.fork_storage.read_value(&nonce_key); - let (mut account_nonce, mut deployment_nonce) = - decompose_full_nonce(h256_to_u256(full_nonce)); - if account_nonce >= nonce { - return Err(jsonrpc_core::Error::invalid_params(format!( - "Account Nonce is already set to a higher value ({}, requested {})", - account_nonce, nonce - ))); - } - account_nonce = nonce; - if deployment_nonce >= nonce { - return Err(jsonrpc_core::Error::invalid_params(format!( - "Deployment Nonce is already set to a higher value ({}, requested {})", - deployment_nonce, nonce - ))); - } - deployment_nonce = nonce; - let enforced_full_nonce = nonces_to_full_nonce(account_nonce, deployment_nonce); - tracing::info!( - "👷 Nonces for address {:?} have been set to {}", - address, - nonce - ); - inner_guard - .fork_storage - .set_value(nonce_key, u256_to_h256(enforced_full_nonce)); - Ok(true) - } - Err(_) => Err(into_jsrpc_error(Web3Error::InternalError)), - } - }) - } - - fn hardhat_mine( - &self, - num_blocks: Option, - interval: Option, - ) -> BoxFuture> { - let inner = Arc::clone(&self.node); - Box::pin(async move { - match inner.write() { - Ok(mut inner) => { - let num_blocks = num_blocks.unwrap_or_else(|| U64::from(1)); - let interval_ms = interval - .unwrap_or_else(|| U64::from(1)) - .saturating_mul(1_000.into()); - if num_blocks.is_zero() { - return Err(jsonrpc_core::Error::invalid_params( - "Number of blocks must be greater than 0".to_string(), - )); - } - mine_empty_blocks(&mut inner, num_blocks.as_u64(), interval_ms.as_u64()); - tracing::info!("👷 Mined {} blocks", num_blocks); - Ok(true) - } - Err(_) => Err(into_jsrpc_error(Web3Error::InternalError)), - } - }) - } - - fn impersonate_account(&self, address: Address) -> BoxFuture> { - let inner = Arc::clone(&self.node); - Box::pin(async move { - match inner.write() { - Ok(mut inner) => { - if inner.set_impersonated_account(address) { - tracing::info!("🕵️ Account {:?} has been impersonated", address); - Ok(true) - } else { - 
tracing::info!("🕵️ Account {:?} was already impersonated", address); - Ok(false) - } - } - Err(_) => Err(into_jsrpc_error(Web3Error::InternalError)), - } - }) - } - - fn stop_impersonating_account(&self, address: Address) -> BoxFuture> { - let inner = Arc::clone(&self.node); - Box::pin(async move { - match inner.write() { - Ok(mut inner) => { - if inner.stop_impersonating_account(address) { - tracing::info!("🕵️ Stopped impersonating account {:?}", address); - Ok(true) - } else { - tracing::info!( - "🕵️ Account {:?} was not impersonated, nothing to stop", - address - ); - Ok(false) - } - } - Err(_) => Err(into_jsrpc_error(Web3Error::InternalError)), - } - }) - } - - fn set_code(&self, address: Address, code: Vec) -> BoxFuture> { - let inner = Arc::clone(&self.node); - inner - .write() - .map(|mut writer| { - let code_key = get_code_key(&address); - tracing::info!("set code for address {address:#x}"); - let (hash, code) = bytecode_to_factory_dep(code); - let hash = u256_to_h256(hash); - writer.fork_storage.store_factory_dep( - hash, - code.iter() - .flat_map(|entry| { - let mut bytes = vec![0u8; 32]; - entry.to_big_endian(&mut bytes); - bytes.to_vec() - }) - .collect(), - ); - writer.fork_storage.set_value(code_key, hash); - }) - .map_err(|_| into_jsrpc_error(Web3Error::InternalError)) - .into_boxed_future() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{http_fork_source::HttpForkSource, node::InMemoryNode}; - use std::str::FromStr; - use zksync_basic_types::{Nonce, H256}; - use zksync_core::api_server::web3::backend_jsonrpc::namespaces::eth::EthNamespaceT; - use zksync_types::{api::BlockNumber, fee::Fee, l2::L2Tx, PackedEthSignature}; - - #[tokio::test] - async fn test_set_balance() { - let address = Address::from_str("0x36615Cf349d7F6344891B1e7CA7C72883F5dc049").unwrap(); - let node = InMemoryNode::::default(); - let hardhat = HardhatNamespaceImpl::new(node.get_inner()); - - let balance_before = node.get_balance(address, None).await.unwrap(); - - 
let result = hardhat - .set_balance(address, U256::from(1337)) - .await - .unwrap(); - assert!(result); - - let balance_after = node.get_balance(address, None).await.unwrap(); - assert_eq!(balance_after, U256::from(1337)); - assert_ne!(balance_before, balance_after); - } - - #[tokio::test] - async fn test_set_nonce() { - let address = Address::from_str("0x36615Cf349d7F6344891B1e7CA7C72883F5dc049").unwrap(); - let node = InMemoryNode::::default(); - let hardhat = HardhatNamespaceImpl::new(node.get_inner()); - - let nonce_before = node.get_transaction_count(address, None).await.unwrap(); - - let result = hardhat.set_nonce(address, U256::from(1337)).await.unwrap(); - assert!(result); - - let nonce_after = node.get_transaction_count(address, None).await.unwrap(); - assert_eq!(nonce_after, U256::from(1337)); - assert_ne!(nonce_before, nonce_after); - - // setting nonce lower than the current one should fail - let result = hardhat.set_nonce(address, U256::from(1336)).await; - assert!(result.is_err()); - } - - #[tokio::test] - async fn test_hardhat_mine_default() { - let node = InMemoryNode::::default(); - let hardhat = HardhatNamespaceImpl::new(node.get_inner()); - - let start_block = node - .get_block_by_number(zksync_types::api::BlockNumber::Latest, false) - .await - .unwrap() - .expect("block exists"); - - // test with defaults - let result = hardhat - .hardhat_mine(None, None) - .await - .expect("hardhat_mine"); - assert!(result); - - let current_block = node - .get_block_by_number(zksync_types::api::BlockNumber::Latest, false) - .await - .unwrap() - .expect("block exists"); - - assert_eq!(start_block.number + 1, current_block.number); - assert_eq!(start_block.timestamp + 1, current_block.timestamp); - let result = hardhat - .hardhat_mine(None, None) - .await - .expect("hardhat_mine"); - assert!(result); - - let current_block = node - .get_block_by_number(zksync_types::api::BlockNumber::Latest, false) - .await - .unwrap() - .expect("block exists"); - - 
assert_eq!(start_block.number + 2, current_block.number); - assert_eq!(start_block.timestamp + 2, current_block.timestamp); - } - - #[tokio::test] - async fn test_hardhat_mine_custom() { - let node = InMemoryNode::::default(); - let hardhat: HardhatNamespaceImpl = - HardhatNamespaceImpl::new(node.get_inner()); - - let start_block = node - .get_block_by_number(zksync_types::api::BlockNumber::Latest, false) - .await - .unwrap() - .expect("block exists"); - - let num_blocks = 5; - let interval = 3; - let start_timestamp = start_block.timestamp + 1; - - let result = hardhat - .hardhat_mine(Some(U64::from(num_blocks)), Some(U64::from(interval))) - .await - .expect("hardhat_mine"); - assert!(result); - - for i in 0..num_blocks { - let current_block = node - .get_block_by_number(BlockNumber::Number(start_block.number + i + 1), false) - .await - .unwrap() - .expect("block exists"); - assert_eq!(start_block.number + i + 1, current_block.number); - assert_eq!( - start_timestamp + i * interval * 1_000, - current_block.timestamp - ); - } - } - - #[tokio::test] - async fn test_impersonate_account() { - let node = InMemoryNode::::default(); - let hardhat: HardhatNamespaceImpl = - HardhatNamespaceImpl::new(node.get_inner()); - let to_impersonate = - Address::from_str("0xd8da6bf26964af9d7eed9e03e53415d37aa96045").unwrap(); - - // give impersonated account some balance - let result = hardhat - .set_balance(to_impersonate, U256::exp10(18)) - .await - .unwrap(); - assert!(result); - - // construct a tx - let mut tx = L2Tx::new( - Address::random(), - vec![], - Nonce(0), - Fee { - gas_limit: U256::from(1_000_000), - max_fee_per_gas: U256::from(250_000_000), - max_priority_fee_per_gas: U256::from(250_000_000), - gas_per_pubdata_limit: U256::from(20000), - }, - to_impersonate, - U256::one(), - None, - Default::default(), - ); - tx.set_input(vec![], H256::random()); - if tx.common_data.signature.is_empty() { - tx.common_data.signature = 
PackedEthSignature::default().serialize_packed().into(); - } - - // try to execute the tx- should fail without signature - assert!(node.apply_txs(vec![tx.clone()]).is_err()); - - // impersonate the account - let result = hardhat - .impersonate_account(to_impersonate) - .await - .expect("impersonate_account"); - - // result should be true - assert!(result); - - // impersonating the same account again should return false - let result = hardhat - .impersonate_account(to_impersonate) - .await - .expect("impersonate_account"); - assert!(!result); - - // execution should now succeed - assert!(node.apply_txs(vec![tx.clone()]).is_ok()); - - // stop impersonating the account - let result = hardhat - .stop_impersonating_account(to_impersonate) - .await - .expect("stop_impersonating_account"); - - // result should be true - assert!(result); - - // stop impersonating the same account again should return false - let result = hardhat - .stop_impersonating_account(to_impersonate) - .await - .expect("stop_impersonating_account"); - assert!(!result); - - // execution should now fail again - assert!(node.apply_txs(vec![tx]).is_err()); - } - - #[tokio::test] - async fn test_set_code() { - let address = Address::repeat_byte(0x1); - let node = InMemoryNode::::default(); - let hardhat = HardhatNamespaceImpl::new(node.get_inner()); - let new_code = vec![0x1u8; 32]; - - let code_before = node - .get_code(address, None) - .await - .expect("failed getting code") - .0; - assert_eq!(Vec::::default(), code_before); - - hardhat - .set_code(address, new_code.clone()) - .await - .expect("failed setting code"); - - let code_after = node - .get_code(address, None) - .await - .expect("failed getting code") - .0; - assert_eq!(new_code, code_after); - } -} diff --git a/src/http_fork_source.rs b/src/http_fork_source.rs index 6061db0b..8e902cc3 100644 --- a/src/http_fork_source.rs +++ b/src/http_fork_source.rs @@ -1,4 +1,4 @@ -use std::sync::RwLock; +use std::sync::{Arc, RwLock}; use crate::{ 
cache::{Cache, CacheConfig}, @@ -14,20 +14,20 @@ use zksync_web3_decl::{ types::Index, }; -#[derive(Debug)] +#[derive(Debug, Clone)] /// Fork source that gets the data via HTTP requests. pub struct HttpForkSource { /// URL for the network to fork. pub fork_url: String, /// Cache for network data. - pub(crate) cache: RwLock, + pub(crate) cache: Arc>, } impl HttpForkSource { pub fn new(fork_url: String, cache_config: CacheConfig) -> Self { Self { fork_url, - cache: RwLock::new(Cache::new(cache_config)), + cache: Arc::new(RwLock::new(Cache::new(cache_config))), } } diff --git a/src/lib.rs b/src/lib.rs index f92e7c7c..d28de8ac 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -42,19 +42,18 @@ //! Contributions to improve `era-test-node` are welcome. Please refer to the [contribution guidelines](https://github.com/matter-labs/era-test-node/blob/main/.github/CONTRIBUTING.md) for more details. pub mod bootloader_debug; -pub mod configuration_api; pub mod console_log; pub mod deps; pub mod filters; pub mod fork; pub mod formatter; pub mod http_fork_source; +pub mod namespaces; pub mod node; pub mod observability; pub mod resolver; pub mod system_contracts; pub mod utils; -pub mod zks; mod cache; mod testing; diff --git a/src/main.rs b/src/main.rs index e23bc6f8..1b4cd7a6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,41 +1,31 @@ use crate::cache::CacheConfig; -use crate::hardhat::{HardhatNamespaceImpl, HardhatNamespaceT}; use crate::node::{InMemoryNodeConfig, ShowGasDetails, ShowStorageLogs, ShowVMDetails}; use crate::observability::Observability; use clap::{Parser, Subcommand, ValueEnum}; -use configuration_api::ConfigurationApiNamespaceT; -use debug::DebugNamespaceImpl; -use evm::{EvmNamespaceImpl, EvmNamespaceT}; use fork::{ForkDetails, ForkSource}; use logging_middleware::LoggingMiddleware; use node::ShowCalls; use observability::LogLevel; use tracing_subscriber::filter::LevelFilter; -use zks::ZkMockNamespaceImpl; mod bootloader_debug; mod cache; -mod configuration_api; 
mod console_log; -mod debug; mod deps; -mod evm; mod filters; mod fork; mod formatter; -mod hardhat; mod http_fork_source; mod logging_middleware; +mod namespaces; mod node; pub mod observability; mod resolver; mod system_contracts; mod testing; mod utils; -mod zks; use node::InMemoryNode; -use zksync_core::api_server::web3::namespaces::NetNamespace; use std::fs::File; use std::{ @@ -50,11 +40,11 @@ use futures::{ FutureExt, }; use jsonrpc_core::MetaIoHandler; -use zksync_basic_types::{L2ChainId, H160, H256}; +use zksync_basic_types::{H160, H256}; -use crate::{configuration_api::ConfigurationApiNamespace, node::TEST_NODE_NETWORK_ID}; -use zksync_core::api_server::web3::backend_jsonrpc::namespaces::{ - debug::DebugNamespaceT, eth::EthNamespaceT, net::NetNamespaceT, zks::ZksNamespaceT, +use crate::namespaces::{ + ConfigurationApiNamespaceT, DebugNamespaceT, EthNamespaceT, EvmNamespaceT, HardhatNamespaceT, + NetNamespaceT, ZksNamespaceT, }; /// List of wallets (address, private key) that we seed with tokens at start. 
@@ -103,29 +93,24 @@ pub const RICH_WALLETS: [(&str, &str); 10] = [ #[allow(clippy::too_many_arguments)] async fn build_json_http< - S: std::marker::Sync + std::marker::Send + 'static + ForkSource + std::fmt::Debug, + S: std::marker::Sync + std::marker::Send + 'static + ForkSource + std::fmt::Debug + Clone, >( addr: SocketAddr, log_level_filter: LevelFilter, node: InMemoryNode, - net: NetNamespace, - config_api: ConfigurationApiNamespace, - evm: EvmNamespaceImpl, - zks: ZkMockNamespaceImpl, - hardhat: HardhatNamespaceImpl, - debug: DebugNamespaceImpl, ) -> tokio::task::JoinHandle<()> { let (sender, recv) = oneshot::channel::<()>(); let io_handler = { let mut io = MetaIoHandler::with_middleware(LoggingMiddleware::new(log_level_filter)); - io.extend_with(node.to_delegate()); - io.extend_with(net.to_delegate()); - io.extend_with(config_api.to_delegate()); - io.extend_with(evm.to_delegate()); - io.extend_with(zks.to_delegate()); - io.extend_with(hardhat.to_delegate()); - io.extend_with(debug.to_delegate()); + + io.extend_with(NetNamespaceT::to_delegate(node.clone())); + io.extend_with(ConfigurationApiNamespaceT::to_delegate(node.clone())); + io.extend_with(DebugNamespaceT::to_delegate(node.clone())); + io.extend_with(EthNamespaceT::to_delegate(node.clone())); + io.extend_with(EvmNamespaceT::to_delegate(node.clone())); + io.extend_with(HardhatNamespaceT::to_delegate(node.clone())); + io.extend_with(ZksNamespaceT::to_delegate(node)); io }; @@ -337,23 +322,10 @@ async fn main() -> anyhow::Result<()> { tracing::info!(""); } - let net = NetNamespace::new(L2ChainId::from(TEST_NODE_NETWORK_ID)); - let config_api = ConfigurationApiNamespace::new(node.get_inner()); - let evm = EvmNamespaceImpl::new(node.get_inner()); - let zks = ZkMockNamespaceImpl::new(node.get_inner()); - let hardhat = HardhatNamespaceImpl::new(node.get_inner()); - let debug = DebugNamespaceImpl::new(node.get_inner()); - let threads = build_json_http( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 
opt.port), log_level_filter, node, - net, - config_api, - evm, - zks, - hardhat, - debug, ) .await; diff --git a/src/namespaces/config.rs b/src/namespaces/config.rs new file mode 100644 index 00000000..32214b8c --- /dev/null +++ b/src/namespaces/config.rs @@ -0,0 +1,93 @@ +use crate::namespaces::Result; +use crate::observability::LogLevel; +use jsonrpc_derive::rpc; + +#[rpc] +pub trait ConfigurationApiNamespaceT { + /// Get the InMemoryNodeInner's show_calls property as a string + /// + /// # Returns + /// The current `show_calls` value for the InMemoryNodeInner. + #[rpc(name = "config_getShowCalls", returns = "String")] + fn config_get_show_calls(&self) -> Result; + + /// Get the InMemoryNodeInner's current_timestamp property + /// + /// # Returns + /// The current `current_timestamp` value for the InMemoryNodeInner. + #[rpc(name = "config_getCurrentTimestamp", returns = "u64")] + fn config_get_current_timestamp(&self) -> Result; + + /// Set show_calls for the InMemoryNodeInner + /// + /// # Parameters + /// - `value`: A ShowCalls enum to update show_calls to + /// + /// # Returns + /// The updated/current `show_calls` value for the InMemoryNodeInner. + #[rpc(name = "config_setShowCalls", returns = "String")] + fn config_set_show_calls(&self, value: String) -> Result; + + /// Set show_storage_logs for the InMemoryNodeInner + /// + /// # Parameters + /// - `value`: A ShowStorageLogs enum to update show_storage_logs to + /// + /// # Returns + /// The updated/current `show_storage_logs` value for the InMemoryNodeInner. + #[rpc(name = "config_setShowStorageLogs", returns = "String")] + fn config_set_show_storage_logs(&self, value: String) -> Result; + + /// Set show_vm_details for the InMemoryNodeInner + /// + /// # Parameters + /// - `value`: A ShowVMDetails enum to update show_vm_details to + /// + /// # Returns + /// The updated/current `show_vm_details` value for the InMemoryNodeInner. 
+ #[rpc(name = "config_setShowVmDetails", returns = "String")] + fn config_set_show_vm_details(&self, value: String) -> Result; + + /// Set show_gas_details for the InMemoryNodeInner + /// + /// # Parameters + /// - `value`: A ShowGasDetails enum to update show_gas_details to + /// + /// # Returns + /// The updated/current `show_gas_details` value for the InMemoryNodeInner. + #[rpc(name = "config_setShowGasDetails", returns = "String")] + fn config_set_show_gas_details(&self, value: String) -> Result; + + /// Set resolve_hashes for the InMemoryNodeInner + /// + /// # Parameters + /// - `value`: A bool to update resolve_hashes to + /// + /// # Returns + /// The updated `resolve_hashes` value for the InMemoryNodeInner. + #[rpc(name = "config_setResolveHashes", returns = "bool")] + fn config_set_resolve_hashes(&self, value: bool) -> Result; + + /// Set the logging for the InMemoryNodeInner + /// + /// # Parameters + /// - `level`: The log level to set. One of: ["trace", "debug", "info", "warn", "error"] + /// + /// # Returns + /// `true` if the operation succeeded, `false` otherwise. + #[rpc(name = "config_setLogLevel", returns = "bool")] + fn config_set_log_level(&self, level: LogLevel) -> Result; + + /// Set the logging for the InMemoryNodeInner + /// + /// # Parameters + /// - `level`: The logging directive to set. Example: + /// * "my_crate=debug" + /// * "my_crate::module=trace" + /// * "my_crate=debug,other_crate=warn" + /// + /// # Returns + /// `true` if the operation succeeded, `false` otherwise. 
+ #[rpc(name = "config_setLogging", returns = "bool")] + fn config_set_logging(&self, directive: String) -> Result; +} diff --git a/src/debug.rs b/src/namespaces/debug.rs similarity index 100% rename from src/debug.rs rename to src/namespaces/debug.rs diff --git a/src/namespaces/evm.rs b/src/namespaces/evm.rs new file mode 100644 index 00000000..79b95341 --- /dev/null +++ b/src/namespaces/evm.rs @@ -0,0 +1,70 @@ +use jsonrpc_derive::rpc; +use zksync_basic_types::U64; + +use crate::namespaces::RpcResult; + +#[rpc] +pub trait EvmNamespaceT { + /// Increase the current timestamp for the node + /// + /// # Parameters + /// - `time_delta`: The number of seconds to increase time by + /// + /// # Returns + /// The applied time delta to `current_timestamp` value for the InMemoryNodeInner. + #[rpc(name = "evm_increaseTime")] + fn increase_time(&self, time_delta_seconds: u64) -> RpcResult; + + /// Force a single block to be mined. + /// + /// Will mine an empty block (containing zero transactions) + /// + /// # Returns + /// The string "0x0". + #[rpc(name = "evm_mine")] + fn evm_mine(&self) -> RpcResult; + + /// Set the current timestamp for the node. The timestamp must be in future. + /// + /// # Parameters + /// - `timestamp`: The timestamp to set the time to + /// + /// # Returns + /// The new timestamp value for the InMemoryNodeInner. + #[rpc(name = "evm_setNextBlockTimestamp")] + fn set_next_block_timestamp(&self, timestamp: u64) -> RpcResult; + + /// Set the current timestamp for the node. + /// Warning: This will allow you to move backwards in time, which may cause new blocks to appear to be + /// mined before old blocks. This will result in an invalid state. + /// + /// # Parameters + /// - `time`: The timestamp to set the time to + /// + /// # Returns + /// The difference between the `current_timestamp` and the new timestamp for the InMemoryNodeInner. 
+ #[rpc(name = "evm_setTime")] + fn set_time(&self, time: u64) -> RpcResult; + + /// Snapshot the state of the blockchain at the current block. Takes no parameters. Returns the id of the snapshot + /// that was created. A snapshot can only be reverted once. After a successful evm_revert, the same snapshot id cannot + /// be used again. Consider creating a new snapshot after each evm_revert if you need to revert to the same + /// point multiple times. + /// + /// # Returns + /// The `U64` identifier for this snapshot. + #[rpc(name = "evm_snapshot")] + fn snapshot(&self) -> RpcResult; + + /// Revert the state of the blockchain to a previous snapshot. Takes a single parameter, + /// which is the snapshot id to revert to. This deletes the given snapshot, as well as any snapshots + /// taken after (e.g.: reverting to id 0x1 will delete snapshots with ids 0x1, 0x2, etc.) + /// + /// # Parameters + /// - `snapshot_id`: The snapshot id to revert. + /// + /// # Returns + /// `true` if a snapshot was reverted, otherwise `false`. + #[rpc(name = "evm_revert")] + fn revert_snapshot(&self, snapshot_id: U64) -> RpcResult; +} diff --git a/src/namespaces/hardhat.rs b/src/namespaces/hardhat.rs new file mode 100644 index 00000000..5a69f6b5 --- /dev/null +++ b/src/namespaces/hardhat.rs @@ -0,0 +1,89 @@ +use jsonrpc_derive::rpc; +use zksync_basic_types::{Address, U256, U64}; + +use super::RpcResult; + +#[rpc] +pub trait HardhatNamespaceT { + /// Sets the balance of the given address to the given balance. + /// + /// # Arguments + /// + /// * `address` - The `Address` whose balance will be edited + /// * `balance` - The new balance to set for the given address, in wei + /// + /// # Returns + /// + /// A `BoxFuture` containing a `Result` with a `bool` representing the success of the operation. + #[rpc(name = "hardhat_setBalance")] + fn set_balance(&self, address: Address, balance: U256) -> RpcResult; + + /// Modifies an account's nonce by overwriting it. 
+ /// + /// # Arguments + /// + /// * `address` - The `Address` whose nonce is to be changed + /// * `nonce` - The new nonce + /// + /// # Returns + /// + /// A `BoxFuture` containing a `Result` with a `bool` representing the success of the operation. + #[rpc(name = "hardhat_setNonce")] + fn set_nonce(&self, address: Address, balance: U256) -> RpcResult; + + /// Sometimes you may want to advance the latest block number of the network by a large number of blocks. + /// One way to do this would be to call the evm_mine RPC method multiple times, but this is too slow if you want to mine thousands of blocks. + /// The hardhat_mine method can mine any number of blocks at once, in constant time. (It exhibits the same performance no matter how many blocks are mined.) + /// + /// # Arguments + /// + /// * `num_blocks` - The number of blocks to mine, defaults to 1 + /// * `interval` - The interval between the timestamps of each block, in seconds, and it also defaults to 1 + /// + /// # Returns + /// + /// A `BoxFuture` containing a `Result` with a `bool` representing the success of the operation. + #[rpc(name = "hardhat_mine")] + fn hardhat_mine(&self, num_blocks: Option, interval: Option) -> RpcResult; + + /// Hardhat Network allows you to send transactions impersonating specific account and contract addresses. + /// To impersonate an account use this method, passing the address to impersonate as its parameter. + /// After calling this method, any transactions with this sender will be executed without verification. + /// Multiple addresses can be impersonated at once. + /// + /// # Arguments + /// + /// * `address` - The address to impersonate + /// + /// # Returns + /// + /// A `BoxFuture` containing a `Result` with a `bool` representing the success of the operation. 
+ #[rpc(name = "hardhat_impersonateAccount")] + fn impersonate_account(&self, address: Address) -> RpcResult; + + /// Use this method to stop impersonating an account after having previously used `hardhat_impersonateAccount` + /// The method returns `true` if the account was being impersonated and `false` otherwise. + /// + /// # Arguments + /// + /// * `address` - The address to stop impersonating. + /// + /// # Returns + /// + /// A `BoxFuture` containing a `Result` with a `bool` representing the success of the operation. + #[rpc(name = "hardhat_stopImpersonatingAccount")] + fn stop_impersonating_account(&self, address: Address) -> RpcResult; + + /// Modifies the bytecode stored at an account's address. + /// + /// # Arguments + /// + /// * `address` - The address where the given code should be stored. + /// * `code` - The code to be stored. + /// + /// # Returns + /// + /// A `BoxFuture` containing a `Result` with a `bool` representing the success of the operation. + #[rpc(name = "hardhat_setCode")] + fn set_code(&self, address: Address, code: Vec) -> RpcResult<()>; +} diff --git a/src/namespaces/mod.rs b/src/namespaces/mod.rs new file mode 100644 index 00000000..97ecc35e --- /dev/null +++ b/src/namespaces/mod.rs @@ -0,0 +1,17 @@ +mod config; +mod evm; +mod hardhat; +mod net; + +use zksync_core::api_server::web3::backend_jsonrpc::namespaces::{debug, eth, zks}; + +pub use config::ConfigurationApiNamespaceT; +pub use debug::DebugNamespaceT; +pub use eth::EthNamespaceT; +pub use evm::EvmNamespaceT; +pub use hardhat::HardhatNamespaceT; +pub use net::NetNamespaceT; +pub use zks::ZksNamespaceT; + +pub type Result = jsonrpc_core::Result; +pub type RpcResult = jsonrpc_core::BoxFuture>; diff --git a/src/namespaces/net.rs b/src/namespaces/net.rs new file mode 100644 index 00000000..f18aa142 --- /dev/null +++ b/src/namespaces/net.rs @@ -0,0 +1,16 @@ +use jsonrpc_derive::rpc; +use zksync_basic_types::U256; + +use crate::namespaces::Result; + +#[rpc] +pub trait NetNamespaceT 
{ + #[rpc(name = "net_version", returns = "String")] + fn net_version(&self) -> Result; + + #[rpc(name = "net_peerCount", returns = "U256")] + fn net_peer_count(&self) -> Result; + + #[rpc(name = "net_listening", returns = "bool")] + fn net_listening(&self) -> Result; +} diff --git a/src/net.rs b/src/net.rs deleted file mode 100644 index 0db92adf..00000000 --- a/src/net.rs +++ /dev/null @@ -1,38 +0,0 @@ -// Built-in uses - -// External uses -use jsonrpc_core::Result; -use jsonrpc_derive::rpc; - -// Workspace uses -use zksync_types::U256; - -// Local uses -use crate::web3::namespaces::NetNamespace; -use crate::node::TEST_NODE_NETWORK_ID; - -#[rpc] -pub trait NetNamespaceT { - #[rpc(name = "net_version", returns = "String")] - fn net_version(&self) -> Result; - - #[rpc(name = "net_peerCount", returns = "U256")] - fn net_peer_count(&self) -> Result; - - #[rpc(name = "net_listening", returns = "bool")] - fn net_listening(&self) -> Result; -} - -impl NetNamespaceT for NetNamespace { - fn net_version(&self) -> Result { - Ok(String::From(TEST_NODE_NETWORK_ID)) - } - - fn net_peer_count(&self) -> Result { - Ok(U256::From(0)) - } - - fn net_listening(&self) -> Result { - Ok(false) - } -} diff --git a/src/node/config.rs b/src/node/config.rs new file mode 100644 index 00000000..81f350ef --- /dev/null +++ b/src/node/config.rs @@ -0,0 +1,189 @@ +use zksync_core::api_server::web3::backend_jsonrpc::error::into_jsrpc_error; +use zksync_web3_decl::error::Web3Error; + +use crate::{ + fork::ForkSource, + namespaces::{ConfigurationApiNamespaceT, Result}, + node::InMemoryNode, + observability::LogLevel, +}; + +use super::{ShowCalls, ShowGasDetails, ShowStorageLogs, ShowVMDetails}; + +impl ConfigurationApiNamespaceT + for InMemoryNode +{ + fn config_get_show_calls(&self) -> Result { + self.get_inner() + .read() + .map_err(|err| { + tracing::error!("failed acquiring lock: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .map(|reader| reader.show_calls.to_string()) + } + + 
fn config_get_current_timestamp(&self) -> Result { + self.get_inner() + .read() + .map_err(|err| { + tracing::error!("failed acquiring lock: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .map(|reader| reader.current_timestamp) + } + + fn config_set_show_calls(&self, value: String) -> Result { + let show_calls = match value.parse::() { + Ok(value) => value, + Err(_) => return self.config_get_show_calls(), + }; + + self.get_inner() + .write() + .map_err(|err| { + tracing::error!("failed acquiring lock: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .map(|mut writer| { + writer.show_calls = show_calls; + writer.show_calls.to_string() + }) + } + + fn config_set_show_storage_logs(&self, value: String) -> Result { + let show_storage_logs = match value.parse::() { + Ok(value) => value, + Err(_) => { + return self + .get_inner() + .read() + .map_err(|err| { + tracing::error!("failed acquiring lock: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .map(|reader| reader.show_storage_logs.to_string()) + } + }; + + self.get_inner() + .write() + .map_err(|err| { + tracing::error!("failed acquiring lock: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .map(|mut writer| { + writer.show_storage_logs = show_storage_logs; + writer.show_storage_logs.to_string() + }) + } + + fn config_set_show_vm_details(&self, value: String) -> Result { + let show_vm_details = match value.parse::() { + Ok(value) => value, + Err(_) => { + return self + .get_inner() + .read() + .map_err(|err| { + tracing::error!("failed acquiring lock: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .map(|reader| reader.show_vm_details.to_string()) + } + }; + + self.get_inner() + .write() + .map_err(|err| { + tracing::error!("failed acquiring lock: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .map(|mut writer| { + writer.show_vm_details = show_vm_details; + writer.show_vm_details.to_string() + }) + } + + fn 
config_set_show_gas_details(&self, value: String) -> Result { + let show_gas_details = match value.parse::() { + Ok(value) => value, + Err(_) => { + return self + .get_inner() + .read() + .map_err(|err| { + tracing::error!("failed acquiring lock: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .map(|reader| reader.show_gas_details.to_string()) + } + }; + + self.get_inner() + .write() + .map_err(|err| { + tracing::error!("failed acquiring lock: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .map(|mut writer| { + writer.show_gas_details = show_gas_details; + writer.show_gas_details.to_string() + }) + } + + fn config_set_resolve_hashes(&self, value: bool) -> Result { + self.get_inner() + .write() + .map_err(|err| { + tracing::error!("failed acquiring lock: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .map(|mut writer| { + writer.resolve_hashes = value; + writer.resolve_hashes + }) + } + + fn config_set_log_level(&self, level: LogLevel) -> Result { + if let Some(observability) = &self + .get_inner() + .read() + .map_err(|err| { + tracing::error!("failed acquiring lock: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + })? + .observability + { + match observability.set_log_level(level.clone()) { + Ok(_) => tracing::info!("set log level to '{}'", level), + Err(err) => { + tracing::error!("failed setting log level {:?}", err); + return Ok(false); + } + } + } + Ok(true) + } + + fn config_set_logging(&self, directive: String) -> Result { + if let Some(observability) = &self + .get_inner() + .read() + .map_err(|err| { + tracing::error!("failed acquiring lock: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + })? 
+ .observability + { + match observability.set_logging(&directive) { + Ok(_) => tracing::info!("set logging to '{}'", directive), + Err(err) => { + tracing::error!("failed setting logging to '{}': {:?}", directive, err); + return Ok(false); + } + } + } + Ok(true) + } +} diff --git a/src/node/debug.rs b/src/node/debug.rs new file mode 100644 index 00000000..88dce918 --- /dev/null +++ b/src/node/debug.rs @@ -0,0 +1,562 @@ +use itertools::Itertools; +use multivm::vm_virtual_blocks::{constants::ETH_CALL_GAS_LIMIT, CallTracer, HistoryDisabled, Vm}; +use once_cell::sync::OnceCell; +use std::sync::Arc; +use zksync_basic_types::H256; +use zksync_core::api_server::web3::backend_jsonrpc::error::into_jsrpc_error; +use zksync_state::StorageView; +use zksync_types::{ + api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig, TransactionVariant}, + l2::L2Tx, + transaction_request::CallRequest, + PackedEthSignature, Transaction, U64, +}; +use zksync_web3_decl::error::Web3Error; + +use crate::{ + fork::ForkSource, + namespaces::{DebugNamespaceT, Result, RpcResult}, + node::{InMemoryNode, MAX_TX_SIZE}, + utils::{create_debug_output, to_real_block_number}, +}; + +impl DebugNamespaceT + for InMemoryNode +{ + fn trace_block_by_number( + &self, + block: BlockNumber, + options: Option, + ) -> RpcResult> { + let only_top = options.is_some_and(|o| o.tracer_config.only_top_call); + let inner = self.get_inner().clone(); + Box::pin(async move { + let inner = inner + .read() + .map_err(|_| into_jsrpc_error(Web3Error::InternalError))?; + + let block = { + let number = + to_real_block_number(block, U64::from(inner.current_miniblock)).as_u64(); + + inner + .block_hashes + .get(&number) + .and_then(|hash| inner.blocks.get(hash)) + .ok_or_else(|| { + into_jsrpc_error(Web3Error::SubmitTransactionError( + "Block not found".to_string(), + vec![], + )) + })? 
+ }; + + let tx_hashes = block + .transactions + .iter() + .map(|tx| match tx { + TransactionVariant::Full(tx) => tx.hash, + TransactionVariant::Hash(hash) => *hash, + }) + .collect_vec(); + + let debug_calls = tx_hashes + .into_iter() + .map(|tx_hash| { + let tx = inner.tx_results.get(&tx_hash).ok_or_else(|| { + into_jsrpc_error(Web3Error::SubmitTransactionError( + "Transaction not found".to_string(), + vec![], + )) + })?; + Ok(tx.debug_info(only_top)) + }) + .collect::>>()? + .into_iter() + .map(|result| ResultDebugCall { result }) + .collect_vec(); + + Ok(debug_calls) + }) + } + + fn trace_block_by_hash( + &self, + hash: H256, + options: Option, + ) -> RpcResult> { + let only_top = options.is_some_and(|o| o.tracer_config.only_top_call); + let inner = self.get_inner().clone(); + Box::pin(async move { + let inner = inner + .read() + .map_err(|_| into_jsrpc_error(Web3Error::InternalError))?; + + let block = inner.blocks.get(&hash).ok_or_else(|| { + into_jsrpc_error(Web3Error::SubmitTransactionError( + "Block not found".to_string(), + vec![], + )) + })?; + + let tx_hashes = block + .transactions + .iter() + .map(|tx| match tx { + TransactionVariant::Full(tx) => tx.hash, + TransactionVariant::Hash(hash) => *hash, + }) + .collect_vec(); + + let debug_calls = tx_hashes + .into_iter() + .map(|tx_hash| { + let tx = inner.tx_results.get(&tx_hash).ok_or_else(|| { + into_jsrpc_error(Web3Error::SubmitTransactionError( + "Transaction not found".to_string(), + vec![], + )) + })?; + Ok(tx.debug_info(only_top)) + }) + .collect::>>()? + .into_iter() + .map(|result| ResultDebugCall { result }) + .collect_vec(); + + Ok(debug_calls) + }) + } + + /// Trace execution of a transaction. 
+ fn trace_call( + &self, + request: CallRequest, + block: Option, + options: Option, + ) -> RpcResult { + let only_top = options.is_some_and(|o| o.tracer_config.only_top_call); + let inner = self.get_inner().clone(); + Box::pin(async move { + if block.is_some() && !matches!(block, Some(BlockId::Number(BlockNumber::Latest))) { + return Err(jsonrpc_core::Error::invalid_params( + "tracing only supported at `latest` block", + )); + } + + let inner = inner + .read() + .map_err(|_| into_jsrpc_error(Web3Error::InternalError))?; + + let mut l2_tx = match L2Tx::from_request(request.into(), MAX_TX_SIZE) { + Ok(tx) => tx, + Err(e) => { + let error = Web3Error::SerializationError(e); + return Err(into_jsrpc_error(error)); + } + }; + + let execution_mode = multivm::interface::TxExecutionMode::EthCall; + let storage = StorageView::new(&inner.fork_storage).to_rc_ptr(); + + let bootloader_code = inner.system_contracts.contracts_for_l2_call(); + + // init vm + let (mut l1_batch_env, _block_context) = inner.create_l1_batch_env(storage.clone()); + + // update the enforced_base_fee within l1_batch_env to match the logic in zksync_core + l1_batch_env.enforced_base_fee = Some(l2_tx.common_data.fee.max_fee_per_gas.as_u64()); + let system_env = inner.create_system_env(bootloader_code.clone(), execution_mode); + let mut vm = Vm::new(l1_batch_env, system_env, storage, HistoryDisabled); + + // We must inject *some* signature (otherwise bootloader code fails to generate hash). + if l2_tx.common_data.signature.is_empty() { + l2_tx.common_data.signature = + PackedEthSignature::default().serialize_packed().into(); + } + + // Match behavior of zksync_core: + // Protection against infinite-loop eth_calls and alike: + // limiting the amount of gas the call can use. 
+ l2_tx.common_data.fee.gas_limit = ETH_CALL_GAS_LIMIT.into(); + + let tx: Transaction = l2_tx.clone().into(); + vm.push_transaction(tx); + + let call_tracer_result = Arc::new(OnceCell::default()); + let tracer = CallTracer::new(call_tracer_result.clone(), HistoryDisabled); + let tx_result = vm.inspect( + vec![Box::new(tracer)], + multivm::interface::VmExecutionMode::OneTx, + ); + + let call_traces = if only_top { + vec![] + } else { + Arc::try_unwrap(call_tracer_result) + .unwrap() + .take() + .unwrap_or_default() + }; + + let debug = + create_debug_output(&l2_tx, &tx_result, call_traces).map_err(into_jsrpc_error)?; + + Ok(debug) + }) + } + + fn trace_transaction( + &self, + tx_hash: H256, + options: Option, + ) -> RpcResult> { + let only_top = options.is_some_and(|o| o.tracer_config.only_top_call); + let inner = self.get_inner().clone(); + Box::pin(async move { + let inner = inner + .read() + .map_err(|_| into_jsrpc_error(Web3Error::InternalError))?; + + Ok(inner + .tx_results + .get(&tx_hash) + .map(|tx| tx.debug_info(only_top))) + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + deps::system_contracts::bytecode_from_slice, + http_fork_source::HttpForkSource, + node::{InMemoryNode, TransactionResult}, + testing::{self, LogBuilder}, + }; + use ethers::abi::{short_signature, AbiEncode, HumanReadableParser, ParamType, Token}; + use zksync_basic_types::{Address, Nonce, H160, U256}; + use zksync_types::{ + api::{Block, CallTracerConfig, SupportedTracers, TransactionReceipt}, + transaction_request::CallRequestBuilder, + utils::deployed_address_create, + }; + + fn deploy_test_contracts(node: &InMemoryNode) -> (Address, Address) { + let private_key = H256::repeat_byte(0xee); + let from_account = zksync_types::PackedEthSignature::address_from_private_key(&private_key) + .expect("failed generating address"); + node.set_rich_account(from_account); + + // first, deploy secondary contract + let secondary_bytecode = bytecode_from_slice( + "Secondary", + 
include_bytes!("../deps/test-contracts/Secondary.json"), + ); + let secondary_deployed_address = deployed_address_create(from_account, U256::zero()); + testing::deploy_contract( + &node, + H256::repeat_byte(0x1), + private_key, + secondary_bytecode, + Some((U256::from(2),).encode()), + Nonce(0), + ); + + // deploy primary contract using the secondary contract address as a constructor parameter + let primary_bytecode = bytecode_from_slice( + "Primary", + include_bytes!("../deps/test-contracts/Primary.json"), + ); + let primary_deployed_address = deployed_address_create(from_account, U256::one()); + testing::deploy_contract( + &node, + H256::repeat_byte(0x1), + private_key, + primary_bytecode, + Some((secondary_deployed_address).encode()), + Nonce(1), + ); + (primary_deployed_address, secondary_deployed_address) + } + + #[tokio::test] + async fn test_trace_deployed_contract() { + let node = InMemoryNode::::default(); + + let (primary_deployed_address, secondary_deployed_address) = deploy_test_contracts(&node); + + // trace a call to the primary contract + let func = HumanReadableParser::parse_function("calculate(uint)").unwrap(); + let calldata = func.encode_input(&[Token::Uint(U256::from(42))]).unwrap(); + let request = CallRequestBuilder::default() + .to(primary_deployed_address) + .data(calldata.clone().into()) + .gas(80_000_000.into()) + .build(); + let trace = node + .trace_call(request.clone(), None, None) + .await + .expect("trace call"); + + // call should not revert + assert!(trace.error.is_none()); + assert!(trace.revert_reason.is_none()); + + // check that the call was successful + let output = + ethers::abi::decode(&[ParamType::Uint(256)], &trace.output.0.as_slice()).unwrap(); + assert_eq!(output[0], Token::Uint(U256::from(84))); + + // find the call to primary contract in the trace + let contract_call = trace + .calls + .first() + .unwrap() + .calls + .last() + .unwrap() + .calls + .first() + .unwrap(); + + assert_eq!(contract_call.to, 
primary_deployed_address); + assert_eq!(contract_call.input, calldata.into()); + + // check that it contains a call to secondary contract + let subcall = contract_call.calls.first().unwrap(); + assert_eq!(subcall.to, secondary_deployed_address); + assert_eq!(subcall.from, primary_deployed_address); + assert_eq!(subcall.output, U256::from(84).encode().into()); + } + + #[tokio::test] + async fn test_trace_only_top() { + let node = InMemoryNode::::default(); + + let (primary_deployed_address, _) = deploy_test_contracts(&node); + + // trace a call to the primary contract + let func = HumanReadableParser::parse_function("calculate(uint)").unwrap(); + let calldata = func.encode_input(&[Token::Uint(U256::from(42))]).unwrap(); + let request = CallRequestBuilder::default() + .to(primary_deployed_address) + .data(calldata.into()) + .gas(80_000_000.into()) + .build(); + + // if we trace with onlyTopCall=true, we should get only the top-level call + let trace = node + .trace_call( + request, + None, + Some(TracerConfig { + tracer: SupportedTracers::CallTracer, + tracer_config: CallTracerConfig { + only_top_call: true, + }, + }), + ) + .await + .expect("trace call"); + // call should not revert + assert!(trace.error.is_none()); + assert!(trace.revert_reason.is_none()); + + // call should not contain any subcalls + assert!(trace.calls.is_empty()); + } + + #[tokio::test] + async fn test_trace_reverts() { + let node = InMemoryNode::::default(); + + let (primary_deployed_address, _) = deploy_test_contracts(&node); + + // trace a call to the primary contract + let request = CallRequestBuilder::default() + .to(primary_deployed_address) + .data(short_signature("shouldRevert()", &[]).into()) + .gas(80_000_000.into()) + .build(); + let trace = node + .trace_call(request, None, None) + .await + .expect("trace call"); + + // call should revert + assert!(trace.revert_reason.is_some()); + + // find the call to primary contract in the trace + let contract_call = trace + .calls + .first() + 
.unwrap() + .calls + .last() + .unwrap() + .calls + .first() + .unwrap(); + + // the contract subcall should have reverted + assert!(contract_call.revert_reason.is_some()); + } + + #[tokio::test] + async fn test_trace_transaction() { + let node = InMemoryNode::::default(); + let inner = node.get_inner(); + { + let mut writer = inner.write().unwrap(); + writer.tx_results.insert( + H256::repeat_byte(0x1), + TransactionResult { + info: testing::default_tx_execution_info(), + receipt: TransactionReceipt { + logs: vec![LogBuilder::new() + .set_address(H160::repeat_byte(0xa1)) + .build()], + ..Default::default() + }, + debug: testing::default_tx_debug_info(), + }, + ); + } + let result = node + .trace_transaction(H256::repeat_byte(0x1), None) + .await + .unwrap() + .unwrap(); + assert_eq!(result.calls.len(), 1); + } + + #[tokio::test] + async fn test_trace_transaction_only_top() { + let node = InMemoryNode::::default(); + let inner = node.get_inner(); + { + let mut writer = inner.write().unwrap(); + writer.tx_results.insert( + H256::repeat_byte(0x1), + TransactionResult { + info: testing::default_tx_execution_info(), + receipt: TransactionReceipt { + logs: vec![LogBuilder::new() + .set_address(H160::repeat_byte(0xa1)) + .build()], + ..Default::default() + }, + debug: testing::default_tx_debug_info(), + }, + ); + } + let result = node + .trace_transaction( + H256::repeat_byte(0x1), + Some(TracerConfig { + tracer: SupportedTracers::CallTracer, + tracer_config: CallTracerConfig { + only_top_call: true, + }, + }), + ) + .await + .unwrap() + .unwrap(); + assert!(result.calls.is_empty()); + } + + #[tokio::test] + async fn test_trace_transaction_not_found() { + let node = InMemoryNode::::default(); + let result = node + .trace_transaction(H256::repeat_byte(0x1), None) + .await + .unwrap(); + assert!(result.is_none()); + } + + #[tokio::test] + async fn test_trace_block_by_hash_empty() { + let node = InMemoryNode::::default(); + let inner = node.get_inner(); + { + let mut writer 
= inner.write().unwrap(); + let block = Block::::default(); + writer.blocks.insert(H256::repeat_byte(0x1), block); + } + let result = node + .trace_block_by_hash(H256::repeat_byte(0x1), None) + .await + .unwrap(); + assert_eq!(result.len(), 0); + } + + #[tokio::test] + async fn test_trace_block_by_hash() { + let node = InMemoryNode::::default(); + let inner = node.get_inner(); + { + let mut writer = inner.write().unwrap(); + let tx = zksync_types::api::Transaction::default(); + let tx_hash = tx.hash; + let mut block = Block::::default(); + block.transactions.push(TransactionVariant::Full(tx)); + writer.blocks.insert(H256::repeat_byte(0x1), block); + writer.tx_results.insert( + tx_hash, + TransactionResult { + info: testing::default_tx_execution_info(), + receipt: TransactionReceipt::default(), + debug: testing::default_tx_debug_info(), + }, + ); + } + let result = node + .trace_block_by_hash(H256::repeat_byte(0x1), None) + .await + .unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].result.calls.len(), 1); + } + + #[tokio::test] + async fn test_trace_block_by_number() { + let node = InMemoryNode::::default(); + let inner = node.get_inner(); + { + let mut writer = inner.write().unwrap(); + let tx = zksync_types::api::Transaction::default(); + let tx_hash = tx.hash; + let mut block = Block::::default(); + block.transactions.push(TransactionVariant::Full(tx)); + writer.blocks.insert(H256::repeat_byte(0x1), block); + writer.block_hashes.insert(0, H256::repeat_byte(0x1)); + writer.tx_results.insert( + tx_hash, + TransactionResult { + info: testing::default_tx_execution_info(), + receipt: TransactionReceipt::default(), + debug: testing::default_tx_debug_info(), + }, + ); + } + // check `latest` alias + let result = node + .trace_block_by_number(BlockNumber::Latest, None) + .await + .unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].result.calls.len(), 1); + + // check block number + let result = node + 
.trace_block_by_number(BlockNumber::Number(0.into()), None) + .await + .unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].result.calls.len(), 1); + } +} diff --git a/src/node.rs b/src/node/eth.rs similarity index 59% rename from src/node.rs rename to src/node/eth.rs index 3a3098b7..cfd63267 100644 --- a/src/node.rs +++ b/src/node/eth.rs @@ -1,1661 +1,42 @@ -//! In-memory node, that supports forking other networks. -use crate::{ - bootloader_debug::{BootloaderDebug, BootloaderDebugTracer}, - console_log::ConsoleLogHandler, - deps::InMemoryStorage, - filters::{EthFilters, FilterType, LogFilter}, - fork::{ForkDetails, ForkSource, ForkStorage}, - formatter, - observability::Observability, - system_contracts::{self, Options, SystemContracts}, - utils::{ - self, adjust_l1_gas_price_for_tx, bytecode_to_factory_dep, create_debug_output, - not_implemented, to_human_size, IntoBoxedFuture, - }, -}; -use clap::Parser; +use std::collections::HashSet; + use colored::Colorize; -use core::fmt::Display; use futures::FutureExt; -use indexmap::IndexMap; use itertools::Itertools; -use jsonrpc_core::BoxFuture; -use once_cell::sync::OnceCell; -use std::{ - cmp::{self}, - collections::{HashMap, HashSet}, - str::FromStr, - sync::{Arc, RwLock}, -}; - -use multivm::interface::{ - ExecutionResult, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, -}; -use multivm::vm_virtual_blocks::{ - constants::{ - BLOCK_GAS_LIMIT, BLOCK_OVERHEAD_PUBDATA, ETH_CALL_GAS_LIMIT, MAX_PUBDATA_PER_BLOCK, - }, - utils::{ - fee::derive_base_fee_and_gas_per_pubdata, - l2_blocks::load_last_l2_block, - overhead::{derive_overhead, OverheadCoeficients}, - }, - CallTracer, HistoryDisabled, Vm, VmTracer, -}; -use zksync_basic_types::{ - web3::{self, signing::keccak256}, - AccountTreeId, Address, Bytes, L1BatchNumber, MiniblockNumber, H160, H256, U256, U64, -}; -use zksync_contracts::BaseSystemContracts; -use zksync_core::api_server::web3::backend_jsonrpc::{ - 
error::into_jsrpc_error, namespaces::eth::EthNamespaceT, -}; -use zksync_state::{ReadStorage, StoragePtr, StorageView, WriteStorage}; +use multivm::interface::{ExecutionResult, TxExecutionMode}; +use multivm::vm_virtual_blocks::constants::ETH_CALL_GAS_LIMIT; +use zksync_basic_types::{web3, AccountTreeId, Address, Bytes, H160, H256, U256, U64}; +use zksync_core::api_server::web3::backend_jsonrpc::error::into_jsrpc_error; +use zksync_state::ReadStorage; use zksync_types::{ - api::{Block, DebugCall, Log, TransactionReceipt, TransactionVariant}, - block::legacy_miniblock_hash, + api::{Block, BlockIdVariant, BlockNumber, TransactionVariant}, fee::Fee, get_code_key, get_nonce_key, l2::L2Tx, - l2::TransactionType, transaction_request::TransactionRequest, - utils::{ - decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance, - storage_key_for_standard_token_balance, - }, - vm_trace::Call, - PackedEthSignature, StorageKey, StorageLogQueryType, StorageValue, Transaction, - ACCOUNT_CODE_STORAGE_ADDRESS, EIP_712_TX_TYPE, L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, - MAX_L2_TX_GAS_LIMIT, -}; -use zksync_utils::{ - bytecode::{compress_bytecode, hash_bytecode}, - h256_to_account_address, h256_to_u256, h256_to_u64, u256_to_h256, + utils::storage_key_for_standard_token_balance, + StorageKey, L2_ETH_TOKEN_ADDRESS, }; +use zksync_utils::{h256_to_u256, h256_to_u64, u256_to_h256}; use zksync_web3_decl::{ error::Web3Error, - types::{FeeHistory, Filter, FilterChanges}, + types::{FeeHistory, Filter, FilterChanges, SyncState}, }; -/// Max possible size of an ABI encoded tx (in bytes). -pub const MAX_TX_SIZE: usize = 1_000_000; -/// Timestamp of the first block (if not running in fork mode). -pub const NON_FORK_FIRST_BLOCK_TIMESTAMP: u64 = 1_000; -/// Network ID we use for the test node. -pub const TEST_NODE_NETWORK_ID: u32 = 260; -/// L1 Gas Price. -pub const L1_GAS_PRICE: u64 = 50_000_000_000; -/// L2 Gas Price (0.25 gwei). 
-pub const L2_GAS_PRICE: u64 = 250_000_000; -/// L1 Gas Price Scale Factor for gas estimation. -pub const ESTIMATE_GAS_L1_GAS_PRICE_SCALE_FACTOR: f64 = 1.2; -/// The max possible number of gas that `eth_estimateGas` is allowed to overestimate. -pub const ESTIMATE_GAS_PUBLISH_BYTE_OVERHEAD: u32 = 100; -/// Acceptable gas overestimation limit. -pub const ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION: u32 = 1_000; -/// The factor by which to scale the gasLimit. -pub const ESTIMATE_GAS_SCALE_FACTOR: f32 = 1.3; -/// The maximum number of previous blocks to store the state for. -pub const MAX_PREVIOUS_STATES: u16 = 128; -/// The zks protocol version. -pub const PROTOCOL_VERSION: &str = "zks/1"; - -pub fn compute_hash(block_number: u64, tx_hash: H256) -> H256 { - let digest = [&block_number.to_be_bytes()[..], tx_hash.as_bytes()].concat(); - H256(keccak256(&digest)) -} - -pub fn create_empty_block(block_number: u64, timestamp: u64, batch: u32) -> Block { - let hash = compute_hash(block_number, H256::zero()); - Block { - hash, - number: U64::from(block_number), - timestamp: U256::from(timestamp), - l1_batch_number: Some(U64::from(batch)), - transactions: vec![], - gas_used: U256::from(0), - gas_limit: U256::from(BLOCK_GAS_LIMIT), - ..Default::default() - } -} - -/// Information about the executed transaction. -#[derive(Debug, Clone)] -pub struct TxExecutionInfo { - pub tx: L2Tx, - // Batch number where transaction was executed. 
- pub batch_number: u32, - pub miniblock_number: u64, - pub result: VmExecutionResultAndLogs, -} - -#[derive(Debug, Default, clap::Parser, Clone, clap::ValueEnum, PartialEq, Eq)] -pub enum ShowCalls { - #[default] - None, - User, - System, - All, -} - -impl FromStr for ShowCalls { - type Err = String; - - fn from_str(s: &str) -> Result { - match s.to_lowercase().as_ref() { - "none" => Ok(ShowCalls::None), - "user" => Ok(ShowCalls::User), - "system" => Ok(ShowCalls::System), - "all" => Ok(ShowCalls::All), - _ => Err(format!( - "Unknown ShowCalls value {} - expected one of none|user|system|all.", - s - )), - } - } -} - -impl Display for ShowCalls { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { - write!(f, "{:?}", self) - } -} - -#[derive(Debug, Default, Parser, Clone, clap::ValueEnum, PartialEq, Eq)] -pub enum ShowStorageLogs { - #[default] - None, - Read, - Write, - All, -} - -impl FromStr for ShowStorageLogs { - type Err = String; - - fn from_str(s: &str) -> Result { - match s.to_lowercase().as_ref() { - "none" => Ok(ShowStorageLogs::None), - "read" => Ok(ShowStorageLogs::Read), - "write" => Ok(ShowStorageLogs::Write), - "all" => Ok(ShowStorageLogs::All), - _ => Err(format!( - "Unknown ShowStorageLogs value {} - expected one of none|read|write|all.", - s - )), - } - } -} - -impl Display for ShowStorageLogs { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { - write!(f, "{:?}", self) - } -} - -#[derive(Debug, Default, Parser, Clone, clap::ValueEnum, PartialEq, Eq)] -pub enum ShowVMDetails { - #[default] - None, - All, -} - -impl FromStr for ShowVMDetails { - type Err = String; - - fn from_str(s: &str) -> Result { - match s.to_lowercase().as_ref() { - "none" => Ok(ShowVMDetails::None), - "all" => Ok(ShowVMDetails::All), - _ => Err(format!( - "Unknown ShowVMDetails value {} - expected one of none|all.", - s - )), - } - } -} - -impl Display for ShowVMDetails { - fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { - write!(f, "{:?}", self) - } -} - -#[derive(Debug, Default, Parser, Clone, clap::ValueEnum, PartialEq, Eq)] -pub enum ShowGasDetails { - #[default] - None, - All, -} - -impl FromStr for ShowGasDetails { - type Err = String; - - fn from_str(s: &str) -> Result { - match s.to_lowercase().as_ref() { - "none" => Ok(ShowGasDetails::None), - "all" => Ok(ShowGasDetails::All), - _ => Err(format!( - "Unknown ShowGasDetails value {} - expected one of none|all.", - s - )), - } - } -} - -impl Display for ShowGasDetails { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { - write!(f, "{:?}", self) - } -} - -#[derive(Debug, Clone)] -pub struct TransactionResult { - pub info: TxExecutionInfo, - pub receipt: TransactionReceipt, - pub debug: DebugCall, -} - -impl TransactionResult { - /// Returns the debug information for the transaction. - /// If `only_top` is true - will only return the top level call. - pub fn debug_info(&self, only_top: bool) -> DebugCall { - let calls = if only_top { - vec![] - } else { - self.debug.calls.clone() - }; - DebugCall { - calls, - ..self.debug.clone() - } - } -} - -/// Helper struct for InMemoryNode. -/// S - is the Source of the Fork. -#[derive(Clone)] -pub struct InMemoryNodeInner { - /// The latest timestamp that was already generated. - /// Next block will be current_timestamp + 1 - pub current_timestamp: u64, - /// The latest batch number that was already generated. - /// Next block will be current_batch + 1 - pub current_batch: u32, - /// The latest miniblock number that was already generated. - /// Next transaction will go to the block current_miniblock + 1 - pub current_miniblock: u64, - /// The latest miniblock hash. - pub current_miniblock_hash: H256, - pub l1_gas_price: u64, - // Map from transaction to details about the exeuction - pub tx_results: HashMap, - // Map from block hash to information about the block. 
- pub blocks: HashMap>, - // Map from block number to a block hash. - pub block_hashes: HashMap, - // Map from filter_id to the eth filter - pub filters: EthFilters, - // Underlying storage - pub fork_storage: ForkStorage, - // Debug level information. - pub show_calls: ShowCalls, - // Displays storage logs. - pub show_storage_logs: ShowStorageLogs, - // Displays VM details. - pub show_vm_details: ShowVMDetails, - // Gas details information. - pub show_gas_details: ShowGasDetails, - // If true - will contact openchain to resolve the ABI to function names. - pub resolve_hashes: bool, - pub console_log_handler: ConsoleLogHandler, - pub system_contracts: SystemContracts, - pub impersonated_accounts: HashSet
, - pub rich_accounts: HashSet, - /// Keeps track of historical states indexed via block hash. Limited to [MAX_PREVIOUS_STATES]. - pub previous_states: IndexMap>, - /// An optional handle to the observability stack - pub observability: Option, -} - -type L2TxResult = ( - HashMap, - VmExecutionResultAndLogs, - Vec, - Block, - HashMap>, - BlockContext, -); - -impl InMemoryNodeInner { - pub fn create_l1_batch_env( - &self, - storage: StoragePtr, - ) -> (L1BatchEnv, BlockContext) { - let last_l2_block_hash = if let Some(last_l2_block) = load_last_l2_block(storage) { - last_l2_block.hash - } else { - // This is the scenario of either the first L2 block ever or - // the first block after the upgrade for support of L2 blocks. - legacy_miniblock_hash(MiniblockNumber(self.current_miniblock as u32)) - }; - let block_ctx = BlockContext::from_current( - self.current_batch, - self.current_miniblock, - self.current_timestamp, - ); - let block_ctx = block_ctx.new_batch(); - let batch_env = L1BatchEnv { - // TODO: set the previous batch hash properly (take from fork, when forking, and from local storage, when this is not the first block). - previous_batch_hash: None, - number: L1BatchNumber::from(block_ctx.batch), - timestamp: block_ctx.timestamp, - l1_gas_price: self.l1_gas_price, - fair_l2_gas_price: L2_GAS_PRICE, - fee_account: H160::zero(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - // the 'current_miniblock' contains the block that was already produced. - // So the next one should be one higher. - number: block_ctx.miniblock as u32, - timestamp: block_ctx.timestamp, - prev_block_hash: last_l2_block_hash, - // This is only used during zksyncEra block timestamp/number transition. - // In case of starting a new network, it doesn't matter. 
- // In theory , when forking mainnet, we should match this value - // to the value that was set in the node at that time - but AFAIK - // we don't have any API for this - so this might result in slightly - // incorrect replays of transacions during the migration period, that - // depend on block number or timestamp. - max_virtual_blocks_to_create: 1, - }, - }; - - (batch_env, block_ctx) - } - - pub fn create_system_env( - &self, - base_system_contracts: BaseSystemContracts, - execution_mode: TxExecutionMode, - ) -> SystemEnv { - SystemEnv { - zk_porter_available: false, - // TODO: when forking, we could consider taking the protocol version id from the fork itself. - version: zksync_types::ProtocolVersionId::latest(), - base_system_smart_contracts: base_system_contracts, - gas_limit: BLOCK_GAS_LIMIT, - execution_mode, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: self.fork_storage.chain_id, - } - } - - /// Estimates the gas required for a given call request. - /// - /// # Arguments - /// - /// * `req` - A `CallRequest` struct representing the call request to estimate gas for. - /// - /// # Returns - /// - /// A `Result` with a `Fee` representing the estimated gas related data. 
- pub fn estimate_gas_impl( - &self, - req: zksync_types::transaction_request::CallRequest, - ) -> jsonrpc_core::Result { - let mut request_with_gas_per_pubdata_overridden = req; - - if let Some(ref mut eip712_meta) = request_with_gas_per_pubdata_overridden.eip712_meta { - if eip712_meta.gas_per_pubdata == U256::zero() { - eip712_meta.gas_per_pubdata = MAX_GAS_PER_PUBDATA_BYTE.into(); - } - } - - let is_eip712 = request_with_gas_per_pubdata_overridden - .eip712_meta - .is_some(); - - let mut l2_tx = - match L2Tx::from_request(request_with_gas_per_pubdata_overridden.into(), MAX_TX_SIZE) { - Ok(tx) => tx, - Err(e) => { - let error = Web3Error::SerializationError(e); - return Err(into_jsrpc_error(error)); - } - }; - - let tx: Transaction = l2_tx.clone().into(); - let fair_l2_gas_price = L2_GAS_PRICE; - - // Calculate Adjusted L1 Price - let l1_gas_price = { - let current_l1_gas_price = - ((self.l1_gas_price as f64) * ESTIMATE_GAS_L1_GAS_PRICE_SCALE_FACTOR) as u64; - - // In order for execution to pass smoothly, we need to ensure that block's required gasPerPubdata will be - // <= to the one in the transaction itself. - adjust_l1_gas_price_for_tx( - current_l1_gas_price, - L2_GAS_PRICE, - tx.gas_per_pubdata_byte_limit(), - ) - }; - - let (base_fee, gas_per_pubdata_byte) = - derive_base_fee_and_gas_per_pubdata(l1_gas_price, fair_l2_gas_price); - - // Properly format signature - if l2_tx.common_data.signature.is_empty() { - l2_tx.common_data.signature = vec![0u8; 65]; - l2_tx.common_data.signature[64] = 27; - } - - // The user may not include the proper transaction type during the estimation of - // the gas fee. However, it is needed for the bootloader checks to pass properly. 
- if is_eip712 { - l2_tx.common_data.transaction_type = TransactionType::EIP712Transaction; - } - - l2_tx.common_data.fee.gas_per_pubdata_limit = MAX_GAS_PER_PUBDATA_BYTE.into(); - l2_tx.common_data.fee.max_fee_per_gas = base_fee.into(); - l2_tx.common_data.fee.max_priority_fee_per_gas = base_fee.into(); - - let mut storage_view = StorageView::new(&self.fork_storage); - - // Calculate gas_for_bytecodes_pubdata - let pubdata_for_factory_deps = l2_tx - .execute - .factory_deps - .as_deref() - .unwrap_or_default() - .iter() - .map(|bytecode| { - if storage_view.is_bytecode_known(&hash_bytecode(bytecode)) { - return 0; - } - - let length = if let Ok(compressed) = compress_bytecode(bytecode) { - compressed.len() - } else { - bytecode.len() - }; - length as u32 + ESTIMATE_GAS_PUBLISH_BYTE_OVERHEAD - }) - .sum::(); - - if pubdata_for_factory_deps > MAX_PUBDATA_PER_BLOCK { - return Err(into_jsrpc_error(Web3Error::SubmitTransactionError( - "exceeds limit for published pubdata".into(), - Default::default(), - ))); - } - - let gas_for_bytecodes_pubdata: u32 = - pubdata_for_factory_deps * (gas_per_pubdata_byte as u32); - - let storage = storage_view.to_rc_ptr(); - - let execution_mode = TxExecutionMode::EstimateFee; - let (mut batch_env, _) = self.create_l1_batch_env(storage.clone()); - batch_env.l1_gas_price = l1_gas_price; - let system_env = self.create_system_env( - self.system_contracts.contracts_for_fee_estimate().clone(), - execution_mode, - ); - - // We are using binary search to find the minimal values of gas_limit under which the transaction succeeds - let mut lower_bound = 0; - let mut upper_bound = MAX_L2_TX_GAS_LIMIT as u32; - let mut attempt_count = 1; - - tracing::trace!("Starting gas estimation loop"); - while lower_bound + ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION < upper_bound { - let mid = (lower_bound + upper_bound) / 2; - tracing::trace!( - "Attempt {} (lower_bound: {}, upper_bound: {}, mid: {})", - attempt_count, - lower_bound, - upper_bound, - mid - ); - let 
try_gas_limit = gas_for_bytecodes_pubdata + mid; - - let estimate_gas_result = InMemoryNodeInner::estimate_gas_step( - l2_tx.clone(), - gas_per_pubdata_byte, - try_gas_limit, - l1_gas_price, - batch_env.clone(), - system_env.clone(), - &self.fork_storage, - ); - - if estimate_gas_result.result.is_failed() { - tracing::trace!("Attempt {} FAILED", attempt_count); - lower_bound = mid + 1; - } else { - tracing::trace!("Attempt {} SUCCEEDED", attempt_count); - upper_bound = mid; - } - attempt_count += 1; - } - - tracing::trace!("Gas Estimation Values:"); - tracing::trace!(" Final upper_bound: {}", upper_bound); - tracing::trace!(" ESTIMATE_GAS_SCALE_FACTOR: {}", ESTIMATE_GAS_SCALE_FACTOR); - tracing::trace!(" MAX_L2_TX_GAS_LIMIT: {}", MAX_L2_TX_GAS_LIMIT); - let tx_body_gas_limit = cmp::min( - MAX_L2_TX_GAS_LIMIT as u32, - (upper_bound as f32 * ESTIMATE_GAS_SCALE_FACTOR) as u32, - ); - let suggested_gas_limit = tx_body_gas_limit + gas_for_bytecodes_pubdata; - - let estimate_gas_result = InMemoryNodeInner::estimate_gas_step( - l2_tx.clone(), - gas_per_pubdata_byte, - suggested_gas_limit, - l1_gas_price, - batch_env, - system_env, - &self.fork_storage, - ); - - let coefficients = OverheadCoeficients::from_tx_type(EIP_712_TX_TYPE); - let overhead: u32 = derive_overhead( - suggested_gas_limit, - gas_per_pubdata_byte as u32, - tx.encoding_len(), - coefficients, - ); - - match estimate_gas_result.result { - ExecutionResult::Revert { output } => { - tracing::info!("{}", format!("Unable to estimate gas for the request with our suggested gas limit of {}. The transaction is most likely unexecutable. 
Breakdown of estimation:", suggested_gas_limit + overhead).red()); - tracing::info!( - "{}", - format!( - "\tEstimated transaction body gas cost: {}", - tx_body_gas_limit - ) - .red() - ); - tracing::info!( - "{}", - format!("\tGas for pubdata: {}", gas_for_bytecodes_pubdata).red() - ); - tracing::info!("{}", format!("\tOverhead: {}", overhead).red()); - let message = output.to_string(); - let pretty_message = format!( - "execution reverted{}{}", - if message.is_empty() { "" } else { ": " }, - message - ); - let data = output.encoded_data(); - tracing::info!("{}", pretty_message.on_red()); - Err(into_jsrpc_error(Web3Error::SubmitTransactionError( - pretty_message, - data, - ))) - } - ExecutionResult::Halt { reason } => { - tracing::info!("{}", format!("Unable to estimate gas for the request with our suggested gas limit of {}. The transaction is most likely unexecutable. Breakdown of estimation:", suggested_gas_limit + overhead).red()); - tracing::info!( - "{}", - format!( - "\tEstimated transaction body gas cost: {}", - tx_body_gas_limit - ) - .red() - ); - tracing::info!( - "{}", - format!("\tGas for pubdata: {}", gas_for_bytecodes_pubdata).red() - ); - tracing::info!("{}", format!("\tOverhead: {}", overhead).red()); - let message = reason.to_string(); - let pretty_message = format!( - "execution reverted{}{}", - if message.is_empty() { "" } else { ": " }, - message - ); - - tracing::info!("{}", pretty_message.on_red()); - Err(into_jsrpc_error(Web3Error::SubmitTransactionError( - pretty_message, - vec![], - ))) - } - ExecutionResult::Success { .. } => { - let full_gas_limit = match tx_body_gas_limit - .overflowing_add(gas_for_bytecodes_pubdata + overhead) - { - (value, false) => value, - (_, true) => { - tracing::info!("{}", "Overflow when calculating gas estimation. 
We've exceeded the block gas limit by summing the following values:".red()); - tracing::info!( - "{}", - format!( - "\tEstimated transaction body gas cost: {}", - tx_body_gas_limit - ) - .red() - ); - tracing::info!( - "{}", - format!("\tGas for pubdata: {}", gas_for_bytecodes_pubdata).red() - ); - tracing::info!("{}", format!("\tOverhead: {}", overhead).red()); - return Err(into_jsrpc_error(Web3Error::SubmitTransactionError( - "exceeds block gas limit".into(), - Default::default(), - ))); - } - }; - - tracing::trace!("Gas Estimation Results"); - tracing::trace!(" tx_body_gas_limit: {}", tx_body_gas_limit); - tracing::trace!(" gas_for_bytecodes_pubdata: {}", gas_for_bytecodes_pubdata); - tracing::trace!(" overhead: {}", overhead); - tracing::trace!(" full_gas_limit: {}", full_gas_limit); - let fee = Fee { - max_fee_per_gas: base_fee.into(), - max_priority_fee_per_gas: 0u32.into(), - gas_limit: full_gas_limit.into(), - gas_per_pubdata_limit: gas_per_pubdata_byte.into(), - }; - Ok(fee) - } - } - } - - /// Runs fee estimation against a sandbox vm with the given gas_limit. 
- #[allow(clippy::too_many_arguments)] - fn estimate_gas_step( - mut l2_tx: L2Tx, - gas_per_pubdata_byte: u64, - tx_gas_limit: u32, - l1_gas_price: u64, - mut batch_env: L1BatchEnv, - system_env: SystemEnv, - fork_storage: &ForkStorage, - ) -> VmExecutionResultAndLogs { - let tx: Transaction = l2_tx.clone().into(); - let l1_gas_price = - adjust_l1_gas_price_for_tx(l1_gas_price, L2_GAS_PRICE, tx.gas_per_pubdata_byte_limit()); - - let coefficients = OverheadCoeficients::from_tx_type(EIP_712_TX_TYPE); - // Set gas_limit for transaction - let gas_limit_with_overhead = tx_gas_limit - + derive_overhead( - tx_gas_limit, - gas_per_pubdata_byte as u32, - tx.encoding_len(), - coefficients, - ); - l2_tx.common_data.fee.gas_limit = gas_limit_with_overhead.into(); - - let storage = StorageView::new(fork_storage).to_rc_ptr(); - - // The nonce needs to be updated - let nonce = l2_tx.nonce(); - let nonce_key = get_nonce_key(&l2_tx.initiator_account()); - let full_nonce = storage.borrow_mut().read_value(&nonce_key); - let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); - let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce); - storage - .borrow_mut() - .set_value(nonce_key, u256_to_h256(enforced_full_nonce)); - - // We need to explicitly put enough balance into the account of the users - let payer = l2_tx.payer(); - let balance_key = storage_key_for_eth_balance(&payer); - let mut current_balance = h256_to_u256(storage.borrow_mut().read_value(&balance_key)); - let added_balance = l2_tx.common_data.fee.gas_limit * l2_tx.common_data.fee.max_fee_per_gas; - current_balance += added_balance; - storage - .borrow_mut() - .set_value(balance_key, u256_to_h256(current_balance)); - - batch_env.l1_gas_price = l1_gas_price; - - let mut vm = Vm::new(batch_env, system_env, storage, HistoryDisabled); - - let tx: Transaction = l2_tx.into(); - vm.push_transaction(tx); - - vm.execute(VmExecutionMode::OneTx) - } - - /// Sets the 
`impersonated_account` field of the node. - /// This field is used to override the `tx.initiator_account` field of the transaction in the `run_l2_tx` method. - pub fn set_impersonated_account(&mut self, address: Address) -> bool { - self.impersonated_accounts.insert(address) - } - - /// Clears the `impersonated_account` field of the node. - pub fn stop_impersonating_account(&mut self, address: Address) -> bool { - self.impersonated_accounts.remove(&address) - } - - /// Archives the current state for later queries. - pub fn archive_state(&mut self) -> Result<(), String> { - if self.previous_states.len() > MAX_PREVIOUS_STATES as usize { - if let Some(entry) = self.previous_states.shift_remove_index(0) { - tracing::debug!("removing archived state for previous block {:#x}", entry.0); - } - } - tracing::debug!( - "archiving state for {:#x} #{}", - self.current_miniblock_hash, - self.current_miniblock - ); - self.previous_states.insert( - self.current_miniblock_hash, - self.fork_storage - .inner - .read() - .map_err(|err| err.to_string())? - .raw_storage - .state - .clone(), - ); - - Ok(()) - } - - /// Creates a [Snapshot] of the current state of the node. 
- pub fn snapshot(&self) -> Result { - let storage = self - .fork_storage - .inner - .read() - .map_err(|err| format!("failed acquiring read lock on storage: {:?}", err))?; - - Ok(Snapshot { - current_timestamp: self.current_timestamp, - current_batch: self.current_batch, - current_miniblock: self.current_miniblock, - current_miniblock_hash: self.current_miniblock_hash, - l1_gas_price: self.l1_gas_price, - tx_results: self.tx_results.clone(), - blocks: self.blocks.clone(), - block_hashes: self.block_hashes.clone(), - filters: self.filters.clone(), - impersonated_accounts: self.impersonated_accounts.clone(), - rich_accounts: self.rich_accounts.clone(), - previous_states: self.previous_states.clone(), - raw_storage: storage.raw_storage.clone(), - value_read_cache: storage.value_read_cache.clone(), - factory_dep_cache: storage.factory_dep_cache.clone(), - }) - } - - /// Restores a previously created [Snapshot] of the node. - pub fn restore_snapshot(&mut self, snapshot: Snapshot) -> Result<(), String> { - let mut storage = self - .fork_storage - .inner - .write() - .map_err(|err| format!("failed acquiring write lock on storage: {:?}", err))?; - - self.current_timestamp = snapshot.current_timestamp; - self.current_batch = snapshot.current_batch; - self.current_miniblock = snapshot.current_miniblock; - self.current_miniblock_hash = snapshot.current_miniblock_hash; - self.l1_gas_price = snapshot.l1_gas_price; - self.tx_results = snapshot.tx_results; - self.blocks = snapshot.blocks; - self.block_hashes = snapshot.block_hashes; - self.filters = snapshot.filters; - self.impersonated_accounts = snapshot.impersonated_accounts; - self.rich_accounts = snapshot.rich_accounts; - self.previous_states = snapshot.previous_states; - storage.raw_storage = snapshot.raw_storage; - storage.value_read_cache = snapshot.value_read_cache; - storage.factory_dep_cache = snapshot.factory_dep_cache; - - Ok(()) - } -} - -/// Creates a restorable snapshot for the [InMemoryNodeInner]. 
The snapshot contains all the necessary -/// data required to restore the [InMemoryNodeInner] state to a previous point in time. -#[derive(Debug, Clone)] -pub struct Snapshot { - pub(crate) current_timestamp: u64, - pub(crate) current_batch: u32, - pub(crate) current_miniblock: u64, - pub(crate) current_miniblock_hash: H256, - pub(crate) l1_gas_price: u64, - pub(crate) tx_results: HashMap, - pub(crate) blocks: HashMap>, - pub(crate) block_hashes: HashMap, - pub(crate) filters: EthFilters, - pub(crate) impersonated_accounts: HashSet
, - pub(crate) rich_accounts: HashSet, - pub(crate) previous_states: IndexMap>, - pub(crate) raw_storage: InMemoryStorage, - pub(crate) value_read_cache: HashMap, - pub(crate) factory_dep_cache: HashMap>>, -} - -/// Defines the configuration parameters for the [InMemoryNode]. -#[derive(Default, Debug, Clone)] -pub struct InMemoryNodeConfig { - pub show_calls: ShowCalls, - pub show_storage_logs: ShowStorageLogs, - pub show_vm_details: ShowVMDetails, - pub show_gas_details: ShowGasDetails, - pub resolve_hashes: bool, - pub system_contracts_options: system_contracts::Options, -} - -/// In-memory node, that can be used for local & unit testing. -/// It also supports the option of forking testnet/mainnet. -/// All contents are removed when object is destroyed. -pub struct InMemoryNode { - inner: Arc>>, -} - -fn contract_address_from_tx_result(execution_result: &VmExecutionResultAndLogs) -> Option { - for query in execution_result.logs.storage_logs.iter().rev() { - if query.log_type == StorageLogQueryType::InitialWrite - && query.log_query.address == ACCOUNT_CODE_STORAGE_ADDRESS - { - return Some(h256_to_account_address(&u256_to_h256(query.log_query.key))); - } - } - None -} - -impl Default for InMemoryNode { - fn default() -> Self { - InMemoryNode::new(None, None, InMemoryNodeConfig::default()) - } -} - -impl InMemoryNode { - pub fn new( - fork: Option>, - observability: Option, - config: InMemoryNodeConfig, - ) -> Self { - let inner = if let Some(f) = &fork { - let mut block_hashes = HashMap::::new(); - block_hashes.insert(f.l2_block.number.as_u64(), f.l2_block.hash); - let mut blocks = HashMap::>::new(); - blocks.insert(f.l2_block.hash, f.l2_block.clone()); - - InMemoryNodeInner { - current_timestamp: f.block_timestamp, - current_batch: f.l1_block.0, - current_miniblock: f.l2_miniblock, - current_miniblock_hash: f.l2_miniblock_hash, - l1_gas_price: f.l1_gas_price, - tx_results: Default::default(), - blocks, - block_hashes, - filters: Default::default(), - 
fork_storage: ForkStorage::new(fork, &config.system_contracts_options), - show_calls: config.show_calls, - show_storage_logs: config.show_storage_logs, - show_vm_details: config.show_vm_details, - show_gas_details: config.show_gas_details, - resolve_hashes: config.resolve_hashes, - console_log_handler: ConsoleLogHandler::default(), - system_contracts: SystemContracts::from_options(&config.system_contracts_options), - impersonated_accounts: Default::default(), - rich_accounts: HashSet::new(), - previous_states: Default::default(), - observability, - } - } else { - let mut block_hashes = HashMap::::new(); - block_hashes.insert(0, H256::zero()); - let mut blocks = HashMap::>::new(); - blocks.insert( - H256::zero(), - create_empty_block(0, NON_FORK_FIRST_BLOCK_TIMESTAMP, 0), - ); - - InMemoryNodeInner { - current_timestamp: NON_FORK_FIRST_BLOCK_TIMESTAMP, - current_batch: 0, - current_miniblock: 0, - current_miniblock_hash: H256::zero(), - l1_gas_price: L1_GAS_PRICE, - tx_results: Default::default(), - blocks, - block_hashes, - filters: Default::default(), - fork_storage: ForkStorage::new(fork, &config.system_contracts_options), - show_calls: config.show_calls, - show_storage_logs: config.show_storage_logs, - show_vm_details: config.show_vm_details, - show_gas_details: config.show_gas_details, - resolve_hashes: config.resolve_hashes, - console_log_handler: ConsoleLogHandler::default(), - system_contracts: SystemContracts::from_options(&config.system_contracts_options), - impersonated_accounts: Default::default(), - rich_accounts: HashSet::new(), - previous_states: Default::default(), - observability, - } - }; - - InMemoryNode { - inner: Arc::new(RwLock::new(inner)), - } - } - - pub fn get_inner(&self) -> Arc>> { - self.inner.clone() - } - - /// Applies multiple transactions - but still one per L1 batch. 
- pub fn apply_txs(&self, txs: Vec) -> Result<(), String> { - tracing::info!("Running {:?} transactions (one per batch)", txs.len()); - - for tx in txs { - self.run_l2_tx(tx, TxExecutionMode::VerifyExecute)?; - } - - Ok(()) - } - - /// Adds a lot of tokens to a given account. - pub fn set_rich_account(&self, address: H160) { - let key = storage_key_for_eth_balance(&address); - - let mut inner = match self.inner.write() { - Ok(guard) => guard, - Err(e) => { - tracing::info!("Failed to acquire write lock: {}", e); - return; - } - }; - - let keys = { - let mut storage_view = StorageView::new(&inner.fork_storage); - storage_view.set_value(key, u256_to_h256(U256::from(10u128.pow(30)))); - storage_view.modified_storage_keys().clone() - }; - - for (key, value) in keys.iter() { - inner.fork_storage.set_value(*key, *value); - } - inner.rich_accounts.insert(address); - } - - /// Runs L2 'eth call' method - that doesn't commit to a block. - fn run_l2_call(&self, mut l2_tx: L2Tx) -> Result { - let execution_mode = TxExecutionMode::EthCall; - - let inner = self - .inner - .write() - .map_err(|e| format!("Failed to acquire write lock: {}", e))?; - - let storage = StorageView::new(&inner.fork_storage).to_rc_ptr(); - - let bootloader_code = inner.system_contracts.contracts_for_l2_call(); - - // init vm - - let (batch_env, _) = inner.create_l1_batch_env(storage.clone()); - let system_env = inner.create_system_env(bootloader_code.clone(), execution_mode); - - let mut vm = Vm::new(batch_env, system_env, storage, HistoryDisabled); - - // We must inject *some* signature (otherwise bootloader code fails to generate hash). 
- if l2_tx.common_data.signature.is_empty() { - l2_tx.common_data.signature = PackedEthSignature::default().serialize_packed().into(); - } - - let tx: Transaction = l2_tx.into(); - vm.push_transaction(tx); - - let call_tracer_result = Arc::new(OnceCell::default()); - - let custom_tracers = - vec![ - Box::new(CallTracer::new(call_tracer_result.clone(), HistoryDisabled)) - as Box>, HistoryDisabled>>, - ]; - - let tx_result = vm.inspect(custom_tracers, VmExecutionMode::OneTx); - - let call_traces = Arc::try_unwrap(call_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - match &tx_result.result { - ExecutionResult::Success { output } => { - tracing::info!("Call: {}", "SUCCESS".green()); - let output_bytes = zksync_basic_types::Bytes::from(output.clone()); - tracing::info!("Output: {}", serde_json::to_string(&output_bytes).unwrap()); - } - ExecutionResult::Revert { output } => { - tracing::info!("Call: {}: {}", "FAILED".red(), output); - } - ExecutionResult::Halt { reason } => { - tracing::info!("Call: {} {}", "HALTED".red(), reason) - } - }; - - tracing::info!("=== Console Logs: "); - for call in &call_traces { - inner.console_log_handler.handle_call_recursive(call); - } - - tracing::info!("=== Call traces:"); - for call in &call_traces { - formatter::print_call(call, 0, &inner.show_calls, inner.resolve_hashes); - } - - Ok(tx_result.result) - } - - fn display_detailed_gas_info( - &self, - bootloader_debug_result: Option<&eyre::Result>, - spent_on_pubdata: u32, - ) -> eyre::Result<(), String> { - if let Some(bootloader_result) = bootloader_debug_result { - let bootloader_debug = bootloader_result.clone()?; - - tracing::info!("┌─────────────────────────┐"); - tracing::info!("│ GAS DETAILS │"); - tracing::info!("└─────────────────────────┘"); - - // Total amount of gas (should match tx.gas_limit). 
- let total_gas_limit = bootloader_debug - .total_gas_limit_from_user - .saturating_sub(bootloader_debug.reserved_gas); - - let intrinsic_gas = total_gas_limit - bootloader_debug.gas_limit_after_intrinsic; - let gas_for_validation = - bootloader_debug.gas_limit_after_intrinsic - bootloader_debug.gas_after_validation; - - let gas_spent_on_compute = bootloader_debug.gas_spent_on_execution - - bootloader_debug.gas_spent_on_bytecode_preparation; - - let gas_used = intrinsic_gas - + gas_for_validation - + bootloader_debug.gas_spent_on_bytecode_preparation - + gas_spent_on_compute; - - tracing::info!( - "Gas - Limit: {} | Used: {} | Refunded: {}", - to_human_size(total_gas_limit), - to_human_size(gas_used), - to_human_size(bootloader_debug.refund_by_operator) - ); - - if bootloader_debug.total_gas_limit_from_user != total_gas_limit { - tracing::info!( - "{}", - format!( - " WARNING: user actually provided more gas {}, but system had a lower max limit.", - to_human_size(bootloader_debug.total_gas_limit_from_user) - ) - .yellow() - ); - } - if bootloader_debug.refund_computed != bootloader_debug.refund_by_operator { - tracing::info!( - "{}", - format!( - " WARNING: Refund by VM: {}, but operator refunded more: {}", - to_human_size(bootloader_debug.refund_computed), - to_human_size(bootloader_debug.refund_by_operator) - ) - .yellow() - ); - } - - if bootloader_debug.refund_computed + gas_used != total_gas_limit { - tracing::info!( - "{}", - format!( - " WARNING: Gas totals don't match. 
{} != {} , delta: {}", - to_human_size(bootloader_debug.refund_computed + gas_used), - to_human_size(total_gas_limit), - to_human_size( - total_gas_limit.abs_diff(bootloader_debug.refund_computed + gas_used) - ) - ) - .yellow() - ); - } - - let bytes_published = spent_on_pubdata / bootloader_debug.gas_per_pubdata.as_u32(); - - tracing::info!( - "During execution published {} bytes to L1, @{} each - in total {} gas", - to_human_size(bytes_published.into()), - to_human_size(bootloader_debug.gas_per_pubdata), - to_human_size(spent_on_pubdata.into()) - ); - - tracing::info!("Out of {} gas used, we spent:", to_human_size(gas_used)); - tracing::info!( - " {:>15} gas ({:>2}%) for transaction setup", - to_human_size(intrinsic_gas), - to_human_size(intrinsic_gas * 100 / gas_used) - ); - tracing::info!( - " {:>15} gas ({:>2}%) for bytecode preparation (decompression etc)", - to_human_size(bootloader_debug.gas_spent_on_bytecode_preparation), - to_human_size(bootloader_debug.gas_spent_on_bytecode_preparation * 100 / gas_used) - ); - tracing::info!( - " {:>15} gas ({:>2}%) for account validation", - to_human_size(gas_for_validation), - to_human_size(gas_for_validation * 100 / gas_used) - ); - tracing::info!( - " {:>15} gas ({:>2}%) for computations (opcodes)", - to_human_size(gas_spent_on_compute), - to_human_size(gas_spent_on_compute * 100 / gas_used) - ); - - tracing::info!(""); - tracing::info!(""); - tracing::info!( - "{}", - "=== Transaction setup cost breakdown ===".to_owned().bold(), - ); - - tracing::info!("Total cost: {}", to_human_size(intrinsic_gas).bold()); - tracing::info!( - " {:>15} gas ({:>2}%) fixed cost", - to_human_size(bootloader_debug.intrinsic_overhead), - to_human_size(bootloader_debug.intrinsic_overhead * 100 / intrinsic_gas) - ); - tracing::info!( - " {:>15} gas ({:>2}%) operator cost", - to_human_size(bootloader_debug.operator_overhead), - to_human_size(bootloader_debug.operator_overhead * 100 / intrinsic_gas) - ); - - tracing::info!(""); - 
tracing::info!( - " FYI: operator could have charged up to: {}, so you got {}% discount", - to_human_size(bootloader_debug.required_overhead), - to_human_size( - (bootloader_debug.required_overhead - bootloader_debug.operator_overhead) * 100 - / bootloader_debug.required_overhead - ) - ); - - let publish_block_l1_bytes = BLOCK_OVERHEAD_PUBDATA; - tracing::info!( - "Publishing full block costs the operator up to: {}, where {} is due to {} bytes published to L1", - to_human_size(bootloader_debug.total_overhead_for_block), - to_human_size(bootloader_debug.gas_per_pubdata * publish_block_l1_bytes), - to_human_size(publish_block_l1_bytes.into()) - ); - tracing::info!("Your transaction has contributed to filling up the block in the following way (we take the max contribution as the cost):"); - tracing::info!( - " Circuits overhead:{:>15} ({}% of the full block: {})", - to_human_size(bootloader_debug.overhead_for_circuits), - to_human_size( - bootloader_debug.overhead_for_circuits * 100 - / bootloader_debug.total_overhead_for_block - ), - to_human_size(bootloader_debug.total_overhead_for_block) - ); - tracing::info!( - " Length overhead: {:>15}", - to_human_size(bootloader_debug.overhead_for_length) - ); - tracing::info!( - " Slot overhead: {:>15}", - to_human_size(bootloader_debug.overhead_for_slot) - ); - Ok(()) - } else { - Err("Booloader tracer didn't finish.".to_owned()) - } - } - - /// Executes the given L2 transaction and returns all the VM logs. 
- pub fn run_l2_tx_inner( - &self, - l2_tx: L2Tx, - execution_mode: TxExecutionMode, - ) -> Result { - let inner = self - .inner - .write() - .map_err(|e| format!("Failed to acquire write lock: {}", e))?; - - let storage = StorageView::new(&inner.fork_storage).to_rc_ptr(); - - let (batch_env, block_ctx) = inner.create_l1_batch_env(storage.clone()); - - // if we are impersonating an account, we need to use non-verifying system contracts - let nonverifying_contracts; - let bootloader_code = { - if inner - .impersonated_accounts - .contains(&l2_tx.common_data.initiator_address) - { - tracing::info!( - "🕵️ Executing tx from impersonated account {:?}", - l2_tx.common_data.initiator_address - ); - nonverifying_contracts = - SystemContracts::from_options(&Options::BuiltInWithoutSecurity); - nonverifying_contracts.contracts(execution_mode) - } else { - inner.system_contracts.contracts(execution_mode) - } - }; - let system_env = inner.create_system_env(bootloader_code.clone(), execution_mode); - - let mut vm = Vm::new( - batch_env.clone(), - system_env, - storage.clone(), - HistoryDisabled, - ); - - let tx: Transaction = l2_tx.clone().into(); - - vm.push_transaction(tx.clone()); - - let call_tracer_result = Arc::new(OnceCell::default()); - let bootloader_debug_result = Arc::new(OnceCell::default()); - - let custom_tracers = vec![ - Box::new(CallTracer::new(call_tracer_result.clone(), HistoryDisabled)) - as Box>, HistoryDisabled>>, - Box::new(BootloaderDebugTracer { - result: bootloader_debug_result.clone(), - }) as Box>, HistoryDisabled>>, - ]; - - let tx_result = vm.inspect(custom_tracers, VmExecutionMode::OneTx); - - let call_traces = call_tracer_result.get().unwrap(); - - let spent_on_pubdata = - tx_result.statistics.gas_used - tx_result.statistics.computational_gas_used; - - tracing::info!("┌─────────────────────────┐"); - tracing::info!("│ TRANSACTION SUMMARY │"); - tracing::info!("└─────────────────────────┘"); - - match &tx_result.result { - ExecutionResult::Success 
{ .. } => tracing::info!("Transaction: {}", "SUCCESS".green()), - ExecutionResult::Revert { .. } => tracing::info!("Transaction: {}", "FAILED".red()), - ExecutionResult::Halt { .. } => tracing::info!("Transaction: {}", "HALTED".red()), - } - - tracing::info!("Initiator: {:?}", tx.initiator_account()); - tracing::info!("Payer: {:?}", tx.payer()); - tracing::info!( - "Gas - Limit: {} | Used: {} | Refunded: {}", - to_human_size(tx.gas_limit()), - to_human_size(tx.gas_limit() - tx_result.refunds.gas_refunded), - to_human_size(tx_result.refunds.gas_refunded.into()) - ); - - match inner.show_gas_details { - ShowGasDetails::None => tracing::info!( - "Use --show-gas-details flag or call config_setShowGasDetails to display more info" - ), - ShowGasDetails::All => { - if self - .display_detailed_gas_info(bootloader_debug_result.get(), spent_on_pubdata) - .is_err() - { - tracing::info!( - "{}", - "!!! FAILED TO GET DETAILED GAS INFO !!!".to_owned().red() - ); - } - } - } - - if inner.show_storage_logs != ShowStorageLogs::None { - tracing::info!(""); - tracing::info!("┌──────────────────┐"); - tracing::info!("│ STORAGE LOGS │"); - tracing::info!("└──────────────────┘"); - } - - for log_query in &tx_result.logs.storage_logs { - match inner.show_storage_logs { - ShowStorageLogs::Write => { - if matches!( - log_query.log_type, - StorageLogQueryType::RepeatedWrite | StorageLogQueryType::InitialWrite - ) { - formatter::print_logs(log_query); - } - } - ShowStorageLogs::Read => { - if log_query.log_type == StorageLogQueryType::Read { - formatter::print_logs(log_query); - } - } - ShowStorageLogs::All => { - formatter::print_logs(log_query); - } - _ => {} - } - } - - if inner.show_vm_details != ShowVMDetails::None { - formatter::print_vm_details(&tx_result); - } - - tracing::info!(""); - tracing::info!("==== Console logs: "); - for call in call_traces { - inner.console_log_handler.handle_call_recursive(call); - } - tracing::info!(""); - let call_traces_count = if 
!call_traces.is_empty() { - // All calls/sub-calls are stored within the first call trace - call_traces[0].calls.len() - } else { - 0 - }; - tracing::info!( - "==== {} Use --show-calls flag or call config_setShowCalls to display more info.", - format!("{:?} call traces. ", call_traces_count).bold() - ); - - if inner.show_calls != ShowCalls::None { - for call in call_traces { - formatter::print_call(call, 0, &inner.show_calls, inner.resolve_hashes); - } - } - tracing::info!(""); - tracing::info!( - "==== {}", - format!("{} events", tx_result.logs.events.len()).bold() - ); - for event in &tx_result.logs.events { - formatter::print_event(event, inner.resolve_hashes); - } - - // The computed block hash here will be different than that in production. - let hash = compute_hash(block_ctx.miniblock, l2_tx.hash()); - - let mut transaction = zksync_types::api::Transaction::from(l2_tx); - let block_hash = inner - .block_hashes - .get(&inner.current_miniblock) - .ok_or(format!( - "Block hash not found for block: {}", - inner.current_miniblock - ))?; - transaction.block_hash = Some(*block_hash); - transaction.block_number = Some(U64::from(inner.current_miniblock)); - - let block = Block { - hash, - number: U64::from(block_ctx.miniblock), - timestamp: U256::from(batch_env.timestamp), - l1_batch_number: Some(U64::from(batch_env.number.0)), - transactions: vec![TransactionVariant::Full(transaction)], - gas_used: U256::from(tx_result.statistics.gas_used), - gas_limit: U256::from(BLOCK_GAS_LIMIT), - ..Default::default() - }; - - tracing::info!(""); - tracing::info!(""); - - let bytecodes = vm - .get_last_tx_compressed_bytecodes() - .iter() - .map(|b| bytecode_to_factory_dep(b.original.clone())) - .collect(); - - vm.execute(VmExecutionMode::Bootloader); - - let modified_keys = storage.borrow().modified_storage_keys().clone(); - Ok(( - modified_keys, - tx_result, - call_traces.clone(), - block, - bytecodes, - block_ctx, - )) - } - - /// Runs L2 transaction and commits it to a new 
block. - fn run_l2_tx(&self, l2_tx: L2Tx, execution_mode: TxExecutionMode) -> Result<(), String> { - let tx_hash = l2_tx.hash(); - tracing::info!(""); - tracing::info!("Executing {}", format!("{:?}", tx_hash).bold()); - - { - let mut inner = self - .inner - .write() - .map_err(|e| format!("Failed to acquire write lock: {}", e))?; - inner.filters.notify_new_pending_transaction(tx_hash); - } - - let (keys, result, call_traces, block, bytecodes, block_ctx) = - self.run_l2_tx_inner(l2_tx.clone(), execution_mode)?; - - if let ExecutionResult::Halt { reason } = result.result { - // Halt means that something went really bad with the transaction execution (in most cases invalid signature, - // but it could also be bootloader panic etc). - // In such case, we should not persist the VM data, and we should pretend that transaction never existed. - return Err(format!("Transaction HALT: {}", reason)); - } - // Write all the mutated keys (storage slots). - let mut inner = self - .inner - .write() - .map_err(|e| format!("Failed to acquire write lock: {}", e))?; - for (key, value) in keys.iter() { - inner.fork_storage.set_value(*key, *value); - } - - // Write all the factory deps. 
- for (hash, code) in bytecodes.iter() { - inner.fork_storage.store_factory_dep( - u256_to_h256(*hash), - code.iter() - .flat_map(|entry| { - let mut bytes = vec![0u8; 32]; - entry.to_big_endian(&mut bytes); - bytes.to_vec() - }) - .collect(), - ) - } - - for (log_idx, event) in result.logs.events.iter().enumerate() { - inner.filters.notify_new_log( - &Log { - address: event.address, - topics: event.indexed_topics.clone(), - data: Bytes(event.value.clone()), - block_hash: Some(block.hash), - block_number: Some(block.number), - l1_batch_number: block.l1_batch_number, - transaction_hash: Some(tx_hash), - transaction_index: Some(U64::zero()), - log_index: Some(U256::from(log_idx)), - transaction_log_index: Some(U256::from(log_idx)), - log_type: None, - removed: None, - }, - block.number, - ); - } - let tx_receipt = TransactionReceipt { - transaction_hash: tx_hash, - transaction_index: U64::from(0), - block_hash: Some(block.hash), - block_number: Some(block.number), - l1_batch_tx_index: None, - l1_batch_number: block.l1_batch_number, - from: l2_tx.initiator_account(), - to: Some(l2_tx.recipient_account()), - cumulative_gas_used: Default::default(), - gas_used: Some(l2_tx.common_data.fee.gas_limit - result.refunds.gas_refunded), - contract_address: contract_address_from_tx_result(&result), - logs: result - .logs - .events - .iter() - .enumerate() - .map(|(log_idx, log)| Log { - address: log.address, - topics: log.indexed_topics.clone(), - data: Bytes(log.value.clone()), - block_hash: Some(block.hash), - block_number: Some(block.number), - l1_batch_number: block.l1_batch_number, - transaction_hash: Some(tx_hash), - transaction_index: Some(U64::zero()), - log_index: Some(U256::from(log_idx)), - transaction_log_index: Some(U256::from(log_idx)), - log_type: None, - removed: None, - }) - .collect(), - l2_to_l1_logs: vec![], - status: Some(if result.result.is_failed() { - U64::from(0) - } else { - U64::from(1) - }), - effective_gas_price: Some(L2_GAS_PRICE.into()), - 
..Default::default() - }; - let debug = create_debug_output(&l2_tx, &result, call_traces).expect("create debug output"); // OK to unwrap here as Halt is handled above - inner.tx_results.insert( - tx_hash, - TransactionResult { - info: TxExecutionInfo { - tx: l2_tx, - batch_number: block.l1_batch_number.unwrap_or_default().as_u32(), - miniblock_number: block.number.as_u64(), - result, - }, - receipt: tx_receipt, - debug, - }, - ); - - // With the introduction of 'l2 blocks' (and virtual blocks), - // we are adding one l2 block at the end of each batch (to handle things like remaining events etc). - // You can look at insert_fictive_l2_block function in VM to see how this fake block is inserted. - let block_ctx = block_ctx.new_block(); - let empty_block_at_end_of_batch = - create_empty_block(block_ctx.miniblock, block_ctx.timestamp, block_ctx.batch); - - inner.current_batch = inner.current_batch.saturating_add(1); - - for block in vec![block, empty_block_at_end_of_batch] { - // archive current state before we produce new batch/blocks - if let Err(err) = inner.archive_state() { - tracing::error!( - "failed archiving state for block {}: {}", - inner.current_miniblock, - err - ); - } - - inner.current_miniblock = inner.current_miniblock.saturating_add(1); - inner.current_timestamp = inner.current_timestamp.saturating_add(1); - - let actual_l1_batch_number = block - .l1_batch_number - .expect("block must have a l1_batch_number"); - if actual_l1_batch_number.as_u32() != inner.current_batch { - panic!( - "expected next block to have batch_number {}, got {}", - inner.current_batch, - actual_l1_batch_number.as_u32() - ); - } - - if block.number.as_u64() != inner.current_miniblock { - panic!( - "expected next block to have miniblock {}, got {}", - inner.current_miniblock, - block.number.as_u64() - ); - } - - if block.timestamp.as_u64() != inner.current_timestamp { - panic!( - "expected next block to have timestamp {}, got {}", - inner.current_timestamp, - 
block.timestamp.as_u64() - ); - } - - let block_hash = block.hash; - inner.current_miniblock_hash = block_hash; - inner.block_hashes.insert(block.number.as_u64(), block.hash); - inner.blocks.insert(block.hash, block); - inner.filters.notify_new_block(block_hash); - } - - Ok(()) - } -} - -/// Keeps track of a block's batch number, miniblock number and timestamp. -/// Useful for keeping track of the current context when creating multiple blocks. -pub struct BlockContext { - pub batch: u32, - pub miniblock: u64, - pub timestamp: u64, -} - -impl BlockContext { - /// Create the current instance that represents the latest block. - pub fn from_current(batch: u32, miniblock: u64, timestamp: u64) -> Self { - Self { - batch, - miniblock, - timestamp, - } - } - - /// Create the next batch instance that has all parameters incremented by `1`. - pub fn new_batch(&self) -> Self { - Self { - batch: self.batch.saturating_add(1), - miniblock: self.miniblock.saturating_add(1), - timestamp: self.timestamp.saturating_add(1), - } - } - - /// Create the next batch instance that uses the same batch number, and has all other parameters incremented by `1`. - pub fn new_block(&self) -> BlockContext { - Self { - batch: self.batch, - miniblock: self.miniblock.saturating_add(1), - timestamp: self.timestamp.saturating_add(1), - } - } -} +use crate::{ + filters::{FilterType, LogFilter}, + fork::ForkSource, + namespaces::{EthNamespaceT, RpcResult}, + node::{InMemoryNode, TransactionResult, L2_GAS_PRICE, MAX_TX_SIZE, PROTOCOL_VERSION}, + utils::{self, not_implemented, IntoBoxedFuture}, +}; -impl EthNamespaceT for InMemoryNode { +impl EthNamespaceT + for InMemoryNode +{ /// Returns the chain ID of the node. 
- fn chain_id(&self) -> jsonrpc_core::BoxFuture> { - match self.inner.read() { + fn chain_id(&self) -> RpcResult { + match self.get_inner().read() { Ok(inner) => Ok(U64::from(inner.fork_storage.chain_id.as_u64())).into_boxed_future(), Err(_) => Err(into_jsrpc_error(Web3Error::InternalError)).into_boxed_future(), } @@ -1674,8 +55,8 @@ impl EthNamespaceT for fn call( &self, req: zksync_types::transaction_request::CallRequest, - _block: Option, - ) -> jsonrpc_core::BoxFuture> { + _block: Option, + ) -> RpcResult { match L2Tx::from_request(req.into(), MAX_TX_SIZE) { Ok(mut tx) => { tx.common_data.fee.gas_limit = ETH_CALL_GAS_LIMIT.into(); @@ -1742,12 +123,8 @@ impl EthNamespaceT for /// # Returns /// /// A `BoxFuture` that resolves to a `Result` containing the balance of the specified address as a `U256` or a `jsonrpc_core::Error` if an error occurred. - fn get_balance( - &self, - address: zksync_basic_types::Address, - _block: Option, - ) -> BoxFuture> { - let inner = Arc::clone(&self.inner); + fn get_balance(&self, address: Address, _block: Option) -> RpcResult { + let inner = self.get_inner().clone(); Box::pin(async move { let balance_key = storage_key_for_standard_token_balance( @@ -1777,17 +154,13 @@ impl EthNamespaceT for /// /// # Returns /// - /// A `BoxFuture` containing a `jsonrpc_core::Result` that resolves to an `Option` of `zksync_types::api::Block`. + /// A `BoxFuture` containing a `jsonrpc_core::Result` that resolves to an `Option` of `Block`. 
fn get_block_by_number( &self, - block_number: zksync_types::api::BlockNumber, + block_number: BlockNumber, full_transactions: bool, - ) -> BoxFuture< - jsonrpc_core::Result< - Option>, - >, - > { - let inner = Arc::clone(&self.inner); + ) -> RpcResult>> { + let inner = self.get_inner().clone(); Box::pin(async move { let maybe_block = { @@ -1869,9 +242,9 @@ impl EthNamespaceT for fn get_code( &self, address: zksync_basic_types::Address, - _block: Option, - ) -> BoxFuture> { - let inner = Arc::clone(&self.inner); + _block: Option, + ) -> RpcResult { + let inner = self.get_inner().clone(); Box::pin(async move { let code_key = get_code_key(&address); @@ -1905,9 +278,9 @@ impl EthNamespaceT for fn get_transaction_count( &self, address: zksync_basic_types::Address, - _block: Option, - ) -> BoxFuture> { - let inner = Arc::clone(&self.inner); + _block: Option, + ) -> RpcResult { + let inner = self.get_inner().clone(); Box::pin(async move { let nonce_key = get_nonce_key(&address); @@ -1934,8 +307,8 @@ impl EthNamespaceT for fn get_transaction_receipt( &self, hash: zksync_basic_types::H256, - ) -> BoxFuture>> { - let inner = Arc::clone(&self.inner); + ) -> RpcResult> { + let inner = self.get_inner().clone(); Box::pin(async move { let reader = match inner.read() { @@ -1963,8 +336,8 @@ impl EthNamespaceT for fn send_raw_transaction( &self, tx_bytes: zksync_basic_types::Bytes, - ) -> jsonrpc_core::BoxFuture> { - let chain_id = match self.inner.read() { + ) -> RpcResult { + let chain_id = match self.get_inner().read() { Ok(reader) => reader.fork_storage.chain_id, Err(_) => { return futures::future::err(into_jsrpc_error(Web3Error::InternalError)).boxed() @@ -2017,17 +390,13 @@ impl EthNamespaceT for /// /// # Returns /// - /// A `BoxFuture` containing a `jsonrpc_core::Result` that resolves to an `Option` of `zksync_types::api::Block`. + /// A `BoxFuture` containing a `jsonrpc_core::Result` that resolves to an `Option` of `Block`. 
fn get_block_by_hash( &self, hash: zksync_basic_types::H256, full_transactions: bool, - ) -> jsonrpc_core::BoxFuture< - jsonrpc_core::Result< - Option>, - >, - > { - let inner = Arc::clone(&self.inner); + ) -> RpcResult>> { + let inner = self.get_inner().clone(); Box::pin(async move { let maybe_block = { @@ -2099,8 +468,8 @@ impl EthNamespaceT for fn get_transaction_by_hash( &self, hash: zksync_basic_types::H256, - ) -> jsonrpc_core::BoxFuture>> { - let inner = Arc::clone(&self.inner); + ) -> RpcResult> { + let inner = self.get_inner().clone(); Box::pin(async move { let reader = inner @@ -2170,10 +539,8 @@ impl EthNamespaceT for } /// Returns the current block number as a `U64` wrapped in a `BoxFuture`. - fn get_block_number( - &self, - ) -> jsonrpc_core::BoxFuture> { - let inner = Arc::clone(&self.inner); + fn get_block_number(&self) -> RpcResult { + let inner = self.get_inner().clone(); Box::pin(async move { let reader = inner @@ -2196,9 +563,9 @@ impl EthNamespaceT for fn estimate_gas( &self, req: zksync_types::transaction_request::CallRequest, - _block: Option, - ) -> jsonrpc_core::BoxFuture> { - let inner = Arc::clone(&self.inner); + _block: Option, + ) -> RpcResult { + let inner = self.get_inner().clone(); let reader = match inner.read() { Ok(r) => r, Err(_) => { @@ -2214,7 +581,7 @@ impl EthNamespaceT for } /// Returns the current gas price in U256 format. - fn gas_price(&self) -> jsonrpc_core::BoxFuture> { + fn gas_price(&self) -> RpcResult { let fair_l2_gas_price: u64 = L2_GAS_PRICE; Ok(U256::from(fair_l2_gas_price)).into_boxed_future() } @@ -2241,8 +608,8 @@ impl EthNamespaceT for /// # Returns /// /// A `BoxFuture` containing a `jsonrpc_core::Result` that resolves to an `U256` filter id. 
- fn new_filter(&self, filter: Filter) -> jsonrpc_core::BoxFuture> { - let inner = Arc::clone(&self.inner); + fn new_filter(&self, filter: Filter) -> RpcResult { + let inner = self.get_inner().clone(); let mut writer = match inner.write() { Ok(r) => r, Err(_) => { @@ -2250,12 +617,8 @@ impl EthNamespaceT for } }; - let from_block = filter - .from_block - .unwrap_or(zksync_types::api::BlockNumber::Latest); - let to_block = filter - .to_block - .unwrap_or(zksync_types::api::BlockNumber::Latest); + let from_block = filter.from_block.unwrap_or(BlockNumber::Latest); + let to_block = filter.to_block.unwrap_or(BlockNumber::Latest); let addresses = filter.address.unwrap_or_default().0; let mut topics: [Option>; 4] = Default::default(); @@ -2284,8 +647,8 @@ impl EthNamespaceT for /// # Returns /// /// A `BoxFuture` containing a `jsonrpc_core::Result` that resolves to an `U256` filter id. - fn new_block_filter(&self) -> jsonrpc_core::BoxFuture> { - let inner = Arc::clone(&self.inner); + fn new_block_filter(&self) -> RpcResult { + let inner = self.get_inner().clone(); let mut writer = match inner.write() { Ok(r) => r, Err(_) => { @@ -2306,10 +669,8 @@ impl EthNamespaceT for /// # Returns /// /// A `BoxFuture` containing a `jsonrpc_core::Result` that resolves to an `U256` filter id. - fn new_pending_transaction_filter( - &self, - ) -> jsonrpc_core::BoxFuture> { - let inner = Arc::clone(&self.inner); + fn new_pending_transaction_filter(&self) -> RpcResult { + let inner = self.get_inner().clone(); let mut writer = match inner.write() { Ok(r) => r, Err(_) => { @@ -2333,8 +694,8 @@ impl EthNamespaceT for /// # Returns /// /// A `BoxFuture` containing a `jsonrpc_core::Result` that resolves to an `U256` filter id. 
- fn uninstall_filter(&self, id: U256) -> jsonrpc_core::BoxFuture> { - let inner = Arc::clone(&self.inner); + fn uninstall_filter(&self, id: U256) -> RpcResult { + let inner = self.get_inner().clone(); let mut writer = match inner.write() { Ok(r) => r, Err(_) => { @@ -2360,22 +721,16 @@ impl EthNamespaceT for /// # Returns /// /// A `BoxFuture` containing a `jsonrpc_core::Result` that resolves to an array of logs. - fn get_logs( - &self, - filter: Filter, - ) -> jsonrpc_core::BoxFuture>> { - let reader = match self.inner.read() { + fn get_logs(&self, filter: Filter) -> RpcResult> { + let inner = self.get_inner(); + let reader = match inner.read() { Ok(r) => r, Err(_) => { return futures::future::err(into_jsrpc_error(Web3Error::InternalError)).boxed() } }; - let from_block = filter - .from_block - .unwrap_or(zksync_types::api::BlockNumber::Earliest); - let to_block = filter - .to_block - .unwrap_or(zksync_types::api::BlockNumber::Latest); + let from_block = filter.from_block.unwrap_or(BlockNumber::Earliest); + let to_block = filter.to_block.unwrap_or(BlockNumber::Latest); let addresses = filter.address.unwrap_or_default().0; let mut topics: [Option>; 4] = Default::default(); @@ -2419,11 +774,9 @@ impl EthNamespaceT for /// # Returns /// /// A `BoxFuture` containing a `jsonrpc_core::Result` that resolves to an array of logs. - fn get_filter_logs( - &self, - id: U256, - ) -> jsonrpc_core::BoxFuture> { - let reader = match self.inner.read() { + fn get_filter_logs(&self, id: U256) -> RpcResult { + let inner = self.get_inner(); + let reader = match inner.read() { Ok(r) => r, Err(_) => { return futures::future::err(into_jsrpc_error(Web3Error::InternalError)).boxed() @@ -2464,11 +817,8 @@ impl EthNamespaceT for /// * Filters created with `eth_newFilter` return [Log] objects. /// * Filters created with `eth_newBlockFilter` return block hashes. /// * Filters created with `eth_newPendingTransactionFilter` return transaction hashes. 
- fn get_filter_changes( - &self, - id: U256, - ) -> jsonrpc_core::BoxFuture> { - let inner = Arc::clone(&self.inner); + fn get_filter_changes(&self, id: U256) -> RpcResult { + let inner = self.get_inner().clone(); let mut writer = match inner.write() { Ok(r) => r, Err(_) => { @@ -2485,9 +835,9 @@ impl EthNamespaceT for fn get_block_transaction_count_by_number( &self, - block_number: zksync_types::api::BlockNumber, - ) -> jsonrpc_core::BoxFuture>> { - let inner = Arc::clone(&self.inner); + block_number: BlockNumber, + ) -> RpcResult> { + let inner = self.get_inner().clone(); Box::pin(async move { let maybe_result = { @@ -2531,8 +881,8 @@ impl EthNamespaceT for fn get_block_transaction_count_by_hash( &self, block_hash: zksync_basic_types::H256, - ) -> jsonrpc_core::BoxFuture>> { - let inner = Arc::clone(&self.inner); + ) -> RpcResult> { + let inner = self.get_inner().clone(); Box::pin(async move { let reader = inner @@ -2582,9 +932,9 @@ impl EthNamespaceT for &self, address: zksync_basic_types::Address, idx: U256, - block: Option, - ) -> jsonrpc_core::BoxFuture> { - let inner = Arc::clone(&self.inner); + block: Option, + ) -> RpcResult { + let inner = self.get_inner().clone(); Box::pin(async move { let mut writer = match inner.write() { @@ -2598,19 +948,15 @@ impl EthNamespaceT for let block_number = block .map(|block| match block { - zksync_types::api::BlockIdVariant::BlockNumber(block_number) => { - Ok(utils::to_real_block_number( - block_number, - U64::from(writer.current_miniblock), - )) - } - zksync_types::api::BlockIdVariant::BlockNumberObject(o) => { - Ok(utils::to_real_block_number( - o.block_number, - U64::from(writer.current_miniblock), - )) - } - zksync_types::api::BlockIdVariant::BlockHashObject(o) => writer + BlockIdVariant::BlockNumber(block_number) => Ok(utils::to_real_block_number( + block_number, + U64::from(writer.current_miniblock), + )), + BlockIdVariant::BlockNumberObject(o) => Ok(utils::to_real_block_number( + o.block_number, + 
U64::from(writer.current_miniblock), + )), + BlockIdVariant::BlockHashObject(o) => writer .blocks .get(&o.block_hash) .map(|block| block.number) @@ -2676,8 +1022,8 @@ impl EthNamespaceT for &self, block_hash: zksync_basic_types::H256, index: zksync_basic_types::web3::types::Index, - ) -> jsonrpc_core::BoxFuture>> { - let inner = Arc::clone(&self.inner); + ) -> RpcResult> { + let inner = self.get_inner().clone(); Box::pin(async move { let reader = match inner.read() { @@ -2739,10 +1085,10 @@ impl EthNamespaceT for /// A `BoxFuture` containing a `jsonrpc_core::Result` that maybe resolves to a [zksync_types::api::Transaction], if found. fn get_transaction_by_block_number_and_index( &self, - block_number: zksync_types::api::BlockNumber, + block_number: BlockNumber, index: zksync_basic_types::web3::types::Index, - ) -> jsonrpc_core::BoxFuture>> { - let inner = Arc::clone(&self.inner); + ) -> RpcResult> { + let inner = self.get_inner().clone(); Box::pin(async move { let reader = match inner.read() { @@ -2800,15 +1146,12 @@ impl EthNamespaceT for /// # Returns /// /// A `BoxFuture` containing a `jsonrpc_core::Result` that resolves to a hex `String` of the version number. - fn protocol_version(&self) -> jsonrpc_core::BoxFuture> { + fn protocol_version(&self) -> RpcResult { Ok(String::from(PROTOCOL_VERSION)).into_boxed_future() } - fn syncing( - &self, - ) -> jsonrpc_core::BoxFuture> - { - Ok(zksync_basic_types::web3::types::SyncState::NotSyncing).into_boxed_future() + fn syncing(&self) -> RpcResult { + Ok(SyncState::NotSyncing).into_boxed_future() } /// Returns a list of available accounts. /// @@ -2821,8 +1164,8 @@ impl EthNamespaceT for /// # Returns /// /// A `BoxFuture` containing a `jsonrpc_core::Result` that resolves to a `Vec` of addresses. 
- fn accounts(&self) -> jsonrpc_core::BoxFuture>> { - let inner = Arc::clone(&self.inner); + fn accounts(&self) -> RpcResult> { + let inner = self.get_inner().clone(); let reader = match inner.read() { Ok(r) => r, Err(_) => { @@ -2834,35 +1177,30 @@ impl EthNamespaceT for futures::future::ok(accounts).boxed() } - fn coinbase( - &self, - ) -> jsonrpc_core::BoxFuture> { + fn coinbase(&self) -> RpcResult { not_implemented("eth_coinbase") } - fn compilers(&self) -> jsonrpc_core::BoxFuture>> { + fn compilers(&self) -> RpcResult> { not_implemented("eth_getCompilers") } - fn hashrate(&self) -> jsonrpc_core::BoxFuture> { + fn hashrate(&self) -> RpcResult { not_implemented("eth_hashrate") } fn get_uncle_count_by_block_hash( &self, _hash: zksync_basic_types::H256, - ) -> jsonrpc_core::BoxFuture>> { + ) -> RpcResult> { not_implemented("eth_getUncleCountByBlockHash") } - fn get_uncle_count_by_block_number( - &self, - _number: zksync_types::api::BlockNumber, - ) -> jsonrpc_core::BoxFuture>> { + fn get_uncle_count_by_block_number(&self, _number: BlockNumber) -> RpcResult> { not_implemented("eth_getUncleCountByBlockNumber") } - fn mining(&self) -> jsonrpc_core::BoxFuture> { + fn mining(&self) -> RpcResult { not_implemented("eth_mining") } @@ -2883,10 +1221,10 @@ impl EthNamespaceT for fn fee_history( &self, block_count: U64, - _newest_block: zksync_types::api::BlockNumber, + _newest_block: BlockNumber, reward_percentiles: Vec, - ) -> jsonrpc_core::BoxFuture> { - let inner = Arc::clone(&self.inner); + ) -> RpcResult { + let inner = self.get_inner().clone(); Box::pin(async move { let reader = inner @@ -2927,18 +1265,20 @@ impl EthNamespaceT for mod tests { use crate::{ cache::CacheConfig, + fork::ForkDetails, http_fork_source::HttpForkSource, - node::InMemoryNode, + node::{compute_hash, InMemoryNode, Snapshot}, testing::{ self, default_tx_debug_info, ForkBlockConfig, LogBuilder, MockServer, TransactionResponseBuilder, }, }; use maplit::hashmap; - use zksync_basic_types::Nonce; + 
use zksync_basic_types::{web3, Nonce}; use zksync_types::{ - api::{BlockHashObject, BlockNumber, BlockNumberObject}, + api::{BlockHashObject, BlockNumber, BlockNumberObject, TransactionReceipt}, utils::deployed_address_create, + PackedEthSignature, }; use zksync_web3_decl::types::{SyncState, ValueOrArray}; @@ -3080,7 +1420,8 @@ mod tests { Default::default(), ); - let inner = node.inner.read().unwrap(); + let inner = node.get_inner(); + let inner = inner.read().unwrap(); assert!( inner.blocks.contains_key(&input_block_hash), "block wasn't cached" @@ -3689,17 +2030,18 @@ mod tests { u256_to_h256(U256::zero()), ); let input_storage_value = H256::repeat_byte(0xcd); - node.inner + node.get_inner() .write() .unwrap() .fork_storage .set_value(input_storage_key, input_storage_value); - let initial_miniblock = node.inner.read().unwrap().current_miniblock; + let initial_miniblock = node.get_inner().read().unwrap().current_miniblock; testing::apply_tx(&node, H256::repeat_byte(0x1)); - let current_miniblock = node.inner.read().unwrap().current_miniblock; + let current_miniblock = node.get_inner().read().unwrap().current_miniblock; - let reader = node.inner.read().unwrap(); + let inner = node.get_inner(); + let reader = inner.read().unwrap(); for miniblock in initial_miniblock..current_miniblock { let actual_cached_value = reader .block_hashes @@ -3791,7 +2133,7 @@ mod tests { let input_storage_value = H256::repeat_byte(0xcd); let node = InMemoryNode::::default(); - node.inner + node.get_inner() .write() .map(|mut writer| { let historical_block = Block:: { @@ -3860,7 +2202,7 @@ mod tests { None, Default::default(), ); - node.inner + node.get_inner() .write() .map(|mut writer| { let historical_block = Block:: { @@ -3952,7 +2294,7 @@ mod tests { AccountTreeId::new(deployed_address), u256_to_h256(U256::from(0)), ); - node.inner + node.get_inner() .write() .unwrap() .fork_storage @@ -3990,7 +2332,8 @@ mod tests { // populate tx receipts with 2 tx each having logs { - let mut 
writer = node.inner.write().unwrap(); + let inner = node.get_inner(); + let mut writer = inner.write().unwrap(); writer.tx_results.insert( H256::repeat_byte(0x1), TransactionResult { @@ -4048,7 +2391,8 @@ mod tests { // populate tx receipts with 2 tx each having logs { - let mut writer = node.inner.write().unwrap(); + let inner = node.get_inner(); + let mut writer = inner.write().unwrap(); writer.tx_results.insert( H256::repeat_byte(0x1), TransactionResult { @@ -4076,7 +2420,8 @@ mod tests { // populate tx receipts with 2 tx each having logs { - let mut writer = node.inner.write().unwrap(); + let inner = node.get_inner(); + let mut writer = inner.write().unwrap(); writer.tx_results.insert( H256::repeat_byte(0x1), TransactionResult { @@ -4162,7 +2507,8 @@ mod tests { #[tokio::test] async fn test_snapshot() { let node = InMemoryNode::::default(); - let mut inner = node.inner.write().unwrap(); + let inner = node.get_inner(); + let mut inner = inner.write().unwrap(); inner .blocks @@ -4264,7 +2610,8 @@ mod tests { #[tokio::test] async fn test_snapshot_restore() { let node = InMemoryNode::::default(); - let mut inner = node.inner.write().unwrap(); + let inner = node.get_inner(); + let mut inner = inner.write().unwrap(); inner .blocks @@ -4463,7 +2810,8 @@ mod tests { // store the block info with just the tx hash invariant { - let mut writer = node.inner.write().unwrap(); + let inner = node.get_inner(); + let mut writer = inner.write().unwrap(); writer.blocks.insert( input_block_hash, Block { @@ -4612,7 +2960,8 @@ mod tests { // store the block info with just the tx hash invariant { - let mut writer = node.inner.write().unwrap(); + let inner = node.get_inner(); + let mut writer = inner.write().unwrap(); writer .block_hashes .insert(input_block_number.as_u64(), input_block_hash); diff --git a/src/node/evm.rs b/src/node/evm.rs new file mode 100644 index 00000000..71af8c19 --- /dev/null +++ b/src/node/evm.rs @@ -0,0 +1,68 @@ +use zksync_basic_types::U64; +use 
zksync_core::api_server::web3::backend_jsonrpc::error::into_jsrpc_error; +use zksync_web3_decl::error::Web3Error; + +use crate::{ + fork::ForkSource, + namespaces::{EvmNamespaceT, RpcResult}, + node::InMemoryNode, + utils::IntoBoxedFuture, +}; + +impl EvmNamespaceT + for InMemoryNode +{ + fn increase_time(&self, time_delta_seconds: u64) -> RpcResult { + self.increase_time(time_delta_seconds) + .map_err(|err| { + tracing::error!("failed increasing time: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .into_boxed_future() + } + + fn evm_mine(&self) -> RpcResult { + self.mine_block() + .map_err(|err| { + tracing::error!("failed mining block: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .into_boxed_future() + } + + fn set_next_block_timestamp(&self, timestamp: u64) -> RpcResult { + self.set_next_block_timestamp(timestamp) + .map_err(|err| { + tracing::error!("failed setting time for next timestamp: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .into_boxed_future() + } + + fn set_time(&self, time: u64) -> RpcResult { + self.set_time(time) + .map_err(|err| { + tracing::error!("failed setting time: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .into_boxed_future() + } + + fn snapshot(&self) -> RpcResult { + self.snapshot() + .map_err(|err| { + tracing::error!("failed creating snapshot: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .into_boxed_future() + } + + fn revert_snapshot(&self, snapshot_id: U64) -> RpcResult { + self.revert_snapshot(snapshot_id) + .map_err(|err| { + tracing::error!("failed reverting snapshot: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .into_boxed_future() + } +} diff --git a/src/node/hardhat.rs b/src/node/hardhat.rs new file mode 100644 index 00000000..23abda98 --- /dev/null +++ b/src/node/hardhat.rs @@ -0,0 +1,68 @@ +use zksync_basic_types::{Address, U256, U64}; +use 
zksync_core::api_server::web3::backend_jsonrpc::error::into_jsrpc_error; +use zksync_web3_decl::error::Web3Error; + +use crate::{ + fork::ForkSource, + namespaces::{HardhatNamespaceT, RpcResult}, + node::InMemoryNode, + utils::IntoBoxedFuture, +}; + +impl HardhatNamespaceT + for InMemoryNode +{ + fn set_balance(&self, address: Address, balance: U256) -> RpcResult { + self.set_balance(address, balance) + .map_err(|err| { + tracing::error!("failed setting balance : {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .into_boxed_future() + } + + fn set_nonce(&self, address: Address, balance: U256) -> RpcResult { + self.set_nonce(address, balance) + .map_err(|err| { + tracing::error!("failed setting nonce: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .into_boxed_future() + } + + fn hardhat_mine(&self, num_blocks: Option, interval: Option) -> RpcResult { + self.mine_blocks(num_blocks, interval) + .map_err(|err| { + tracing::error!("failed mining blocks: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .into_boxed_future() + } + + fn impersonate_account(&self, address: Address) -> RpcResult { + self.impersonate_account(address) + .map_err(|err| { + tracing::error!("failed impersonating account: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .into_boxed_future() + } + + fn stop_impersonating_account(&self, address: Address) -> RpcResult { + InMemoryNode::::stop_impersonating_account(self, address) + .map_err(|err| { + tracing::error!("failed stopping to impersonate account: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .into_boxed_future() + } + + fn set_code(&self, address: Address, code: Vec) -> RpcResult<()> { + self.set_code(address, code) + .map_err(|err| { + tracing::error!("failed setting code: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .into_boxed_future() + } +} diff --git a/src/node/in_memory.rs b/src/node/in_memory.rs new file mode 100644 index 
00000000..a718918f --- /dev/null +++ b/src/node/in_memory.rs @@ -0,0 +1,1641 @@ +//! In-memory node, that supports forking other networks. +use crate::{ + bootloader_debug::{BootloaderDebug, BootloaderDebugTracer}, + console_log::ConsoleLogHandler, + deps::InMemoryStorage, + filters::EthFilters, + fork::{ForkDetails, ForkSource, ForkStorage}, + formatter, + observability::Observability, + system_contracts::{self, Options, SystemContracts}, + utils::{ + adjust_l1_gas_price_for_tx, bytecode_to_factory_dep, create_debug_output, to_human_size, + }, +}; +use clap::Parser; +use colored::Colorize; +use core::fmt::Display; +use indexmap::IndexMap; +use once_cell::sync::OnceCell; +use std::{ + cmp::{self}, + collections::{HashMap, HashSet}, + str::FromStr, + sync::{Arc, RwLock}, +}; + +use multivm::interface::{ + ExecutionResult, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + VmExecutionResultAndLogs, +}; +use multivm::vm_virtual_blocks::{ + constants::{BLOCK_GAS_LIMIT, BLOCK_OVERHEAD_PUBDATA, MAX_PUBDATA_PER_BLOCK}, + utils::{ + fee::derive_base_fee_and_gas_per_pubdata, + l2_blocks::load_last_l2_block, + overhead::{derive_overhead, OverheadCoeficients}, + }, + CallTracer, HistoryDisabled, Vm, VmTracer, +}; +use zksync_basic_types::{ + web3::signing::keccak256, Address, Bytes, L1BatchNumber, MiniblockNumber, H160, H256, U256, U64, +}; +use zksync_contracts::BaseSystemContracts; +use zksync_core::api_server::web3::backend_jsonrpc::error::into_jsrpc_error; +use zksync_state::{ReadStorage, StoragePtr, StorageView, WriteStorage}; +use zksync_types::{ + api::{Block, DebugCall, Log, TransactionReceipt, TransactionVariant}, + block::legacy_miniblock_hash, + fee::Fee, + get_nonce_key, + l2::L2Tx, + l2::TransactionType, + utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, + vm_trace::Call, + PackedEthSignature, StorageKey, StorageLogQueryType, StorageValue, Transaction, + ACCOUNT_CODE_STORAGE_ADDRESS, EIP_712_TX_TYPE, 
MAX_GAS_PER_PUBDATA_BYTE, MAX_L2_TX_GAS_LIMIT, +}; +use zksync_utils::{ + bytecode::{compress_bytecode, hash_bytecode}, + h256_to_account_address, h256_to_u256, u256_to_h256, +}; +use zksync_web3_decl::error::Web3Error; + +/// Max possible size of an ABI encoded tx (in bytes). +pub const MAX_TX_SIZE: usize = 1_000_000; +/// Timestamp of the first block (if not running in fork mode). +pub const NON_FORK_FIRST_BLOCK_TIMESTAMP: u64 = 1_000; +/// Network ID we use for the test node. +pub const TEST_NODE_NETWORK_ID: u32 = 260; +/// L1 Gas Price. +pub const L1_GAS_PRICE: u64 = 50_000_000_000; +/// L2 Gas Price (0.25 gwei). +pub const L2_GAS_PRICE: u64 = 250_000_000; +/// L1 Gas Price Scale Factor for gas estimation. +pub const ESTIMATE_GAS_L1_GAS_PRICE_SCALE_FACTOR: f64 = 1.2; +/// The max possible number of gas that `eth_estimateGas` is allowed to overestimate. +pub const ESTIMATE_GAS_PUBLISH_BYTE_OVERHEAD: u32 = 100; +/// Acceptable gas overestimation limit. +pub const ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION: u32 = 1_000; +/// The factor by which to scale the gasLimit. +pub const ESTIMATE_GAS_SCALE_FACTOR: f32 = 1.3; +/// The maximum number of previous blocks to store the state for. +pub const MAX_PREVIOUS_STATES: u16 = 128; +/// The zks protocol version. +pub const PROTOCOL_VERSION: &str = "zks/1"; + +pub fn compute_hash(block_number: u64, tx_hash: H256) -> H256 { + let digest = [&block_number.to_be_bytes()[..], tx_hash.as_bytes()].concat(); + H256(keccak256(&digest)) +} + +pub fn create_empty_block(block_number: u64, timestamp: u64, batch: u32) -> Block { + let hash = compute_hash(block_number, H256::zero()); + Block { + hash, + number: U64::from(block_number), + timestamp: U256::from(timestamp), + l1_batch_number: Some(U64::from(batch)), + transactions: vec![], + gas_used: U256::from(0), + gas_limit: U256::from(BLOCK_GAS_LIMIT), + ..Default::default() + } +} + +/// Information about the executed transaction. 
+#[derive(Debug, Clone)] +pub struct TxExecutionInfo { + pub tx: L2Tx, + // Batch number where transaction was executed. + pub batch_number: u32, + pub miniblock_number: u64, + pub result: VmExecutionResultAndLogs, +} + +#[derive(Debug, Default, clap::Parser, Clone, clap::ValueEnum, PartialEq, Eq)] +pub enum ShowCalls { + #[default] + None, + User, + System, + All, +} + +impl FromStr for ShowCalls { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_ref() { + "none" => Ok(ShowCalls::None), + "user" => Ok(ShowCalls::User), + "system" => Ok(ShowCalls::System), + "all" => Ok(ShowCalls::All), + _ => Err(format!( + "Unknown ShowCalls value {} - expected one of none|user|system|all.", + s + )), + } + } +} + +impl Display for ShowCalls { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!(f, "{:?}", self) + } +} + +#[derive(Debug, Default, Parser, Clone, clap::ValueEnum, PartialEq, Eq)] +pub enum ShowStorageLogs { + #[default] + None, + Read, + Write, + All, +} + +impl FromStr for ShowStorageLogs { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_ref() { + "none" => Ok(ShowStorageLogs::None), + "read" => Ok(ShowStorageLogs::Read), + "write" => Ok(ShowStorageLogs::Write), + "all" => Ok(ShowStorageLogs::All), + _ => Err(format!( + "Unknown ShowStorageLogs value {} - expected one of none|read|write|all.", + s + )), + } + } +} + +impl Display for ShowStorageLogs { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!(f, "{:?}", self) + } +} + +#[derive(Debug, Default, Parser, Clone, clap::ValueEnum, PartialEq, Eq)] +pub enum ShowVMDetails { + #[default] + None, + All, +} + +impl FromStr for ShowVMDetails { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_ref() { + "none" => Ok(ShowVMDetails::None), + "all" => Ok(ShowVMDetails::All), + _ => Err(format!( + "Unknown ShowVMDetails value {} - 
expected one of none|all.", + s + )), + } + } +} + +impl Display for ShowVMDetails { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!(f, "{:?}", self) + } +} + +#[derive(Debug, Default, Parser, Clone, clap::ValueEnum, PartialEq, Eq)] +pub enum ShowGasDetails { + #[default] + None, + All, +} + +impl FromStr for ShowGasDetails { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_ref() { + "none" => Ok(ShowGasDetails::None), + "all" => Ok(ShowGasDetails::All), + _ => Err(format!( + "Unknown ShowGasDetails value {} - expected one of none|all.", + s + )), + } + } +} + +impl Display for ShowGasDetails { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!(f, "{:?}", self) + } +} + +#[derive(Debug, Clone)] +pub struct TransactionResult { + pub info: TxExecutionInfo, + pub receipt: TransactionReceipt, + pub debug: DebugCall, +} + +impl TransactionResult { + /// Returns the debug information for the transaction. + /// If `only_top` is true - will only return the top level call. + pub fn debug_info(&self, only_top: bool) -> DebugCall { + let calls = if only_top { + vec![] + } else { + self.debug.calls.clone() + }; + DebugCall { + calls, + ..self.debug.clone() + } + } +} + +/// Helper struct for InMemoryNode. +/// S - is the Source of the Fork. +#[derive(Clone)] +pub struct InMemoryNodeInner { + /// The latest timestamp that was already generated. + /// Next block will be current_timestamp + 1 + pub current_timestamp: u64, + /// The latest batch number that was already generated. + /// Next block will be current_batch + 1 + pub current_batch: u32, + /// The latest miniblock number that was already generated. + /// Next transaction will go to the block current_miniblock + 1 + pub current_miniblock: u64, + /// The latest miniblock hash. 
+ pub current_miniblock_hash: H256, + pub l1_gas_price: u64, + // Map from transaction to details about the exeuction + pub tx_results: HashMap, + // Map from block hash to information about the block. + pub blocks: HashMap>, + // Map from block number to a block hash. + pub block_hashes: HashMap, + // Map from filter_id to the eth filter + pub filters: EthFilters, + // Underlying storage + pub fork_storage: ForkStorage, + // Debug level information. + pub show_calls: ShowCalls, + // Displays storage logs. + pub show_storage_logs: ShowStorageLogs, + // Displays VM details. + pub show_vm_details: ShowVMDetails, + // Gas details information. + pub show_gas_details: ShowGasDetails, + // If true - will contact openchain to resolve the ABI to function names. + pub resolve_hashes: bool, + pub console_log_handler: ConsoleLogHandler, + pub system_contracts: SystemContracts, + pub impersonated_accounts: HashSet
, + pub rich_accounts: HashSet, + /// Keeps track of historical states indexed via block hash. Limited to [MAX_PREVIOUS_STATES]. + pub previous_states: IndexMap>, + /// An optional handle to the observability stack + pub observability: Option, +} + +type L2TxResult = ( + HashMap, + VmExecutionResultAndLogs, + Vec, + Block, + HashMap>, + BlockContext, +); + +impl InMemoryNodeInner { + pub fn create_l1_batch_env( + &self, + storage: StoragePtr, + ) -> (L1BatchEnv, BlockContext) { + let last_l2_block_hash = if let Some(last_l2_block) = load_last_l2_block(storage) { + last_l2_block.hash + } else { + // This is the scenario of either the first L2 block ever or + // the first block after the upgrade for support of L2 blocks. + legacy_miniblock_hash(MiniblockNumber(self.current_miniblock as u32)) + }; + let block_ctx = BlockContext::from_current( + self.current_batch, + self.current_miniblock, + self.current_timestamp, + ); + let block_ctx = block_ctx.new_batch(); + let batch_env = L1BatchEnv { + // TODO: set the previous batch hash properly (take from fork, when forking, and from local storage, when this is not the first block). + previous_batch_hash: None, + number: L1BatchNumber::from(block_ctx.batch), + timestamp: block_ctx.timestamp, + l1_gas_price: self.l1_gas_price, + fair_l2_gas_price: L2_GAS_PRICE, + fee_account: H160::zero(), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + // the 'current_miniblock' contains the block that was already produced. + // So the next one should be one higher. + number: block_ctx.miniblock as u32, + timestamp: block_ctx.timestamp, + prev_block_hash: last_l2_block_hash, + // This is only used during zksyncEra block timestamp/number transition. + // In case of starting a new network, it doesn't matter. 
+ // In theory , when forking mainnet, we should match this value + // to the value that was set in the node at that time - but AFAIK + // we don't have any API for this - so this might result in slightly + // incorrect replays of transacions during the migration period, that + // depend on block number or timestamp. + max_virtual_blocks_to_create: 1, + }, + }; + + (batch_env, block_ctx) + } + + pub fn create_system_env( + &self, + base_system_contracts: BaseSystemContracts, + execution_mode: TxExecutionMode, + ) -> SystemEnv { + SystemEnv { + zk_porter_available: false, + // TODO: when forking, we could consider taking the protocol version id from the fork itself. + version: zksync_types::ProtocolVersionId::latest(), + base_system_smart_contracts: base_system_contracts, + gas_limit: BLOCK_GAS_LIMIT, + execution_mode, + default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, + chain_id: self.fork_storage.chain_id, + } + } + + /// Estimates the gas required for a given call request. + /// + /// # Arguments + /// + /// * `req` - A `CallRequest` struct representing the call request to estimate gas for. + /// + /// # Returns + /// + /// A `Result` with a `Fee` representing the estimated gas related data. 
+ pub fn estimate_gas_impl( + &self, + req: zksync_types::transaction_request::CallRequest, + ) -> jsonrpc_core::Result { + let mut request_with_gas_per_pubdata_overridden = req; + + if let Some(ref mut eip712_meta) = request_with_gas_per_pubdata_overridden.eip712_meta { + if eip712_meta.gas_per_pubdata == U256::zero() { + eip712_meta.gas_per_pubdata = MAX_GAS_PER_PUBDATA_BYTE.into(); + } + } + + let is_eip712 = request_with_gas_per_pubdata_overridden + .eip712_meta + .is_some(); + + let mut l2_tx = + match L2Tx::from_request(request_with_gas_per_pubdata_overridden.into(), MAX_TX_SIZE) { + Ok(tx) => tx, + Err(e) => { + let error = Web3Error::SerializationError(e); + return Err(into_jsrpc_error(error)); + } + }; + + let tx: Transaction = l2_tx.clone().into(); + let fair_l2_gas_price = L2_GAS_PRICE; + + // Calculate Adjusted L1 Price + let l1_gas_price = { + let current_l1_gas_price = + ((self.l1_gas_price as f64) * ESTIMATE_GAS_L1_GAS_PRICE_SCALE_FACTOR) as u64; + + // In order for execution to pass smoothly, we need to ensure that block's required gasPerPubdata will be + // <= to the one in the transaction itself. + adjust_l1_gas_price_for_tx( + current_l1_gas_price, + L2_GAS_PRICE, + tx.gas_per_pubdata_byte_limit(), + ) + }; + + let (base_fee, gas_per_pubdata_byte) = + derive_base_fee_and_gas_per_pubdata(l1_gas_price, fair_l2_gas_price); + + // Properly format signature + if l2_tx.common_data.signature.is_empty() { + l2_tx.common_data.signature = vec![0u8; 65]; + l2_tx.common_data.signature[64] = 27; + } + + // The user may not include the proper transaction type during the estimation of + // the gas fee. However, it is needed for the bootloader checks to pass properly. 
+ if is_eip712 { + l2_tx.common_data.transaction_type = TransactionType::EIP712Transaction; + } + + l2_tx.common_data.fee.gas_per_pubdata_limit = MAX_GAS_PER_PUBDATA_BYTE.into(); + l2_tx.common_data.fee.max_fee_per_gas = base_fee.into(); + l2_tx.common_data.fee.max_priority_fee_per_gas = base_fee.into(); + + let mut storage_view = StorageView::new(&self.fork_storage); + + // Calculate gas_for_bytecodes_pubdata + let pubdata_for_factory_deps = l2_tx + .execute + .factory_deps + .as_deref() + .unwrap_or_default() + .iter() + .map(|bytecode| { + if storage_view.is_bytecode_known(&hash_bytecode(bytecode)) { + return 0; + } + + let length = if let Ok(compressed) = compress_bytecode(bytecode) { + compressed.len() + } else { + bytecode.len() + }; + length as u32 + ESTIMATE_GAS_PUBLISH_BYTE_OVERHEAD + }) + .sum::(); + + if pubdata_for_factory_deps > MAX_PUBDATA_PER_BLOCK { + return Err(into_jsrpc_error(Web3Error::SubmitTransactionError( + "exceeds limit for published pubdata".into(), + Default::default(), + ))); + } + + let gas_for_bytecodes_pubdata: u32 = + pubdata_for_factory_deps * (gas_per_pubdata_byte as u32); + + let storage = storage_view.to_rc_ptr(); + + let execution_mode = TxExecutionMode::EstimateFee; + let (mut batch_env, _) = self.create_l1_batch_env(storage.clone()); + batch_env.l1_gas_price = l1_gas_price; + let system_env = self.create_system_env( + self.system_contracts.contracts_for_fee_estimate().clone(), + execution_mode, + ); + + // We are using binary search to find the minimal values of gas_limit under which the transaction succeeds + let mut lower_bound = 0; + let mut upper_bound = MAX_L2_TX_GAS_LIMIT as u32; + let mut attempt_count = 1; + + tracing::trace!("Starting gas estimation loop"); + while lower_bound + ESTIMATE_GAS_ACCEPTABLE_OVERESTIMATION < upper_bound { + let mid = (lower_bound + upper_bound) / 2; + tracing::trace!( + "Attempt {} (lower_bound: {}, upper_bound: {}, mid: {})", + attempt_count, + lower_bound, + upper_bound, + mid + ); + let 
try_gas_limit = gas_for_bytecodes_pubdata + mid; + + let estimate_gas_result = InMemoryNodeInner::estimate_gas_step( + l2_tx.clone(), + gas_per_pubdata_byte, + try_gas_limit, + l1_gas_price, + batch_env.clone(), + system_env.clone(), + &self.fork_storage, + ); + + if estimate_gas_result.result.is_failed() { + tracing::trace!("Attempt {} FAILED", attempt_count); + lower_bound = mid + 1; + } else { + tracing::trace!("Attempt {} SUCCEEDED", attempt_count); + upper_bound = mid; + } + attempt_count += 1; + } + + tracing::trace!("Gas Estimation Values:"); + tracing::trace!(" Final upper_bound: {}", upper_bound); + tracing::trace!(" ESTIMATE_GAS_SCALE_FACTOR: {}", ESTIMATE_GAS_SCALE_FACTOR); + tracing::trace!(" MAX_L2_TX_GAS_LIMIT: {}", MAX_L2_TX_GAS_LIMIT); + let tx_body_gas_limit = cmp::min( + MAX_L2_TX_GAS_LIMIT as u32, + (upper_bound as f32 * ESTIMATE_GAS_SCALE_FACTOR) as u32, + ); + let suggested_gas_limit = tx_body_gas_limit + gas_for_bytecodes_pubdata; + + let estimate_gas_result = InMemoryNodeInner::estimate_gas_step( + l2_tx.clone(), + gas_per_pubdata_byte, + suggested_gas_limit, + l1_gas_price, + batch_env, + system_env, + &self.fork_storage, + ); + + let coefficients = OverheadCoeficients::from_tx_type(EIP_712_TX_TYPE); + let overhead: u32 = derive_overhead( + suggested_gas_limit, + gas_per_pubdata_byte as u32, + tx.encoding_len(), + coefficients, + ); + + match estimate_gas_result.result { + ExecutionResult::Revert { output } => { + tracing::info!("{}", format!("Unable to estimate gas for the request with our suggested gas limit of {}. The transaction is most likely unexecutable. 
Breakdown of estimation:", suggested_gas_limit + overhead).red()); + tracing::info!( + "{}", + format!( + "\tEstimated transaction body gas cost: {}", + tx_body_gas_limit + ) + .red() + ); + tracing::info!( + "{}", + format!("\tGas for pubdata: {}", gas_for_bytecodes_pubdata).red() + ); + tracing::info!("{}", format!("\tOverhead: {}", overhead).red()); + let message = output.to_string(); + let pretty_message = format!( + "execution reverted{}{}", + if message.is_empty() { "" } else { ": " }, + message + ); + let data = output.encoded_data(); + tracing::info!("{}", pretty_message.on_red()); + Err(into_jsrpc_error(Web3Error::SubmitTransactionError( + pretty_message, + data, + ))) + } + ExecutionResult::Halt { reason } => { + tracing::info!("{}", format!("Unable to estimate gas for the request with our suggested gas limit of {}. The transaction is most likely unexecutable. Breakdown of estimation:", suggested_gas_limit + overhead).red()); + tracing::info!( + "{}", + format!( + "\tEstimated transaction body gas cost: {}", + tx_body_gas_limit + ) + .red() + ); + tracing::info!( + "{}", + format!("\tGas for pubdata: {}", gas_for_bytecodes_pubdata).red() + ); + tracing::info!("{}", format!("\tOverhead: {}", overhead).red()); + let message = reason.to_string(); + let pretty_message = format!( + "execution reverted{}{}", + if message.is_empty() { "" } else { ": " }, + message + ); + + tracing::info!("{}", pretty_message.on_red()); + Err(into_jsrpc_error(Web3Error::SubmitTransactionError( + pretty_message, + vec![], + ))) + } + ExecutionResult::Success { .. } => { + let full_gas_limit = match tx_body_gas_limit + .overflowing_add(gas_for_bytecodes_pubdata + overhead) + { + (value, false) => value, + (_, true) => { + tracing::info!("{}", "Overflow when calculating gas estimation. 
We've exceeded the block gas limit by summing the following values:".red()); + tracing::info!( + "{}", + format!( + "\tEstimated transaction body gas cost: {}", + tx_body_gas_limit + ) + .red() + ); + tracing::info!( + "{}", + format!("\tGas for pubdata: {}", gas_for_bytecodes_pubdata).red() + ); + tracing::info!("{}", format!("\tOverhead: {}", overhead).red()); + return Err(into_jsrpc_error(Web3Error::SubmitTransactionError( + "exceeds block gas limit".into(), + Default::default(), + ))); + } + }; + + tracing::trace!("Gas Estimation Results"); + tracing::trace!(" tx_body_gas_limit: {}", tx_body_gas_limit); + tracing::trace!(" gas_for_bytecodes_pubdata: {}", gas_for_bytecodes_pubdata); + tracing::trace!(" overhead: {}", overhead); + tracing::trace!(" full_gas_limit: {}", full_gas_limit); + let fee = Fee { + max_fee_per_gas: base_fee.into(), + max_priority_fee_per_gas: 0u32.into(), + gas_limit: full_gas_limit.into(), + gas_per_pubdata_limit: gas_per_pubdata_byte.into(), + }; + Ok(fee) + } + } + } + + /// Runs fee estimation against a sandbox vm with the given gas_limit. 
+ #[allow(clippy::too_many_arguments)] + fn estimate_gas_step( + mut l2_tx: L2Tx, + gas_per_pubdata_byte: u64, + tx_gas_limit: u32, + l1_gas_price: u64, + mut batch_env: L1BatchEnv, + system_env: SystemEnv, + fork_storage: &ForkStorage, + ) -> VmExecutionResultAndLogs { + let tx: Transaction = l2_tx.clone().into(); + let l1_gas_price = + adjust_l1_gas_price_for_tx(l1_gas_price, L2_GAS_PRICE, tx.gas_per_pubdata_byte_limit()); + + let coefficients = OverheadCoeficients::from_tx_type(EIP_712_TX_TYPE); + // Set gas_limit for transaction + let gas_limit_with_overhead = tx_gas_limit + + derive_overhead( + tx_gas_limit, + gas_per_pubdata_byte as u32, + tx.encoding_len(), + coefficients, + ); + l2_tx.common_data.fee.gas_limit = gas_limit_with_overhead.into(); + + let storage = StorageView::new(fork_storage).to_rc_ptr(); + + // The nonce needs to be updated + let nonce = l2_tx.nonce(); + let nonce_key = get_nonce_key(&l2_tx.initiator_account()); + let full_nonce = storage.borrow_mut().read_value(&nonce_key); + let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); + let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce); + storage + .borrow_mut() + .set_value(nonce_key, u256_to_h256(enforced_full_nonce)); + + // We need to explicitly put enough balance into the account of the users + let payer = l2_tx.payer(); + let balance_key = storage_key_for_eth_balance(&payer); + let mut current_balance = h256_to_u256(storage.borrow_mut().read_value(&balance_key)); + let added_balance = l2_tx.common_data.fee.gas_limit * l2_tx.common_data.fee.max_fee_per_gas; + current_balance += added_balance; + storage + .borrow_mut() + .set_value(balance_key, u256_to_h256(current_balance)); + + batch_env.l1_gas_price = l1_gas_price; + + let mut vm = Vm::new(batch_env, system_env, storage, HistoryDisabled); + + let tx: Transaction = l2_tx.into(); + vm.push_transaction(tx); + + vm.execute(VmExecutionMode::OneTx) + } + + /// Sets the 
`impersonated_account` field of the node. + /// This field is used to override the `tx.initiator_account` field of the transaction in the `run_l2_tx` method. + pub fn set_impersonated_account(&mut self, address: Address) -> bool { + self.impersonated_accounts.insert(address) + } + + /// Clears the `impersonated_account` field of the node. + pub fn stop_impersonating_account(&mut self, address: Address) -> bool { + self.impersonated_accounts.remove(&address) + } + + /// Archives the current state for later queries. + pub fn archive_state(&mut self) -> Result<(), String> { + if self.previous_states.len() > MAX_PREVIOUS_STATES as usize { + if let Some(entry) = self.previous_states.shift_remove_index(0) { + tracing::debug!("removing archived state for previous block {:#x}", entry.0); + } + } + tracing::debug!( + "archiving state for {:#x} #{}", + self.current_miniblock_hash, + self.current_miniblock + ); + self.previous_states.insert( + self.current_miniblock_hash, + self.fork_storage + .inner + .read() + .map_err(|err| err.to_string())? + .raw_storage + .state + .clone(), + ); + + Ok(()) + } + + /// Creates a [Snapshot] of the current state of the node. 
+ pub fn snapshot(&self) -> Result { + let storage = self + .fork_storage + .inner + .read() + .map_err(|err| format!("failed acquiring read lock on storage: {:?}", err))?; + + Ok(Snapshot { + current_timestamp: self.current_timestamp, + current_batch: self.current_batch, + current_miniblock: self.current_miniblock, + current_miniblock_hash: self.current_miniblock_hash, + l1_gas_price: self.l1_gas_price, + tx_results: self.tx_results.clone(), + blocks: self.blocks.clone(), + block_hashes: self.block_hashes.clone(), + filters: self.filters.clone(), + impersonated_accounts: self.impersonated_accounts.clone(), + rich_accounts: self.rich_accounts.clone(), + previous_states: self.previous_states.clone(), + raw_storage: storage.raw_storage.clone(), + value_read_cache: storage.value_read_cache.clone(), + factory_dep_cache: storage.factory_dep_cache.clone(), + }) + } + + /// Restores a previously created [Snapshot] of the node. + pub fn restore_snapshot(&mut self, snapshot: Snapshot) -> Result<(), String> { + let mut storage = self + .fork_storage + .inner + .write() + .map_err(|err| format!("failed acquiring write lock on storage: {:?}", err))?; + + self.current_timestamp = snapshot.current_timestamp; + self.current_batch = snapshot.current_batch; + self.current_miniblock = snapshot.current_miniblock; + self.current_miniblock_hash = snapshot.current_miniblock_hash; + self.l1_gas_price = snapshot.l1_gas_price; + self.tx_results = snapshot.tx_results; + self.blocks = snapshot.blocks; + self.block_hashes = snapshot.block_hashes; + self.filters = snapshot.filters; + self.impersonated_accounts = snapshot.impersonated_accounts; + self.rich_accounts = snapshot.rich_accounts; + self.previous_states = snapshot.previous_states; + storage.raw_storage = snapshot.raw_storage; + storage.value_read_cache = snapshot.value_read_cache; + storage.factory_dep_cache = snapshot.factory_dep_cache; + + Ok(()) + } +} + +/// Creates a restorable snapshot for the [InMemoryNodeInner]. 
The snapshot contains all the necessary +/// data required to restore the [InMemoryNodeInner] state to a previous point in time. +#[derive(Debug, Clone)] +pub struct Snapshot { + pub(crate) current_timestamp: u64, + pub(crate) current_batch: u32, + pub(crate) current_miniblock: u64, + pub(crate) current_miniblock_hash: H256, + pub(crate) l1_gas_price: u64, + pub(crate) tx_results: HashMap, + pub(crate) blocks: HashMap>, + pub(crate) block_hashes: HashMap, + pub(crate) filters: EthFilters, + pub(crate) impersonated_accounts: HashSet
, + pub(crate) rich_accounts: HashSet, + pub(crate) previous_states: IndexMap>, + pub(crate) raw_storage: InMemoryStorage, + pub(crate) value_read_cache: HashMap, + pub(crate) factory_dep_cache: HashMap>>, +} + +/// Defines the configuration parameters for the [InMemoryNode]. +#[derive(Default, Debug, Clone)] +pub struct InMemoryNodeConfig { + pub show_calls: ShowCalls, + pub show_storage_logs: ShowStorageLogs, + pub show_vm_details: ShowVMDetails, + pub show_gas_details: ShowGasDetails, + pub resolve_hashes: bool, + pub system_contracts_options: system_contracts::Options, +} + +/// In-memory node, that can be used for local & unit testing. +/// It also supports the option of forking testnet/mainnet. +/// All contents are removed when object is destroyed. +#[derive(Clone)] +pub struct InMemoryNode { + /// A thread safe reference to the [InMemoryNodeInner]. + inner: Arc>>, + /// List of snapshots of the [InMemoryNodeInner]. This is bounded at runtime by [MAX_SNAPSHOTS]. + pub(crate) snapshots: Arc>>, +} + +fn contract_address_from_tx_result(execution_result: &VmExecutionResultAndLogs) -> Option { + for query in execution_result.logs.storage_logs.iter().rev() { + if query.log_type == StorageLogQueryType::InitialWrite + && query.log_query.address == ACCOUNT_CODE_STORAGE_ADDRESS + { + return Some(h256_to_account_address(&u256_to_h256(query.log_query.key))); + } + } + None +} + +impl Default for InMemoryNode { + fn default() -> Self { + InMemoryNode::new(None, None, InMemoryNodeConfig::default()) + } +} + +impl InMemoryNode { + pub fn new( + fork: Option>, + observability: Option, + config: InMemoryNodeConfig, + ) -> Self { + let inner = if let Some(f) = &fork { + let mut block_hashes = HashMap::::new(); + block_hashes.insert(f.l2_block.number.as_u64(), f.l2_block.hash); + let mut blocks = HashMap::>::new(); + blocks.insert(f.l2_block.hash, f.l2_block.clone()); + + InMemoryNodeInner { + current_timestamp: f.block_timestamp, + current_batch: f.l1_block.0, + 
current_miniblock: f.l2_miniblock, + current_miniblock_hash: f.l2_miniblock_hash, + l1_gas_price: f.l1_gas_price, + tx_results: Default::default(), + blocks, + block_hashes, + filters: Default::default(), + fork_storage: ForkStorage::new(fork, &config.system_contracts_options), + show_calls: config.show_calls, + show_storage_logs: config.show_storage_logs, + show_vm_details: config.show_vm_details, + show_gas_details: config.show_gas_details, + resolve_hashes: config.resolve_hashes, + console_log_handler: ConsoleLogHandler::default(), + system_contracts: SystemContracts::from_options(&config.system_contracts_options), + impersonated_accounts: Default::default(), + rich_accounts: HashSet::new(), + previous_states: Default::default(), + observability, + } + } else { + let mut block_hashes = HashMap::::new(); + block_hashes.insert(0, H256::zero()); + let mut blocks = HashMap::>::new(); + blocks.insert( + H256::zero(), + create_empty_block(0, NON_FORK_FIRST_BLOCK_TIMESTAMP, 0), + ); + + InMemoryNodeInner { + current_timestamp: NON_FORK_FIRST_BLOCK_TIMESTAMP, + current_batch: 0, + current_miniblock: 0, + current_miniblock_hash: H256::zero(), + l1_gas_price: L1_GAS_PRICE, + tx_results: Default::default(), + blocks, + block_hashes, + filters: Default::default(), + fork_storage: ForkStorage::new(fork, &config.system_contracts_options), + show_calls: config.show_calls, + show_storage_logs: config.show_storage_logs, + show_vm_details: config.show_vm_details, + show_gas_details: config.show_gas_details, + resolve_hashes: config.resolve_hashes, + console_log_handler: ConsoleLogHandler::default(), + system_contracts: SystemContracts::from_options(&config.system_contracts_options), + impersonated_accounts: Default::default(), + rich_accounts: HashSet::new(), + previous_states: Default::default(), + observability, + } + }; + + InMemoryNode { + inner: Arc::new(RwLock::new(inner)), + snapshots: Default::default(), + } + } + + pub fn get_inner(&self) -> Arc>> { + self.inner.clone() 
+ } + + /// Applies multiple transactions - but still one per L1 batch. + pub fn apply_txs(&self, txs: Vec) -> Result<(), String> { + tracing::info!("Running {:?} transactions (one per batch)", txs.len()); + + for tx in txs { + self.run_l2_tx(tx, TxExecutionMode::VerifyExecute)?; + } + + Ok(()) + } + + /// Adds a lot of tokens to a given account. + pub fn set_rich_account(&self, address: H160) { + let key = storage_key_for_eth_balance(&address); + + let mut inner = match self.inner.write() { + Ok(guard) => guard, + Err(e) => { + tracing::info!("Failed to acquire write lock: {}", e); + return; + } + }; + + let keys = { + let mut storage_view = StorageView::new(&inner.fork_storage); + storage_view.set_value(key, u256_to_h256(U256::from(10u128.pow(30)))); + storage_view.modified_storage_keys().clone() + }; + + for (key, value) in keys.iter() { + inner.fork_storage.set_value(*key, *value); + } + inner.rich_accounts.insert(address); + } + + /// Runs L2 'eth call' method - that doesn't commit to a block. + pub fn run_l2_call(&self, mut l2_tx: L2Tx) -> Result { + let execution_mode = TxExecutionMode::EthCall; + + let inner = self + .inner + .write() + .map_err(|e| format!("Failed to acquire write lock: {}", e))?; + + let storage = StorageView::new(&inner.fork_storage).to_rc_ptr(); + + let bootloader_code = inner.system_contracts.contracts_for_l2_call(); + + // init vm + + let (batch_env, _) = inner.create_l1_batch_env(storage.clone()); + let system_env = inner.create_system_env(bootloader_code.clone(), execution_mode); + + let mut vm = Vm::new(batch_env, system_env, storage, HistoryDisabled); + + // We must inject *some* signature (otherwise bootloader code fails to generate hash). 
+ if l2_tx.common_data.signature.is_empty() { + l2_tx.common_data.signature = PackedEthSignature::default().serialize_packed().into(); + } + + let tx: Transaction = l2_tx.into(); + vm.push_transaction(tx); + + let call_tracer_result = Arc::new(OnceCell::default()); + + let custom_tracers = + vec![ + Box::new(CallTracer::new(call_tracer_result.clone(), HistoryDisabled)) + as Box>, HistoryDisabled>>, + ]; + + let tx_result = vm.inspect(custom_tracers, VmExecutionMode::OneTx); + + let call_traces = Arc::try_unwrap(call_tracer_result) + .unwrap() + .take() + .unwrap_or_default(); + + match &tx_result.result { + ExecutionResult::Success { output } => { + tracing::info!("Call: {}", "SUCCESS".green()); + let output_bytes = zksync_basic_types::Bytes::from(output.clone()); + tracing::info!("Output: {}", serde_json::to_string(&output_bytes).unwrap()); + } + ExecutionResult::Revert { output } => { + tracing::info!("Call: {}: {}", "FAILED".red(), output); + } + ExecutionResult::Halt { reason } => { + tracing::info!("Call: {} {}", "HALTED".red(), reason) + } + }; + + tracing::info!("=== Console Logs: "); + for call in &call_traces { + inner.console_log_handler.handle_call_recursive(call); + } + + tracing::info!("=== Call traces:"); + for call in &call_traces { + formatter::print_call(call, 0, &inner.show_calls, inner.resolve_hashes); + } + + Ok(tx_result.result) + } + + fn display_detailed_gas_info( + &self, + bootloader_debug_result: Option<&eyre::Result>, + spent_on_pubdata: u32, + ) -> eyre::Result<(), String> { + if let Some(bootloader_result) = bootloader_debug_result { + let bootloader_debug = bootloader_result.clone()?; + + tracing::info!("┌─────────────────────────┐"); + tracing::info!("│ GAS DETAILS │"); + tracing::info!("└─────────────────────────┘"); + + // Total amount of gas (should match tx.gas_limit). 
+ let total_gas_limit = bootloader_debug + .total_gas_limit_from_user + .saturating_sub(bootloader_debug.reserved_gas); + + let intrinsic_gas = total_gas_limit - bootloader_debug.gas_limit_after_intrinsic; + let gas_for_validation = + bootloader_debug.gas_limit_after_intrinsic - bootloader_debug.gas_after_validation; + + let gas_spent_on_compute = bootloader_debug.gas_spent_on_execution + - bootloader_debug.gas_spent_on_bytecode_preparation; + + let gas_used = intrinsic_gas + + gas_for_validation + + bootloader_debug.gas_spent_on_bytecode_preparation + + gas_spent_on_compute; + + tracing::info!( + "Gas - Limit: {} | Used: {} | Refunded: {}", + to_human_size(total_gas_limit), + to_human_size(gas_used), + to_human_size(bootloader_debug.refund_by_operator) + ); + + if bootloader_debug.total_gas_limit_from_user != total_gas_limit { + tracing::info!( + "{}", + format!( + " WARNING: user actually provided more gas {}, but system had a lower max limit.", + to_human_size(bootloader_debug.total_gas_limit_from_user) + ) + .yellow() + ); + } + if bootloader_debug.refund_computed != bootloader_debug.refund_by_operator { + tracing::info!( + "{}", + format!( + " WARNING: Refund by VM: {}, but operator refunded more: {}", + to_human_size(bootloader_debug.refund_computed), + to_human_size(bootloader_debug.refund_by_operator) + ) + .yellow() + ); + } + + if bootloader_debug.refund_computed + gas_used != total_gas_limit { + tracing::info!( + "{}", + format!( + " WARNING: Gas totals don't match. 
{} != {} , delta: {}", + to_human_size(bootloader_debug.refund_computed + gas_used), + to_human_size(total_gas_limit), + to_human_size( + total_gas_limit.abs_diff(bootloader_debug.refund_computed + gas_used) + ) + ) + .yellow() + ); + } + + let bytes_published = spent_on_pubdata / bootloader_debug.gas_per_pubdata.as_u32(); + + tracing::info!( + "During execution published {} bytes to L1, @{} each - in total {} gas", + to_human_size(bytes_published.into()), + to_human_size(bootloader_debug.gas_per_pubdata), + to_human_size(spent_on_pubdata.into()) + ); + + tracing::info!("Out of {} gas used, we spent:", to_human_size(gas_used)); + tracing::info!( + " {:>15} gas ({:>2}%) for transaction setup", + to_human_size(intrinsic_gas), + to_human_size(intrinsic_gas * 100 / gas_used) + ); + tracing::info!( + " {:>15} gas ({:>2}%) for bytecode preparation (decompression etc)", + to_human_size(bootloader_debug.gas_spent_on_bytecode_preparation), + to_human_size(bootloader_debug.gas_spent_on_bytecode_preparation * 100 / gas_used) + ); + tracing::info!( + " {:>15} gas ({:>2}%) for account validation", + to_human_size(gas_for_validation), + to_human_size(gas_for_validation * 100 / gas_used) + ); + tracing::info!( + " {:>15} gas ({:>2}%) for computations (opcodes)", + to_human_size(gas_spent_on_compute), + to_human_size(gas_spent_on_compute * 100 / gas_used) + ); + + tracing::info!(""); + tracing::info!(""); + tracing::info!( + "{}", + "=== Transaction setup cost breakdown ===".to_owned().bold(), + ); + + tracing::info!("Total cost: {}", to_human_size(intrinsic_gas).bold()); + tracing::info!( + " {:>15} gas ({:>2}%) fixed cost", + to_human_size(bootloader_debug.intrinsic_overhead), + to_human_size(bootloader_debug.intrinsic_overhead * 100 / intrinsic_gas) + ); + tracing::info!( + " {:>15} gas ({:>2}%) operator cost", + to_human_size(bootloader_debug.operator_overhead), + to_human_size(bootloader_debug.operator_overhead * 100 / intrinsic_gas) + ); + + tracing::info!(""); + 
tracing::info!( + " FYI: operator could have charged up to: {}, so you got {}% discount", + to_human_size(bootloader_debug.required_overhead), + to_human_size( + (bootloader_debug.required_overhead - bootloader_debug.operator_overhead) * 100 + / bootloader_debug.required_overhead + ) + ); + + let publish_block_l1_bytes = BLOCK_OVERHEAD_PUBDATA; + tracing::info!( + "Publishing full block costs the operator up to: {}, where {} is due to {} bytes published to L1", + to_human_size(bootloader_debug.total_overhead_for_block), + to_human_size(bootloader_debug.gas_per_pubdata * publish_block_l1_bytes), + to_human_size(publish_block_l1_bytes.into()) + ); + tracing::info!("Your transaction has contributed to filling up the block in the following way (we take the max contribution as the cost):"); + tracing::info!( + " Circuits overhead:{:>15} ({}% of the full block: {})", + to_human_size(bootloader_debug.overhead_for_circuits), + to_human_size( + bootloader_debug.overhead_for_circuits * 100 + / bootloader_debug.total_overhead_for_block + ), + to_human_size(bootloader_debug.total_overhead_for_block) + ); + tracing::info!( + " Length overhead: {:>15}", + to_human_size(bootloader_debug.overhead_for_length) + ); + tracing::info!( + " Slot overhead: {:>15}", + to_human_size(bootloader_debug.overhead_for_slot) + ); + Ok(()) + } else { + Err("Booloader tracer didn't finish.".to_owned()) + } + } + + /// Executes the given L2 transaction and returns all the VM logs. 
+ pub fn run_l2_tx_inner( + &self, + l2_tx: L2Tx, + execution_mode: TxExecutionMode, + ) -> Result { + let inner = self + .inner + .write() + .map_err(|e| format!("Failed to acquire write lock: {}", e))?; + + let storage = StorageView::new(&inner.fork_storage).to_rc_ptr(); + + let (batch_env, block_ctx) = inner.create_l1_batch_env(storage.clone()); + + // if we are impersonating an account, we need to use non-verifying system contracts + let nonverifying_contracts; + let bootloader_code = { + if inner + .impersonated_accounts + .contains(&l2_tx.common_data.initiator_address) + { + tracing::info!( + "🕵️ Executing tx from impersonated account {:?}", + l2_tx.common_data.initiator_address + ); + nonverifying_contracts = + SystemContracts::from_options(&Options::BuiltInWithoutSecurity); + nonverifying_contracts.contracts(execution_mode) + } else { + inner.system_contracts.contracts(execution_mode) + } + }; + let system_env = inner.create_system_env(bootloader_code.clone(), execution_mode); + + let mut vm = Vm::new( + batch_env.clone(), + system_env, + storage.clone(), + HistoryDisabled, + ); + + let tx: Transaction = l2_tx.clone().into(); + + vm.push_transaction(tx.clone()); + + let call_tracer_result = Arc::new(OnceCell::default()); + let bootloader_debug_result = Arc::new(OnceCell::default()); + + let custom_tracers = vec![ + Box::new(CallTracer::new(call_tracer_result.clone(), HistoryDisabled)) + as Box>, HistoryDisabled>>, + Box::new(BootloaderDebugTracer { + result: bootloader_debug_result.clone(), + }) as Box>, HistoryDisabled>>, + ]; + + let tx_result = vm.inspect(custom_tracers, VmExecutionMode::OneTx); + + let call_traces = call_tracer_result.get().unwrap(); + + let spent_on_pubdata = + tx_result.statistics.gas_used - tx_result.statistics.computational_gas_used; + + tracing::info!("┌─────────────────────────┐"); + tracing::info!("│ TRANSACTION SUMMARY │"); + tracing::info!("└─────────────────────────┘"); + + match &tx_result.result { + ExecutionResult::Success 
{ .. } => tracing::info!("Transaction: {}", "SUCCESS".green()), + ExecutionResult::Revert { .. } => tracing::info!("Transaction: {}", "FAILED".red()), + ExecutionResult::Halt { .. } => tracing::info!("Transaction: {}", "HALTED".red()), + } + + tracing::info!("Initiator: {:?}", tx.initiator_account()); + tracing::info!("Payer: {:?}", tx.payer()); + tracing::info!( + "Gas - Limit: {} | Used: {} | Refunded: {}", + to_human_size(tx.gas_limit()), + to_human_size(tx.gas_limit() - tx_result.refunds.gas_refunded), + to_human_size(tx_result.refunds.gas_refunded.into()) + ); + + match inner.show_gas_details { + ShowGasDetails::None => tracing::info!( + "Use --show-gas-details flag or call config_setShowGasDetails to display more info" + ), + ShowGasDetails::All => { + if self + .display_detailed_gas_info(bootloader_debug_result.get(), spent_on_pubdata) + .is_err() + { + tracing::info!( + "{}", + "!!! FAILED TO GET DETAILED GAS INFO !!!".to_owned().red() + ); + } + } + } + + if inner.show_storage_logs != ShowStorageLogs::None { + tracing::info!(""); + tracing::info!("┌──────────────────┐"); + tracing::info!("│ STORAGE LOGS │"); + tracing::info!("└──────────────────┘"); + } + + for log_query in &tx_result.logs.storage_logs { + match inner.show_storage_logs { + ShowStorageLogs::Write => { + if matches!( + log_query.log_type, + StorageLogQueryType::RepeatedWrite | StorageLogQueryType::InitialWrite + ) { + formatter::print_logs(log_query); + } + } + ShowStorageLogs::Read => { + if log_query.log_type == StorageLogQueryType::Read { + formatter::print_logs(log_query); + } + } + ShowStorageLogs::All => { + formatter::print_logs(log_query); + } + _ => {} + } + } + + if inner.show_vm_details != ShowVMDetails::None { + formatter::print_vm_details(&tx_result); + } + + tracing::info!(""); + tracing::info!("==== Console logs: "); + for call in call_traces { + inner.console_log_handler.handle_call_recursive(call); + } + tracing::info!(""); + let call_traces_count = if 
!call_traces.is_empty() { + // All calls/sub-calls are stored within the first call trace + call_traces[0].calls.len() + } else { + 0 + }; + tracing::info!( + "==== {} Use --show-calls flag or call config_setShowCalls to display more info.", + format!("{:?} call traces. ", call_traces_count).bold() + ); + + if inner.show_calls != ShowCalls::None { + for call in call_traces { + formatter::print_call(call, 0, &inner.show_calls, inner.resolve_hashes); + } + } + tracing::info!(""); + tracing::info!( + "==== {}", + format!("{} events", tx_result.logs.events.len()).bold() + ); + for event in &tx_result.logs.events { + formatter::print_event(event, inner.resolve_hashes); + } + + // The computed block hash here will be different than that in production. + let hash = compute_hash(block_ctx.miniblock, l2_tx.hash()); + + let mut transaction = zksync_types::api::Transaction::from(l2_tx); + let block_hash = inner + .block_hashes + .get(&inner.current_miniblock) + .ok_or(format!( + "Block hash not found for block: {}", + inner.current_miniblock + ))?; + transaction.block_hash = Some(*block_hash); + transaction.block_number = Some(U64::from(inner.current_miniblock)); + + let block = Block { + hash, + number: U64::from(block_ctx.miniblock), + timestamp: U256::from(batch_env.timestamp), + l1_batch_number: Some(U64::from(batch_env.number.0)), + transactions: vec![TransactionVariant::Full(transaction)], + gas_used: U256::from(tx_result.statistics.gas_used), + gas_limit: U256::from(BLOCK_GAS_LIMIT), + ..Default::default() + }; + + tracing::info!(""); + tracing::info!(""); + + let bytecodes = vm + .get_last_tx_compressed_bytecodes() + .iter() + .map(|b| bytecode_to_factory_dep(b.original.clone())) + .collect(); + + vm.execute(VmExecutionMode::Bootloader); + + let modified_keys = storage.borrow().modified_storage_keys().clone(); + Ok(( + modified_keys, + tx_result, + call_traces.clone(), + block, + bytecodes, + block_ctx, + )) + } + + /// Runs L2 transaction and commits it to a new 
block. + pub fn run_l2_tx(&self, l2_tx: L2Tx, execution_mode: TxExecutionMode) -> Result<(), String> { + let tx_hash = l2_tx.hash(); + tracing::info!(""); + tracing::info!("Executing {}", format!("{:?}", tx_hash).bold()); + + { + let mut inner = self + .inner + .write() + .map_err(|e| format!("Failed to acquire write lock: {}", e))?; + inner.filters.notify_new_pending_transaction(tx_hash); + } + + let (keys, result, call_traces, block, bytecodes, block_ctx) = + self.run_l2_tx_inner(l2_tx.clone(), execution_mode)?; + + if let ExecutionResult::Halt { reason } = result.result { + // Halt means that something went really bad with the transaction execution (in most cases invalid signature, + // but it could also be bootloader panic etc). + // In such case, we should not persist the VM data, and we should pretend that transaction never existed. + return Err(format!("Transaction HALT: {}", reason)); + } + // Write all the mutated keys (storage slots). + let mut inner = self + .inner + .write() + .map_err(|e| format!("Failed to acquire write lock: {}", e))?; + for (key, value) in keys.iter() { + inner.fork_storage.set_value(*key, *value); + } + + // Write all the factory deps. 
+ for (hash, code) in bytecodes.iter() { + inner.fork_storage.store_factory_dep( + u256_to_h256(*hash), + code.iter() + .flat_map(|entry| { + let mut bytes = vec![0u8; 32]; + entry.to_big_endian(&mut bytes); + bytes.to_vec() + }) + .collect(), + ) + } + + for (log_idx, event) in result.logs.events.iter().enumerate() { + inner.filters.notify_new_log( + &Log { + address: event.address, + topics: event.indexed_topics.clone(), + data: Bytes(event.value.clone()), + block_hash: Some(block.hash), + block_number: Some(block.number), + l1_batch_number: block.l1_batch_number, + transaction_hash: Some(tx_hash), + transaction_index: Some(U64::zero()), + log_index: Some(U256::from(log_idx)), + transaction_log_index: Some(U256::from(log_idx)), + log_type: None, + removed: None, + }, + block.number, + ); + } + let tx_receipt = TransactionReceipt { + transaction_hash: tx_hash, + transaction_index: U64::from(0), + block_hash: Some(block.hash), + block_number: Some(block.number), + l1_batch_tx_index: None, + l1_batch_number: block.l1_batch_number, + from: l2_tx.initiator_account(), + to: Some(l2_tx.recipient_account()), + cumulative_gas_used: Default::default(), + gas_used: Some(l2_tx.common_data.fee.gas_limit - result.refunds.gas_refunded), + contract_address: contract_address_from_tx_result(&result), + logs: result + .logs + .events + .iter() + .enumerate() + .map(|(log_idx, log)| Log { + address: log.address, + topics: log.indexed_topics.clone(), + data: Bytes(log.value.clone()), + block_hash: Some(block.hash), + block_number: Some(block.number), + l1_batch_number: block.l1_batch_number, + transaction_hash: Some(tx_hash), + transaction_index: Some(U64::zero()), + log_index: Some(U256::from(log_idx)), + transaction_log_index: Some(U256::from(log_idx)), + log_type: None, + removed: None, + }) + .collect(), + l2_to_l1_logs: vec![], + status: Some(if result.result.is_failed() { + U64::from(0) + } else { + U64::from(1) + }), + effective_gas_price: Some(L2_GAS_PRICE.into()), + 
..Default::default() + }; + let debug = create_debug_output(&l2_tx, &result, call_traces).expect("create debug output"); // OK to unwrap here as Halt is handled above + inner.tx_results.insert( + tx_hash, + TransactionResult { + info: TxExecutionInfo { + tx: l2_tx, + batch_number: block.l1_batch_number.unwrap_or_default().as_u32(), + miniblock_number: block.number.as_u64(), + result, + }, + receipt: tx_receipt, + debug, + }, + ); + + // With the introduction of 'l2 blocks' (and virtual blocks), + // we are adding one l2 block at the end of each batch (to handle things like remaining events etc). + // You can look at insert_fictive_l2_block function in VM to see how this fake block is inserted. + let block_ctx = block_ctx.new_block(); + let empty_block_at_end_of_batch = + create_empty_block(block_ctx.miniblock, block_ctx.timestamp, block_ctx.batch); + + inner.current_batch = inner.current_batch.saturating_add(1); + + for block in vec![block, empty_block_at_end_of_batch] { + // archive current state before we produce new batch/blocks + if let Err(err) = inner.archive_state() { + tracing::error!( + "failed archiving state for block {}: {}", + inner.current_miniblock, + err + ); + } + + inner.current_miniblock = inner.current_miniblock.saturating_add(1); + inner.current_timestamp = inner.current_timestamp.saturating_add(1); + + let actual_l1_batch_number = block + .l1_batch_number + .expect("block must have a l1_batch_number"); + if actual_l1_batch_number.as_u32() != inner.current_batch { + panic!( + "expected next block to have batch_number {}, got {}", + inner.current_batch, + actual_l1_batch_number.as_u32() + ); + } + + if block.number.as_u64() != inner.current_miniblock { + panic!( + "expected next block to have miniblock {}, got {}", + inner.current_miniblock, + block.number.as_u64() + ); + } + + if block.timestamp.as_u64() != inner.current_timestamp { + panic!( + "expected next block to have timestamp {}, got {}", + inner.current_timestamp, + 
block.timestamp.as_u64() + ); + } + + let block_hash = block.hash; + inner.current_miniblock_hash = block_hash; + inner.block_hashes.insert(block.number.as_u64(), block.hash); + inner.blocks.insert(block.hash, block); + inner.filters.notify_new_block(block_hash); + } + + Ok(()) + } +} + +/// Keeps track of a block's batch number, miniblock number and timestamp. +/// Useful for keeping track of the current context when creating multiple blocks. +pub struct BlockContext { + pub batch: u32, + pub miniblock: u64, + pub timestamp: u64, +} + +impl BlockContext { + /// Create the current instance that represents the latest block. + pub fn from_current(batch: u32, miniblock: u64, timestamp: u64) -> Self { + Self { + batch, + miniblock, + timestamp, + } + } + + /// Create the next batch instance that has all parameters incremented by `1`. + pub fn new_batch(&self) -> Self { + Self { + batch: self.batch.saturating_add(1), + miniblock: self.miniblock.saturating_add(1), + timestamp: self.timestamp.saturating_add(1), + } + } + + /// Create the next batch instance that uses the same batch number, and has all other parameters incremented by `1`. + pub fn new_block(&self) -> BlockContext { + Self { + batch: self.batch, + miniblock: self.miniblock.saturating_add(1), + timestamp: self.timestamp.saturating_add(1), + } + } +} diff --git a/src/node/in_memory_ext.rs b/src/node/in_memory_ext.rs new file mode 100644 index 00000000..90e5395b --- /dev/null +++ b/src/node/in_memory_ext.rs @@ -0,0 +1,901 @@ +use anyhow::anyhow; +use zksync_basic_types::{Address, U256, U64}; +use zksync_state::ReadStorage; +use zksync_types::{ + get_code_key, get_nonce_key, + utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +use crate::{ + fork::ForkSource, + node::InMemoryNode, + utils::{self, bytecode_to_factory_dep}, +}; + +type Result = anyhow::Result; + +/// The maximum number of [Snapshot]s to store. 
Each snapshot represents the node state +/// and can be used to revert the node to an earlier point in time. +const MAX_SNAPSHOTS: u8 = 100; + +impl InMemoryNode { + /// Increase the current timestamp for the node + /// + /// # Parameters + /// - `time_delta`: The number of seconds to increase time by + /// + /// # Returns + /// The applied time delta to `current_timestamp` value for the InMemoryNodeInner. + pub fn increase_time(&self, time_delta_seconds: u64) -> Result { + self.get_inner() + .write() + .map_err(|err| anyhow!("failed acquiring lock: {:?}", err)) + .map(|mut writer| { + if time_delta_seconds == 0 { + return time_delta_seconds; + } + + let time_delta = time_delta_seconds.saturating_mul(1000); + writer.current_timestamp = writer.current_timestamp.saturating_add(time_delta); + time_delta_seconds + }) + } + + /// Set the current timestamp for the node. The timestamp must be in future. + /// + /// # Parameters + /// - `timestamp`: The timestamp to set the time to + /// + /// # Returns + /// The new timestamp value for the InMemoryNodeInner. + pub fn set_next_block_timestamp(&self, timestamp: u64) -> Result { + self.get_inner() + .write() + .map_err(|err| anyhow!("failed acquiring lock: {:?}", err)) + .and_then(|mut writer| { + if timestamp < writer.current_timestamp { + Err(anyhow!( + "timestamp ({}) must be greater than current timestamp ({})", + timestamp, + writer.current_timestamp + )) + } else { + writer.current_timestamp = timestamp; + Ok(timestamp) + } + }) + } + + /// Set the current timestamp for the node. + /// Warning: This will allow you to move backwards in time, which may cause new blocks to appear to be + /// mined before old blocks. This will result in an invalid state. + /// + /// # Parameters + /// - `time`: The timestamp to set the time to + /// + /// # Returns + /// The difference between the `current_timestamp` and the new timestamp for the InMemoryNodeInner. 
+ pub fn set_time(&self, time: u64) -> Result { + self.get_inner() + .write() + .map_err(|err| anyhow!("failed acquiring lock: {:?}", err)) + .map(|mut writer| { + let time_diff = (time as i128).saturating_sub(writer.current_timestamp as i128); + writer.current_timestamp = time; + time_diff + }) + } + + /// Force a single block to be mined. + /// + /// Will mine an empty block (containing zero transactions) + /// + /// # Returns + /// The string "0x0". + pub fn mine_block(&self) -> Result { + self.get_inner() + .write() + .map_err(|err| anyhow!("failed acquiring lock: {:?}", err)) + .map(|mut writer| { + utils::mine_empty_blocks(&mut writer, 1, 1000); + tracing::info!("👷 Mined block #{}", writer.current_miniblock); + "0x0".to_string() + }) + } + + /// Snapshot the state of the blockchain at the current block. Takes no parameters. Returns the id of the snapshot + /// that was created. A snapshot can only be reverted once. After a successful evm_revert, the same snapshot id cannot + /// be used again. Consider creating a new snapshot after each evm_revert if you need to revert to the same + /// point multiple times. + /// + /// # Returns + /// The `U64` identifier for this snapshot. 
+ pub fn snapshot(&self) -> Result { + let snapshots = self.snapshots.clone(); + self.get_inner() + .write() + .map_err(|err| anyhow!("failed acquiring lock: {:?}", err)) + .and_then(|writer| { + // validate max snapshots + snapshots + .read() + .map_err(|err| anyhow!("failed acquiring read lock for snapshot: {:?}", err)) + .and_then(|snapshots| { + if snapshots.len() >= MAX_SNAPSHOTS as usize { + return Err(anyhow!( + "maximum number of '{}' snapshots exceeded", + MAX_SNAPSHOTS + )); + } + + Ok(()) + })?; + + // snapshot the node + let snapshot = writer.snapshot().map_err(|err| anyhow!("{}", err))?; + snapshots + .write() + .map(|mut snapshots| { + snapshots.push(snapshot); + tracing::info!("Created snapshot '{}'", snapshots.len()); + snapshots.len() + }) + .map_err(|err| anyhow!("failed storing snapshot: {:?}", err)) + .map(U64::from) + }) + } + + /// Revert the state of the blockchain to a previous snapshot. Takes a single parameter, + /// which is the snapshot id to revert to. This deletes the given snapshot, as well as any snapshots + /// taken after (e.g.: reverting to id 0x1 will delete snapshots with ids 0x1, 0x2, etc.) + /// + /// # Parameters + /// - `snapshot_id`: The snapshot id to revert. + /// + /// # Returns + /// `true` if a snapshot was reverted, otherwise `false`. + pub fn revert_snapshot(&self, snapshot_id: U64) -> Result { + let snapshots = self.snapshots.clone(); + self.get_inner() + .write() + .map_err(|err| anyhow!("failed acquiring lock: {:?}", err)) + .and_then(|mut writer| { + let mut snapshots = snapshots.write().map_err(|err| { + anyhow!("failed acquiring read lock for snapshots: {:?}", err) + })?; + let snapshot_id_index = snapshot_id.as_usize().saturating_sub(1); + if snapshot_id_index >= snapshots.len() { + return Err(anyhow!("no snapshot exists for the id '{}'", snapshot_id)); + } + + // remove all snapshots following the index and use the first snapshot for restore + let selected_snapshot = snapshots + .drain(snapshot_id_index..) 
+ .next() + .expect("unexpected failure, value must exist"); + + tracing::info!("Reverting node to snapshot '{snapshot_id:?}'"); + writer + .restore_snapshot(selected_snapshot) + .map(|_| { + tracing::info!("Reverting node to snapshot '{snapshot_id:?}'"); + true + }) + .map_err(|err| anyhow!("{}", err)) + }) + } + + pub fn set_balance(&self, address: Address, balance: U256) -> Result { + self.get_inner() + .write() + .map_err(|err| anyhow!("failed acquiring lock: {:?}", err)) + .map(|mut writer| { + let balance_key = storage_key_for_eth_balance(&address); + writer + .fork_storage + .set_value(balance_key, u256_to_h256(balance)); + tracing::info!( + "👷 Balance for address {:?} has been manually set to {} Wei", + address, + balance + ); + true + }) + } + + pub fn set_nonce(&self, address: Address, nonce: U256) -> Result { + self.get_inner() + .write() + .map_err(|err| anyhow!("failed acquiring lock: {:?}", err)) + .and_then(|mut writer| { + let nonce_key = get_nonce_key(&address); + let full_nonce = writer.fork_storage.read_value(&nonce_key); + let (mut account_nonce, mut deployment_nonce) = + decompose_full_nonce(h256_to_u256(full_nonce)); + if account_nonce >= nonce { + return Err(anyhow!( + "Account Nonce is already set to a higher value ({}, requested {})", + account_nonce, + nonce + )); + } + account_nonce = nonce; + if deployment_nonce >= nonce { + return Err(anyhow!( + "Deployment Nonce is already set to a higher value ({}, requested {})", + deployment_nonce, + nonce + )); + } + deployment_nonce = nonce; + let enforced_full_nonce = nonces_to_full_nonce(account_nonce, deployment_nonce); + tracing::info!( + "👷 Nonces for address {:?} have been set to {}", + address, + nonce + ); + writer + .fork_storage + .set_value(nonce_key, u256_to_h256(enforced_full_nonce)); + Ok(true) + }) + } + + pub fn mine_blocks(&self, num_blocks: Option, interval: Option) -> Result { + self.get_inner() + .write() + .map_err(|err| anyhow!("failed acquiring lock: {:?}", err)) + 
.and_then(|mut writer| { + let num_blocks = num_blocks.unwrap_or_else(|| U64::from(1)); + let interval_ms = interval + .unwrap_or_else(|| U64::from(1)) + .saturating_mul(1_000.into()); + if num_blocks.is_zero() { + return Err(anyhow!( + "Number of blocks must be greater than 0".to_string(), + )); + } + utils::mine_empty_blocks(&mut writer, num_blocks.as_u64(), interval_ms.as_u64()); + tracing::info!("👷 Mined {} blocks", num_blocks); + + Ok(true) + }) + } + + pub fn impersonate_account(&self, address: Address) -> Result { + self.get_inner() + .write() + .map_err(|err| anyhow!("failed acquiring lock: {:?}", err)) + .map(|mut writer| { + if writer.set_impersonated_account(address) { + tracing::info!("🕵️ Account {:?} has been impersonated", address); + true + } else { + tracing::info!("🕵️ Account {:?} was already impersonated", address); + false + } + }) + } + + pub fn stop_impersonating_account(&self, address: Address) -> Result { + self.get_inner() + .write() + .map_err(|err| anyhow!("failed acquiring lock: {:?}", err)) + .map(|mut writer| { + if writer.stop_impersonating_account(address) { + tracing::info!("🕵️ Stopped impersonating account {:?}", address); + true + } else { + tracing::info!( + "🕵️ Account {:?} was not impersonated, nothing to stop", + address + ); + false + } + }) + } + + pub fn set_code(&self, address: Address, code: Vec) -> Result<()> { + self.get_inner() + .write() + .map_err(|err| anyhow!("failed acquiring lock: {:?}", err)) + .map(|mut writer| { + let code_key = get_code_key(&address); + tracing::info!("set code for address {address:#x}"); + let (hash, code) = bytecode_to_factory_dep(code); + let hash = u256_to_h256(hash); + writer.fork_storage.store_factory_dep( + hash, + code.iter() + .flat_map(|entry| { + let mut bytes = vec![0u8; 32]; + entry.to_big_endian(&mut bytes); + bytes.to_vec() + }) + .collect(), + ); + writer.fork_storage.set_value(code_key, hash); + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use 
crate::{http_fork_source::HttpForkSource, node::InMemoryNode}; + use std::str::FromStr; + use zksync_basic_types::{Nonce, H256}; + use zksync_core::api_server::web3::backend_jsonrpc::namespaces::eth::EthNamespaceT; + use zksync_types::{api::BlockNumber, fee::Fee, l2::L2Tx, PackedEthSignature}; + + #[tokio::test] + async fn test_set_balance() { + let address = Address::from_str("0x36615Cf349d7F6344891B1e7CA7C72883F5dc049").unwrap(); + let node = InMemoryNode::::default(); + + let balance_before = node.get_balance(address, None).await.unwrap(); + + let result = node.set_balance(address, U256::from(1337)).unwrap(); + assert!(result); + + let balance_after = node.get_balance(address, None).await.unwrap(); + assert_eq!(balance_after, U256::from(1337)); + assert_ne!(balance_before, balance_after); + } + + #[tokio::test] + async fn test_set_nonce() { + let address = Address::from_str("0x36615Cf349d7F6344891B1e7CA7C72883F5dc049").unwrap(); + let node = InMemoryNode::::default(); + + let nonce_before = node.get_transaction_count(address, None).await.unwrap(); + + let result = node.set_nonce(address, U256::from(1337)).unwrap(); + assert!(result); + + let nonce_after = node.get_transaction_count(address, None).await.unwrap(); + assert_eq!(nonce_after, U256::from(1337)); + assert_ne!(nonce_before, nonce_after); + + // setting nonce lower than the current one should fail + let result = node.set_nonce(address, U256::from(1336)); + assert!(result.is_err()); + } + + #[tokio::test] + async fn test_mine_blocks_default() { + let node = InMemoryNode::::default(); + + let start_block = node + .get_block_by_number(zksync_types::api::BlockNumber::Latest, false) + .await + .unwrap() + .expect("block exists"); + + // test with defaults + let result = node.mine_blocks(None, None).expect("mine_blocks"); + assert!(result); + + let current_block = node + .get_block_by_number(zksync_types::api::BlockNumber::Latest, false) + .await + .unwrap() + .expect("block exists"); + + 
assert_eq!(start_block.number + 1, current_block.number); + assert_eq!(start_block.timestamp + 1, current_block.timestamp); + let result = node.mine_blocks(None, None).expect("mine_blocks"); + assert!(result); + + let current_block = node + .get_block_by_number(zksync_types::api::BlockNumber::Latest, false) + .await + .unwrap() + .expect("block exists"); + + assert_eq!(start_block.number + 2, current_block.number); + assert_eq!(start_block.timestamp + 2, current_block.timestamp); + } + + #[tokio::test] + async fn test_mine_blocks() { + let node = InMemoryNode::::default(); + + let start_block = node + .get_block_by_number(zksync_types::api::BlockNumber::Latest, false) + .await + .unwrap() + .expect("block exists"); + + let num_blocks = 5; + let interval = 3; + let start_timestamp = start_block.timestamp + 1; + + let result = node + .mine_blocks(Some(U64::from(num_blocks)), Some(U64::from(interval))) + .expect("mine blocks"); + assert!(result); + + for i in 0..num_blocks { + let current_block = node + .get_block_by_number(BlockNumber::Number(start_block.number + i + 1), false) + .await + .unwrap() + .expect("block exists"); + assert_eq!(start_block.number + i + 1, current_block.number); + assert_eq!( + start_timestamp + i * interval * 1_000, + current_block.timestamp + ); + } + } + + #[tokio::test] + async fn test_impersonate_account() { + let node = InMemoryNode::::default(); + let to_impersonate = + Address::from_str("0xd8da6bf26964af9d7eed9e03e53415d37aa96045").unwrap(); + + // give impersonated account some balance + let result = node.set_balance(to_impersonate, U256::exp10(18)).unwrap(); + assert!(result); + + // construct a tx + let mut tx = L2Tx::new( + Address::random(), + vec![], + Nonce(0), + Fee { + gas_limit: U256::from(1_000_000), + max_fee_per_gas: U256::from(250_000_000), + max_priority_fee_per_gas: U256::from(250_000_000), + gas_per_pubdata_limit: U256::from(20000), + }, + to_impersonate, + U256::one(), + None, + Default::default(), + ); + 
tx.set_input(vec![], H256::random()); + if tx.common_data.signature.is_empty() { + tx.common_data.signature = PackedEthSignature::default().serialize_packed().into(); + } + + // try to execute the tx- should fail without signature + assert!(node.apply_txs(vec![tx.clone()]).is_err()); + + // impersonate the account + let result = node + .impersonate_account(to_impersonate) + .expect("impersonate_account"); + + // result should be true + assert!(result); + + // impersonating the same account again should return false + let result = node + .impersonate_account(to_impersonate) + .expect("impersonate_account"); + assert!(!result); + + // execution should now succeed + assert!(node.apply_txs(vec![tx.clone()]).is_ok()); + + // stop impersonating the account + let result = node + .stop_impersonating_account(to_impersonate) + .expect("stop_impersonating_account"); + + // result should be true + assert!(result); + + // stop impersonating the same account again should return false + let result = node + .stop_impersonating_account(to_impersonate) + .expect("stop_impersonating_account"); + assert!(!result); + + // execution should now fail again + assert!(node.apply_txs(vec![tx]).is_err()); + } + + #[tokio::test] + async fn test_set_code() { + let address = Address::repeat_byte(0x1); + let node = InMemoryNode::::default(); + let new_code = vec![0x1u8; 32]; + + let code_before = node + .get_code(address, None) + .await + .expect("failed getting code") + .0; + assert_eq!(Vec::::default(), code_before); + + node.set_code(address, new_code.clone()) + .expect("failed setting code"); + + let code_after = node + .get_code(address, None) + .await + .expect("failed getting code") + .0; + assert_eq!(new_code, code_after); + } + + #[tokio::test] + async fn test_increase_time_zero_value() { + let node = InMemoryNode::::default(); + + let increase_value_seconds = 0u64; + let timestamp_before = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading 
timestamp"); + let expected_response = increase_value_seconds; + + let actual_response = node + .increase_time(increase_value_seconds) + .expect("failed increasing timestamp"); + let timestamp_after = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + + assert_eq!(expected_response, actual_response, "erroneous response"); + assert_eq!( + increase_value_seconds.saturating_mul(1000u64), + timestamp_after.saturating_sub(timestamp_before), + "timestamp did not increase by the specified amount", + ); + } + + #[tokio::test] + async fn test_increase_time_max_value() { + let node = InMemoryNode::::default(); + + let increase_value_seconds = u64::MAX; + let timestamp_before = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + assert_ne!(0, timestamp_before, "initial timestamp must be non zero",); + let expected_response = increase_value_seconds; + + let actual_response = node + .increase_time(increase_value_seconds) + .expect("failed increasing timestamp"); + let timestamp_after = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + + assert_eq!(expected_response, actual_response, "erroneous response"); + assert_eq!( + u64::MAX, + timestamp_after, + "timestamp did not saturate upon increase", + ); + } + + #[tokio::test] + async fn test_increase_time() { + let node = InMemoryNode::::default(); + + let increase_value_seconds = 100u64; + let timestamp_before = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + let expected_response = increase_value_seconds; + + let actual_response = node + .increase_time(increase_value_seconds) + .expect("failed increasing timestamp"); + let timestamp_after = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + + assert_eq!(expected_response, 
actual_response, "erroneous response"); + assert_eq!( + increase_value_seconds.saturating_mul(1000u64), + timestamp_after.saturating_sub(timestamp_before), + "timestamp did not increase by the specified amount", + ); + } + + #[tokio::test] + async fn test_set_next_block_timestamp_future() { + let node = InMemoryNode::::default(); + + let new_timestamp = 10_000u64; + let timestamp_before = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + assert_ne!( + timestamp_before, new_timestamp, + "timestamps must be different" + ); + let expected_response = new_timestamp; + + let actual_response = node + .set_next_block_timestamp(new_timestamp) + .expect("failed setting timestamp"); + let timestamp_after = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + + assert_eq!(expected_response, actual_response, "erroneous response"); + assert_eq!( + new_timestamp, timestamp_after, + "timestamp was not set correctly", + ); + } + + #[tokio::test] + async fn test_set_next_block_timestamp_past_fails() { + let node = InMemoryNode::::default(); + + let timestamp_before = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + + let new_timestamp = timestamp_before + 500; + node.set_next_block_timestamp(new_timestamp) + .expect("failed setting timestamp"); + + let result = node.set_next_block_timestamp(timestamp_before); + + assert!(result.is_err(), "expected an error for timestamp in past"); + } + + #[tokio::test] + async fn test_set_next_block_timestamp_same_value() { + let node = InMemoryNode::::default(); + + let new_timestamp = 1000u64; + let timestamp_before = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + assert_eq!(timestamp_before, new_timestamp, "timestamps must be same"); + let expected_response = new_timestamp; + + let actual_response = node 
+ .set_next_block_timestamp(new_timestamp) + .expect("failed setting timestamp"); + let timestamp_after = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + + assert_eq!(expected_response, actual_response, "erroneous response"); + assert_eq!( + timestamp_before, timestamp_after, + "timestamp must not change", + ); + } + + #[tokio::test] + async fn test_set_time_future() { + let node = InMemoryNode::::default(); + + let new_time = 10_000u64; + let timestamp_before = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + assert_ne!(timestamp_before, new_time, "timestamps must be different"); + let expected_response = 9000; + + let actual_response = node.set_time(new_time).expect("failed setting timestamp"); + let timestamp_after = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + + assert_eq!(expected_response, actual_response, "erroneous response"); + assert_eq!(new_time, timestamp_after, "timestamp was not set correctly",); + } + + #[tokio::test] + async fn test_set_time_past() { + let node = InMemoryNode::::default(); + + let new_time = 10u64; + let timestamp_before = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + assert_ne!(timestamp_before, new_time, "timestamps must be different"); + let expected_response = -990; + + let actual_response = node.set_time(new_time).expect("failed setting timestamp"); + let timestamp_after = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + + assert_eq!(expected_response, actual_response, "erroneous response"); + assert_eq!(new_time, timestamp_after, "timestamp was not set correctly",); + } + + #[tokio::test] + async fn test_set_time_same_value() { + let node = InMemoryNode::::default(); + + let new_time = 1000u64; + let 
timestamp_before = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + assert_eq!(timestamp_before, new_time, "timestamps must be same"); + let expected_response = 0; + + let actual_response = node.set_time(new_time).expect("failed setting timestamp"); + let timestamp_after = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .expect("failed reading timestamp"); + + assert_eq!(expected_response, actual_response, "erroneous response"); + assert_eq!( + timestamp_before, timestamp_after, + "timestamp must not change", + ); + } + + #[tokio::test] + async fn test_set_time_edges() { + let node = InMemoryNode::::default(); + + for new_time in [0, u64::MAX] { + let timestamp_before = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .unwrap_or_else(|_| panic!("case {}: failed reading timestamp", new_time)); + assert_ne!( + timestamp_before, new_time, + "case {new_time}: timestamps must be different" + ); + let expected_response = (new_time as i128).saturating_sub(timestamp_before as i128); + + let actual_response = node.set_time(new_time).expect("failed setting timestamp"); + let timestamp_after = node + .get_inner() + .read() + .map(|inner| inner.current_timestamp) + .unwrap_or_else(|_| panic!("case {}: failed reading timestamp", new_time)); + + assert_eq!( + expected_response, actual_response, + "case {new_time}: erroneous response" + ); + assert_eq!( + new_time, timestamp_after, + "case {new_time}: timestamp was not set correctly", + ); + } + } + + #[tokio::test] + async fn test_mine_block() { + let node = InMemoryNode::::default(); + + let start_block = node + .get_block_by_number(zksync_types::api::BlockNumber::Latest, false) + .await + .unwrap() + .expect("block exists"); + let result = node.mine_block().expect("mine_block"); + assert_eq!(&result, "0x0"); + + let current_block = node + .get_block_by_number(zksync_types::api::BlockNumber::Latest, false) + .await + 
.unwrap() + .expect("block exists"); + + assert_eq!(start_block.number + 1, current_block.number); + assert_eq!(start_block.timestamp + 1, current_block.timestamp); + + let result = node.mine_block().expect("mine_block"); + assert_eq!(&result, "0x0"); + + let current_block = node + .get_block_by_number(zksync_types::api::BlockNumber::Latest, false) + .await + .unwrap() + .expect("block exists"); + + assert_eq!(start_block.number + 2, current_block.number); + assert_eq!(start_block.timestamp + 2, current_block.timestamp); + } + + #[tokio::test] + async fn test_evm_snapshot_creates_incrementing_ids() { + let node = InMemoryNode::::default(); + + let snapshot_id_1 = node.snapshot().expect("failed creating snapshot 1"); + let snapshot_id_2 = node.snapshot().expect("failed creating snapshot 2"); + + assert_eq!(snapshot_id_1, U64::from(1)); + assert_eq!(snapshot_id_2, U64::from(2)); + } + + #[tokio::test] + async fn test_evm_revert_snapshot_restores_state() { + let node = InMemoryNode::::default(); + + let initial_block = node + .get_block_number() + .await + .expect("failed fetching block number"); + let snapshot_id = node.snapshot().expect("failed creating snapshot"); + node.mine_block().expect("mine_block"); + let current_block = node + .get_block_number() + .await + .expect("failed fetching block number"); + assert_eq!(current_block, initial_block + 1); + + let reverted = node + .revert_snapshot(snapshot_id) + .expect("failed reverting snapshot"); + assert!(reverted); + + let restored_block = node + .get_block_number() + .await + .expect("failed fetching block number"); + assert_eq!(restored_block, initial_block); + } + + #[tokio::test] + async fn test_evm_revert_snapshot_removes_all_snapshots_following_the_reverted_one() { + let node = InMemoryNode::::default(); + + let _snapshot_id_1 = node.snapshot().expect("failed creating snapshot"); + let snapshot_id_2 = node.snapshot().expect("failed creating snapshot"); + let _snapshot_id_3 = node.snapshot().expect("failed 
creating snapshot"); + assert_eq!(3, node.snapshots.read().unwrap().len()); + + let reverted = node + .revert_snapshot(snapshot_id_2) + .expect("failed reverting snapshot"); + assert!(reverted); + + assert_eq!(1, node.snapshots.read().unwrap().len()); + } + + #[tokio::test] + async fn test_evm_revert_snapshot_fails_for_invalid_snapshot_id() { + let node = InMemoryNode::::default(); + + let result = node.revert_snapshot(U64::from(100)); + assert!(result.is_err()); + } +} diff --git a/src/node/mod.rs b/src/node/mod.rs new file mode 100644 index 00000000..243f5876 --- /dev/null +++ b/src/node/mod.rs @@ -0,0 +1,13 @@ +//! In-memory node, that supports forking other networks. + +mod config; +mod debug; +mod eth; +mod evm; +mod hardhat; +mod in_memory; +mod in_memory_ext; +mod net; +mod zks; + +pub use in_memory::*; diff --git a/src/node/net.rs b/src/node/net.rs new file mode 100644 index 00000000..e61d60f9 --- /dev/null +++ b/src/node/net.rs @@ -0,0 +1,23 @@ +use zksync_basic_types::U256; + +use crate::{ + fork::ForkSource, + namespaces::{NetNamespaceT, Result}, + node::{InMemoryNode, TEST_NODE_NETWORK_ID}, +}; + +impl NetNamespaceT + for InMemoryNode +{ + fn net_version(&self) -> Result { + Ok(TEST_NODE_NETWORK_ID.to_string()) + } + + fn net_peer_count(&self) -> Result { + Ok(U256::from(0)) + } + + fn net_listening(&self) -> Result { + Ok(false) + } +} diff --git a/src/zks.rs b/src/node/zks.rs similarity index 87% rename from src/zks.rs rename to src/node/zks.rs index 2c9c7563..92f0ff18 100644 --- a/src/zks.rs +++ b/src/node/zks.rs @@ -1,15 +1,10 @@ -use std::{ - collections::HashMap, - sync::{Arc, RwLock}, -}; +use std::collections::HashMap; use bigdecimal::BigDecimal; +use colored::Colorize; use futures::FutureExt; use zksync_basic_types::{AccountTreeId, Address, L1BatchNumber, MiniblockNumber, U256}; -use zksync_core::api_server::web3::backend_jsonrpc::{ - error::{internal_error, into_jsrpc_error}, - namespaces::zks::ZksNamespaceT, -}; +use 
zksync_core::api_server::web3::backend_jsonrpc::error::{internal_error, into_jsrpc_error}; use zksync_state::ReadStorage; use zksync_types::{ api::{ @@ -28,25 +23,13 @@ use zksync_web3_decl::{ use crate::{ fork::ForkSource, - node::{InMemoryNodeInner, TransactionResult, L2_GAS_PRICE}, + namespaces::{RpcResult, ZksNamespaceT}, + node::{InMemoryNode, TransactionResult, L2_GAS_PRICE}, utils::{not_implemented, utc_datetime_from_epoch_ms, IntoBoxedFuture}, }; -use colored::Colorize; - -/// Mock implementation of ZksNamespace - used only in the test node. -pub struct ZkMockNamespaceImpl { - node: Arc>>, -} - -impl ZkMockNamespaceImpl { - /// Creates a new `Zks` instance with the given `node`. - pub fn new(node: Arc>>) -> Self { - Self { node } - } -} -impl ZksNamespaceT - for ZkMockNamespaceImpl +impl ZksNamespaceT + for InMemoryNode { /// Estimates the gas fee data required for a given call request. /// @@ -57,22 +40,15 @@ impl ZksNamespaceT /// # Returns /// /// A `BoxFuture` containing a `Result` with a `Fee` representing the estimated gas data required. - fn estimate_fee( - &self, - req: zksync_types::transaction_request::CallRequest, - ) -> jsonrpc_core::BoxFuture> { - let reader = match self.node.read() { - Ok(r) => r, - Err(_) => { - return futures::future::err(into_jsrpc_error(Web3Error::InternalError)).boxed() - } - }; - - let result: jsonrpc_core::Result = reader.estimate_gas_impl(req); - match result { - Ok(fee) => Ok(fee).into_boxed_future(), - Err(err) => return futures::future::err(err).boxed(), - } + fn estimate_fee(&self, req: zksync_types::transaction_request::CallRequest) -> RpcResult { + self.get_inner() + .read() + .map_err(|err| { + tracing::error!("failed acquiring lock: {:?}", err); + into_jsrpc_error(Web3Error::InternalError) + }) + .and_then(|reader| reader.estimate_gas_impl(req)) + .into_boxed_future() } /// Returns data of transactions in a block. 
@@ -87,8 +63,8 @@ impl ZksNamespaceT fn get_raw_block_transactions( &self, block_number: MiniblockNumber, - ) -> jsonrpc_core::BoxFuture>> { - let inner = self.node.clone(); + ) -> RpcResult> { + let inner = self.get_inner().clone(); Box::pin(async move { let reader = inner .read() @@ -148,26 +124,20 @@ impl ZksNamespaceT fn estimate_gas_l1_to_l2( &self, _req: zksync_types::transaction_request::CallRequest, - ) -> jsonrpc_core::BoxFuture> { + ) -> RpcResult { not_implemented("zks_estimateGasL1ToL2") } - fn get_main_contract( - &self, - ) -> jsonrpc_core::BoxFuture> { + fn get_main_contract(&self) -> RpcResult { not_implemented("zks_getMainContract") } - fn get_testnet_paymaster( - &self, - ) -> jsonrpc_core::BoxFuture>> { + fn get_testnet_paymaster(&self) -> RpcResult> { not_implemented("zks_getTestnetPaymaster") } - fn get_bridge_contracts( - &self, - ) -> jsonrpc_core::BoxFuture> { - let inner = self.node.clone(); + fn get_bridge_contracts(&self) -> RpcResult { + let inner = self.get_inner().clone(); Box::pin(async move { let reader = inner .read() @@ -197,9 +167,7 @@ impl ZksNamespaceT }) } - fn l1_chain_id( - &self, - ) -> jsonrpc_core::BoxFuture> { + fn l1_chain_id(&self) -> RpcResult { not_implemented("zks_L1ChainId") } @@ -208,7 +176,7 @@ impl ZksNamespaceT from: u32, limit: u8, ) -> jsonrpc_core::BoxFuture>> { - let inner = self.node.clone(); + let inner = self.get_inner().clone(); Box::pin(async move { let reader = inner .read() @@ -236,10 +204,7 @@ impl ZksNamespaceT }) } - fn get_token_price( - &self, - token_address: zksync_basic_types::Address, - ) -> jsonrpc_core::BoxFuture> { + fn get_token_price(&self, token_address: zksync_basic_types::Address) -> RpcResult { match format!("{:?}", token_address).to_lowercase().as_str() { "0x0000000000000000000000000000000000000000" => { // ETH @@ -286,7 +251,7 @@ impl ZksNamespaceT ) -> jsonrpc_core::BoxFuture< jsonrpc_core::Result>, > { - let inner = self.node.clone(); + let inner = self.get_inner().clone(); 
Box::pin({ self.get_confirmed_tokens(0, 100) .then(move |tokens| async move { @@ -321,8 +286,7 @@ impl ZksNamespaceT _sender: zksync_basic_types::Address, _msg: zksync_basic_types::H256, _l2_log_position: Option, - ) -> jsonrpc_core::BoxFuture>> - { + ) -> RpcResult> { not_implemented("zks_getL2ToL1MsgProof") } @@ -330,14 +294,11 @@ impl ZksNamespaceT &self, _tx_hash: zksync_basic_types::H256, _index: Option, - ) -> jsonrpc_core::BoxFuture>> - { + ) -> RpcResult> { not_implemented("zks_getL2ToL1LogProof") } - fn get_l1_batch_number( - &self, - ) -> jsonrpc_core::BoxFuture> { + fn get_l1_batch_number(&self) -> RpcResult { not_implemented("zks_L1BatchNumber") } @@ -353,9 +314,8 @@ impl ZksNamespaceT fn get_block_details( &self, block_number: zksync_basic_types::MiniblockNumber, - ) -> jsonrpc_core::BoxFuture>> - { - let inner = self.node.clone(); + ) -> RpcResult> { + let inner = self.get_inner().clone(); Box::pin(async move { let reader = inner .read() @@ -433,9 +393,8 @@ impl ZksNamespaceT fn get_transaction_details( &self, hash: zksync_basic_types::H256, - ) -> jsonrpc_core::BoxFuture>> - { - let inner = self.node.clone(); + ) -> RpcResult> { + let inner = self.get_inner().clone(); Box::pin(async move { let reader = inner .read() @@ -498,8 +457,7 @@ impl ZksNamespaceT fn get_l1_batch_details( &self, _batch: zksync_basic_types::L1BatchNumber, - ) -> jsonrpc_core::BoxFuture>> - { + ) -> RpcResult> { Box::pin(async { Ok(None) }) } @@ -512,11 +470,8 @@ impl ZksNamespaceT /// # Returns /// /// A boxed future resolving to a `jsonrpc_core::Result` containing an `Option` of bytes. 
- fn get_bytecode_by_hash( - &self, - hash: zksync_basic_types::H256, - ) -> jsonrpc_core::BoxFuture>>> { - let inner = self.node.clone(); + fn get_bytecode_by_hash(&self, hash: zksync_basic_types::H256) -> RpcResult>> { + let inner = self.get_inner().clone(); Box::pin(async move { let mut writer = inner .write() @@ -537,23 +492,15 @@ impl ZksNamespaceT }) } - fn get_l1_gas_price( - &self, - ) -> jsonrpc_core::BoxFuture> { + fn get_l1_gas_price(&self) -> RpcResult { not_implemented("zks_getL1GasPrice") } - fn get_protocol_version( - &self, - _version_id: Option, - ) -> jsonrpc_core::BoxFuture>> { + fn get_protocol_version(&self, _version_id: Option) -> RpcResult> { not_implemented("zks_getProtocolVersion") } - fn get_logs_with_virtual_blocks( - &self, - _filter: Filter, - ) -> jsonrpc_core::BoxFuture>> { + fn get_logs_with_virtual_blocks(&self, _filter: Filter) -> RpcResult> { not_implemented("zks_getLogs") } } @@ -577,7 +524,6 @@ mod tests { #[tokio::test] async fn test_estimate_fee() { let node = InMemoryNode::::default(); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); let mock_request = CallRequest { from: Some( @@ -602,10 +548,8 @@ mod tests { eip712_meta: None, }; - let result = namespace.estimate_fee(mock_request).await.unwrap(); - // Important: The gas value expectation is tied to a specific zksync-era dependency version. - // For the zksync-era commit hash `73a1e8ff564025d06e02c2689da238ae47bb10c3`, the anticipated gas value is 1086383. - // If the zksync-era dependency is updated, this expected gas value may need adjustment. 
+ let result = node.estimate_fee(mock_request).await.unwrap(); + assert_eq!(result.gas_limit, U256::from(1086383)); assert_eq!(result.max_fee_per_gas, U256::from(250000000)); assert_eq!(result.max_priority_fee_per_gas, U256::from(0)); @@ -616,13 +560,12 @@ mod tests { async fn test_get_token_price_given_eth_should_return_price() { // Arrange let node = InMemoryNode::::default(); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); let mock_address = Address::from_str("0x0000000000000000000000000000000000000000") .expect("Failed to parse address"); // Act - let result = namespace.get_token_price(mock_address).await.unwrap(); + let result = node.get_token_price(mock_address).await.unwrap(); // Assert assert_eq!(result, BigDecimal::from(1_500)); @@ -632,13 +575,12 @@ mod tests { async fn test_get_token_price_given_capitalized_link_address_should_return_price() { // Arrange let node = InMemoryNode::::default(); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); let mock_address = Address::from_str("0x40609141Db628BeEE3BfAB8034Fc2D8278D0Cc78") .expect("Failed to parse address"); // Act - let result = namespace.get_token_price(mock_address).await.unwrap(); + let result = node.get_token_price(mock_address).await.unwrap(); // Assert assert_eq!(result, BigDecimal::from(1)); @@ -648,13 +590,12 @@ mod tests { async fn test_get_token_price_given_unknown_address_should_return_error() { // Arrange let node = InMemoryNode::::default(); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); let mock_address = Address::from_str("0x0000000000000000000000000000000000000042") .expect("Failed to parse address"); // Act - let result = namespace.get_token_price(mock_address).await; + let result = node.get_token_price(mock_address).await; // Assert assert!(result.is_err()); @@ -664,7 +605,6 @@ mod tests { async fn test_get_transaction_details_local() { // Arrange let node = InMemoryNode::::default(); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); 
let inner = node.get_inner(); { let mut writer = inner.write().unwrap(); @@ -682,8 +622,7 @@ mod tests { }, ); } - // Act - let result = namespace + let result = node .get_transaction_details(H256::repeat_byte(0x1)) .await .expect("get transaction details") @@ -734,8 +673,7 @@ mod tests { Default::default(), ); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); - let result = namespace + let result = node .get_transaction_details(input_tx_hash) .await .expect("get transaction details") @@ -749,7 +687,6 @@ mod tests { async fn test_get_block_details_local() { // Arrange let node = InMemoryNode::::default(); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); let inner = node.get_inner(); { let mut writer = inner.write().unwrap(); @@ -757,8 +694,7 @@ mod tests { writer.blocks.insert(H256::repeat_byte(0x1), block); writer.block_hashes.insert(0, H256::repeat_byte(0x1)); } - // Act - let result = namespace + let result = node .get_block_details(MiniblockNumber(0)) .await .expect("get block details") @@ -822,8 +758,7 @@ mod tests { Default::default(), ); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); - let result = namespace + let result = node .get_block_details(miniblock) .await .expect("get block details") @@ -838,7 +773,6 @@ mod tests { async fn test_get_bridge_contracts_uses_default_values_if_local() { // Arrange let node = InMemoryNode::::default(); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); let expected_bridge_addresses = BridgeAddresses { l1_erc20_default_bridge: Default::default(), l2_erc20_default_bridge: Default::default(), @@ -846,8 +780,7 @@ mod tests { l2_weth_bridge: Default::default(), }; - // Act - let actual_bridge_addresses = namespace + let actual_bridge_addresses = node .get_bridge_contracts() .await .expect("get bridge addresses"); @@ -893,10 +826,8 @@ mod tests { None, Default::default(), ); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); - // Act - let actual_bridge_addresses = 
namespace + let actual_bridge_addresses = node .get_bridge_contracts() .await .expect("get bridge addresses"); @@ -909,7 +840,6 @@ mod tests { async fn test_get_bytecode_by_hash_returns_local_value_if_available() { // Arrange let node = InMemoryNode::::default(); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); let input_hash = H256::repeat_byte(0x1); let input_bytecode = vec![0x1]; node.get_inner() @@ -918,8 +848,7 @@ mod tests { .fork_storage .store_factory_dep(input_hash, input_bytecode.clone()); - // Act - let actual = namespace + let actual = node .get_bytecode_by_hash(input_hash) .await .expect("failed fetching bytecode") @@ -960,10 +889,8 @@ mod tests { None, Default::default(), ); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); - // Act - let actual = namespace + let actual = node .get_bytecode_by_hash(input_hash) .await .expect("failed fetching bytecode") @@ -977,7 +904,6 @@ mod tests { async fn test_get_raw_block_transactions_local() { // Arrange let node = InMemoryNode::::default(); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); let inner = node.get_inner(); { let mut writer = inner.write().unwrap(); @@ -1001,8 +927,7 @@ mod tests { writer.block_hashes.insert(0, H256::repeat_byte(0x1)); } - // Act - let txns = namespace + let txns = node .get_raw_block_transactions(MiniblockNumber(0)) .await .expect("get transaction details"); @@ -1086,8 +1011,7 @@ mod tests { Default::default(), ); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); - let txns = namespace + let txns = node .get_raw_block_transactions(miniblock) .await .expect("get transaction details"); @@ -1097,8 +1021,7 @@ mod tests { #[tokio::test] async fn test_get_all_account_balances_empty() { let node = InMemoryNode::::default(); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); - let balances = namespace + let balances = node .get_all_account_balances(Address::zero()) .await .expect("get balances"); @@ -1108,8 +1031,7 @@ mod tests { 
#[tokio::test] async fn test_get_confirmed_tokens_eth() { let node = InMemoryNode::::default(); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); - let balances = namespace + let balances = node .get_confirmed_tokens(0, 100) .await .expect("get balances"); @@ -1222,7 +1144,6 @@ mod tests { None, Default::default(), ); - let namespace = ZkMockNamespaceImpl::new(node.get_inner()); { let inner = node.get_inner(); let writer = inner.write().unwrap(); @@ -1236,7 +1157,7 @@ mod tests { ); } - let balances = namespace + let balances = node .get_all_account_balances(Address::repeat_byte(0x1)) .await .expect("get balances"); diff --git a/src/testing.rs b/src/testing.rs index e6f77751..b6dfcfbb 100644 --- a/src/testing.rs +++ b/src/testing.rs @@ -358,7 +358,7 @@ impl RawTransactionsResponseBuilder { } /// Applies a transaction with a given hash to the node and returns the block hash. -pub fn apply_tx( +pub fn apply_tx( node: &InMemoryNode, tx_hash: H256, ) -> (H256, U64) { @@ -397,7 +397,7 @@ pub fn apply_tx( } /// Deploys a contract with the given bytecode. 
-pub fn deploy_contract( +pub fn deploy_contract( node: &InMemoryNode, tx_hash: H256, private_key: H256, From c5350bf89a25abc97304a86189fc983881308b8f Mon Sep 17 00:00:00 2001 From: Roman Petriv Date: Tue, 31 Oct 2023 16:19:49 +0200 Subject: [PATCH 2/6] fix: validate gas_limit and max_fee_per_gas before transaction execution (#207) * fix: validate gas_limit and max_fee_per_gas before transaction execution * fix: add validation for max_fee_per_gas being too low * fix: move validate_tx to node/in_memory.rs * fix: lint * fix: move tx validation logic to run_l2_tx * test: add tests for tx validation * fix: remove unneeded match from eth.rs * fix: lint * fix: remove +nightly from vscode debug config * fix: remove extra new line log --- src/node/in_memory.rs | 90 +++++++++++++++++++++++++++++++++++++++ src/testing.rs | 98 +++++++++++++++++++++++++++++++------------ 2 files changed, 162 insertions(+), 26 deletions(-) diff --git a/src/node/in_memory.rs b/src/node/in_memory.rs index a718918f..095ccf53 100644 --- a/src/node/in_memory.rs +++ b/src/node/in_memory.rs @@ -1211,6 +1211,35 @@ impl InMemoryNode { } } + // Validates L2 transaction + fn validate_tx(&self, tx: &L2Tx) -> Result<(), String> { + let max_gas = U256::from(u32::MAX); + if tx.common_data.fee.gas_limit > max_gas + || tx.common_data.fee.gas_per_pubdata_limit > max_gas + { + return Err("exceeds block gas limit".into()); + } + + if tx.common_data.fee.max_fee_per_gas < L2_GAS_PRICE.into() { + tracing::info!( + "Submitted Tx is Unexecutable {:?} because of MaxFeePerGasTooLow {}", + tx.hash(), + tx.common_data.fee.max_fee_per_gas + ); + return Err("block base fee higher than max fee per gas".into()); + } + + if tx.common_data.fee.max_fee_per_gas < tx.common_data.fee.max_priority_fee_per_gas { + tracing::info!( + "Submitted Tx is Unexecutable {:?} because of MaxPriorityFeeGreaterThanMaxFee {}", + tx.hash(), + tx.common_data.fee.max_fee_per_gas + ); + return Err("max priority fee per gas higher than max fee per 
gas".into()); + } + Ok(()) + } + /// Executes the given L2 transaction and returns all the VM logs. pub fn run_l2_tx_inner( &self, @@ -1425,7 +1454,17 @@ impl InMemoryNode { /// Runs L2 transaction and commits it to a new block. pub fn run_l2_tx(&self, l2_tx: L2Tx, execution_mode: TxExecutionMode) -> Result<(), String> { let tx_hash = l2_tx.hash(); + tracing::info!(""); + tracing::info!("Validating {}", format!("{:?}", tx_hash).bold()); + + match self.validate_tx(&l2_tx) { + Ok(_) => (), + Err(e) => { + return Err(e); + } + }; + tracing::info!("Executing {}", format!("{:?}", tx_hash).bold()); { @@ -1639,3 +1678,54 @@ impl BlockContext { } } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::{http_fork_source::HttpForkSource, node::InMemoryNode, testing}; + + #[tokio::test] + async fn test_run_l2_tx_validates_tx_gas_limit_too_high() { + let node = InMemoryNode::::default(); + let tx = testing::TransactionBuilder::new() + .set_gas_limit(U256::from(u32::MAX) + 1) + .build(); + node.set_rich_account(tx.common_data.initiator_address); + + let result = node.run_l2_tx(tx, TxExecutionMode::VerifyExecute); + + assert_eq!(result.err(), Some("exceeds block gas limit".into())); + } + + #[tokio::test] + async fn test_run_l2_tx_validates_tx_max_fee_per_gas_too_low() { + let node = InMemoryNode::::default(); + let tx = testing::TransactionBuilder::new() + .set_max_fee_per_gas(U256::from(250_000_000 - 1)) + .build(); + node.set_rich_account(tx.common_data.initiator_address); + + let result = node.run_l2_tx(tx, TxExecutionMode::VerifyExecute); + + assert_eq!( + result.err(), + Some("block base fee higher than max fee per gas".into()) + ); + } + + #[tokio::test] + async fn test_run_l2_tx_validates_tx_max_priority_fee_per_gas_higher_than_max_fee_per_gas() { + let node = InMemoryNode::::default(); + let tx = testing::TransactionBuilder::new() + .set_max_priority_fee_per_gas(U256::from(250_000_000 + 1)) + .build(); + node.set_rich_account(tx.common_data.initiator_address); + + 
let result = node.run_l2_tx(tx, TxExecutionMode::VerifyExecute); + + assert_eq!( + result.err(), + Some("max priority fee per gas higher than max fee per gas".into()) + ); + } +} diff --git a/src/testing.rs b/src/testing.rs index b6dfcfbb..07346be5 100644 --- a/src/testing.rs +++ b/src/testing.rs @@ -19,10 +19,7 @@ use multivm::interface::{ExecutionResult, VmExecutionResultAndLogs}; use std::str::FromStr; use zksync_basic_types::{H160, U64}; use zksync_types::api::{BridgeAddresses, DebugCall, DebugCallType, Log}; -use zksync_types::{ - fee::Fee, l2::L2Tx, Address, L2ChainId, Nonce, PackedEthSignature, ProtocolVersionId, H256, - U256, -}; +use zksync_types::{fee::Fee, l2::L2Tx, Address, L2ChainId, Nonce, ProtocolVersionId, H256, U256}; /// Configuration for the [MockServer]'s initial block. #[derive(Default, Debug, Clone)] @@ -357,6 +354,75 @@ impl RawTransactionsResponseBuilder { } } +#[derive(Debug, Clone)] +pub struct TransactionBuilder { + tx_hash: H256, + from_account_private_key: H256, + gas_limit: U256, + max_fee_per_gas: U256, + max_priority_fee_per_gas: U256, +} + +impl Default for TransactionBuilder { + fn default() -> Self { + Self { + tx_hash: H256::repeat_byte(0x01), + from_account_private_key: H256::random(), + gas_limit: U256::from(1_000_000), + max_fee_per_gas: U256::from(250_000_000), + max_priority_fee_per_gas: U256::from(250_000_000), + } + } +} + +impl TransactionBuilder { + pub fn new() -> Self { + Self::default() + } + + pub fn set_hash(&mut self, hash: H256) -> &mut Self { + self.tx_hash = hash; + self + } + + pub fn set_gas_limit(&mut self, gas_limit: U256) -> &mut Self { + self.gas_limit = gas_limit; + self + } + + pub fn set_max_fee_per_gas(&mut self, max_fee_per_gas: U256) -> &mut Self { + self.max_fee_per_gas = max_fee_per_gas; + self + } + + pub fn set_max_priority_fee_per_gas(&mut self, max_priority_fee_per_gas: U256) -> &mut Self { + self.max_priority_fee_per_gas = max_priority_fee_per_gas; + self + } + + pub fn build(&mut self) -> 
L2Tx { + let mut tx = L2Tx::new_signed( + Address::random(), + vec![], + Nonce(0), + Fee { + gas_limit: self.gas_limit, + max_fee_per_gas: self.max_fee_per_gas, + max_priority_fee_per_gas: self.max_priority_fee_per_gas, + gas_per_pubdata_limit: U256::from(20000), + }, + U256::from(1), + L2ChainId::from(260), + &self.from_account_private_key, + None, + Default::default(), + ) + .unwrap(); + tx.set_input(vec![], self.tx_hash); + tx + } +} + /// Applies a transaction with a given hash to the node and returns the block hash. pub fn apply_tx( node: &InMemoryNode, @@ -369,28 +435,8 @@ pub fn apply_tx( .expect("failed getting current batch number"); let produced_block_hash = compute_hash(next_miniblock, tx_hash); - let private_key = H256::random(); - let from_account = PackedEthSignature::address_from_private_key(&private_key) - .expect("failed generating address"); - node.set_rich_account(from_account); - let mut tx = L2Tx::new_signed( - Address::random(), - vec![], - Nonce(0), - Fee { - gas_limit: U256::from(1_000_000), - max_fee_per_gas: U256::from(250_000_000), - max_priority_fee_per_gas: U256::from(250_000_000), - gas_per_pubdata_limit: U256::from(20000), - }, - U256::from(1), - L2ChainId::from(260), - &private_key, - None, - Default::default(), - ) - .unwrap(); - tx.set_input(vec![], tx_hash); + let tx = TransactionBuilder::new().set_hash(tx_hash).build(); + node.set_rich_account(tx.common_data.initiator_address); node.apply_txs(vec![tx]).expect("failed applying tx"); (produced_block_hash, U64::from(next_miniblock)) From 96632f9a3c6b717ca1f619b042407f462252cc70 Mon Sep 17 00:00:00 2001 From: Roman Petriv Date: Wed, 1 Nov 2023 15:10:39 +0200 Subject: [PATCH 3/6] feat: add parent hash linking for blocks (#209) * feat: add parent_hash linking for blocks * test: add tests * fix: lint * fix: use correct genesis block hash as initial current_miniblock_hash --- src/node/eth.rs | 68 +++++++++++++++++++++++++++++++++---------- src/node/in_memory.rs | 68 
++++++++++++++++++++++++++++++++++++++----- src/utils.rs | 7 ++++- 3 files changed, 119 insertions(+), 24 deletions(-) diff --git a/src/node/eth.rs b/src/node/eth.rs index cfd63267..fa478ca8 100644 --- a/src/node/eth.rs +++ b/src/node/eth.rs @@ -490,7 +490,7 @@ impl EthNamespa from: Some(info.tx.initiator_account()), to: Some(info.tx.recipient_account()), value: info.tx.execute.value, - gas_price: Default::default(), + gas_price: Some(U256::from(0)), gas: Default::default(), input: input_data.data.into(), v: Some(chain_id.into()), @@ -1375,19 +1375,57 @@ mod tests { } #[tokio::test] - async fn test_node_run_has_genesis_block() { + async fn test_node_has_genesis_block() { let node = InMemoryNode::::default(); let block = node .get_block_by_number(BlockNumber::Latest, false) .await - .expect("failed fetching block by hash") + .expect("failed fetching block by number") .expect("no block"); assert_eq!(0, block.number.as_u64()); assert_eq!(compute_hash(0, H256::zero()), block.hash); } + #[tokio::test] + async fn test_node_creates_genesis_block_with_hash_and_zero_parent_hash() { + let node = InMemoryNode::::default(); + + let block = node + .get_block_by_hash(compute_hash(0, H256::zero()), false) + .await + .expect("failed fetching block by hash") + .expect("no block"); + + assert_eq!(block.parent_hash, H256::zero()); + } + + #[tokio::test] + async fn test_node_produces_blocks_with_parent_hash_links() { + let node = InMemoryNode::::default(); + testing::apply_tx(&node, H256::repeat_byte(0x01)); + + let genesis_block = node + .get_block_by_number(BlockNumber::from(0), false) + .await + .expect("failed fetching block by number") + .expect("no block"); + let first_block = node + .get_block_by_number(BlockNumber::from(1), false) + .await + .expect("failed fetching block by number") + .expect("no block"); + let second_block = node + .get_block_by_number(BlockNumber::from(2), false) + .await + .expect("failed fetching block by number") + .expect("no block"); + + 
assert_eq!(genesis_block.hash, first_block.parent_hash); + assert_eq!(first_block.hash, second_block.parent_hash); + } + #[tokio::test] async fn test_get_block_by_hash_for_produced_block() { let node = InMemoryNode::::default(); @@ -2542,11 +2580,11 @@ mod tests { let storage = inner.fork_storage.inner.read().unwrap(); let expected_snapshot = Snapshot { - current_timestamp: inner.current_timestamp.clone(), - current_batch: inner.current_batch.clone(), - current_miniblock: inner.current_miniblock.clone(), - current_miniblock_hash: inner.current_miniblock_hash.clone(), - l1_gas_price: inner.l1_gas_price.clone(), + current_timestamp: inner.current_timestamp, + current_batch: inner.current_batch, + current_miniblock: inner.current_miniblock, + current_miniblock_hash: inner.current_miniblock_hash, + l1_gas_price: inner.l1_gas_price, tx_results: inner.tx_results.clone(), blocks: inner.blocks.clone(), block_hashes: inner.block_hashes.clone(), @@ -2646,11 +2684,11 @@ mod tests { let expected_snapshot = { let storage = inner.fork_storage.inner.read().unwrap(); Snapshot { - current_timestamp: inner.current_timestamp.clone(), - current_batch: inner.current_batch.clone(), - current_miniblock: inner.current_miniblock.clone(), - current_miniblock_hash: inner.current_miniblock_hash.clone(), - l1_gas_price: inner.l1_gas_price.clone(), + current_timestamp: inner.current_timestamp, + current_batch: inner.current_batch, + current_miniblock: inner.current_miniblock, + current_miniblock_hash: inner.current_miniblock_hash, + l1_gas_price: inner.l1_gas_price, tx_results: inner.tx_results.clone(), blocks: inner.blocks.clone(), block_hashes: inner.block_hashes.clone(), @@ -2984,7 +3022,7 @@ mod tests { .expect("no transaction"); assert_eq!(input_tx_hash, actual_tx.hash); - assert_eq!(Some(U64::from(input_block_number)), actual_tx.block_number); + assert_eq!(Some(input_block_number), actual_tx.block_number); } #[tokio::test] @@ -3010,7 +3048,7 @@ mod tests { 
TransactionResponseBuilder::new() .set_hash(input_tx_hash) .set_block_hash(input_block_hash) - .set_block_number(U64::from(input_block_number)) + .set_block_number(input_block_number) .build(), ); diff --git a/src/node/in_memory.rs b/src/node/in_memory.rs index 095ccf53..874fe16f 100644 --- a/src/node/in_memory.rs +++ b/src/node/in_memory.rs @@ -89,10 +89,21 @@ pub fn compute_hash(block_number: u64, tx_hash: H256) -> H256 { H256(keccak256(&digest)) } -pub fn create_empty_block(block_number: u64, timestamp: u64, batch: u32) -> Block { +pub fn create_empty_block( + block_number: u64, + timestamp: u64, + batch: u32, + parent_block_hash: Option, +) -> Block { let hash = compute_hash(block_number, H256::zero()); + let parent_hash = parent_block_hash.unwrap_or(if block_number == 0 { + H256::zero() + } else { + compute_hash(block_number - 1, H256::zero()) + }); Block { hash, + parent_hash, number: U64::from(block_number), timestamp: U256::from(timestamp), l1_batch_number: Some(U64::from(batch)), @@ -900,18 +911,19 @@ impl InMemoryNode { } } else { let mut block_hashes = HashMap::::new(); - block_hashes.insert(0, H256::zero()); + let block_hash = compute_hash(0, H256::zero()); + block_hashes.insert(0, block_hash); let mut blocks = HashMap::>::new(); blocks.insert( - H256::zero(), - create_empty_block(0, NON_FORK_FIRST_BLOCK_TIMESTAMP, 0), + block_hash, + create_empty_block(0, NON_FORK_FIRST_BLOCK_TIMESTAMP, 0, None), ); InMemoryNodeInner { current_timestamp: NON_FORK_FIRST_BLOCK_TIMESTAMP, current_batch: 0, current_miniblock: 0, - current_miniblock_hash: H256::zero(), + current_miniblock_hash: block_hash, l1_gas_price: L1_GAS_PRICE, tx_results: Default::default(), blocks, @@ -1241,7 +1253,7 @@ impl InMemoryNode { } /// Executes the given L2 transaction and returns all the VM logs. 
- pub fn run_l2_tx_inner( + fn run_l2_tx_inner( &self, l2_tx: L2Tx, execution_mode: TxExecutionMode, @@ -1418,8 +1430,11 @@ impl InMemoryNode { transaction.block_hash = Some(*block_hash); transaction.block_number = Some(U64::from(inner.current_miniblock)); + let parent_block_hash = *inner.block_hashes.get(&(block_ctx.miniblock - 1)).unwrap(); + let block = Block { hash, + parent_hash: parent_block_hash, number: U64::from(block_ctx.miniblock), timestamp: U256::from(batch_env.timestamp), l1_batch_number: Some(U64::from(batch_env.number.0)), @@ -1535,6 +1550,7 @@ impl InMemoryNode { l1_batch_number: block.l1_batch_number, from: l2_tx.initiator_account(), to: Some(l2_tx.recipient_account()), + root: Some(H256::zero()), cumulative_gas_used: Default::default(), gas_used: Some(l2_tx.common_data.fee.gas_limit - result.refunds.gas_refunded), contract_address: contract_address_from_tx_result(&result), @@ -1586,8 +1602,13 @@ impl InMemoryNode { // we are adding one l2 block at the end of each batch (to handle things like remaining events etc). // You can look at insert_fictive_l2_block function in VM to see how this fake block is inserted. 
let block_ctx = block_ctx.new_block(); - let empty_block_at_end_of_batch = - create_empty_block(block_ctx.miniblock, block_ctx.timestamp, block_ctx.batch); + let parent_block_hash = block.hash; + let empty_block_at_end_of_batch = create_empty_block( + block_ctx.miniblock, + block_ctx.timestamp, + block_ctx.batch, + Some(parent_block_hash), + ); inner.current_batch = inner.current_batch.saturating_add(1); @@ -1728,4 +1749,35 @@ mod tests { Some("max priority fee per gas higher than max fee per gas".into()) ); } + + #[tokio::test] + async fn test_create_empty_block_creates_genesis_block_with_hash_and_zero_parent_hash() { + let first_block = create_empty_block::(0, 1000, 1, None); + + assert_eq!(first_block.hash, compute_hash(0, H256::zero())); + assert_eq!(first_block.parent_hash, H256::zero()); + } + + #[tokio::test] + async fn test_create_empty_block_creates_block_with_parent_hash_link_to_prev_block() { + let first_block = create_empty_block::(0, 1000, 1, None); + let second_block = create_empty_block::(1, 1000, 1, None); + + assert_eq!(second_block.parent_hash, first_block.hash); + } + + #[tokio::test] + async fn test_create_empty_block_creates_block_with_parent_hash_link_to_provided_parent_hash() { + let first_block = create_empty_block::( + 0, + 1000, + 1, + Some(compute_hash(123, H256::zero())), + ); + let second_block = + create_empty_block::(1, 1000, 1, Some(first_block.hash)); + + assert_eq!(first_block.parent_hash, compute_hash(123, H256::zero())); + assert_eq!(second_block.parent_hash, first_block.hash); + } } diff --git a/src/utils.rs b/src/utils.rs index b1af6554..d22eb814 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -153,7 +153,12 @@ pub fn mine_empty_blocks( ) } - let block = create_empty_block(block_ctx.miniblock, block_ctx.timestamp, block_ctx.batch); + let block = create_empty_block( + block_ctx.miniblock, + block_ctx.timestamp, + block_ctx.batch, + None, + ); node.block_hashes.insert(block.number.as_u64(), block.hash); 
node.blocks.insert(block.hash, block); From 57ddf946f88d3b1580f82ec67993ccd690ed3d70 Mon Sep 17 00:00:00 2001 From: Nicolas Villanueva Date: Wed, 1 Nov 2023 08:18:17 -0700 Subject: [PATCH 4/6] chore: Update zksync-era dependency to latest (#210) * chore: Update zksync-era dependency to latest * add Version17 to list of supported version --- Cargo.lock | 67 ++++++++++++++++++++++++++----------------------- Cargo.toml | 18 ++++++------- src/deps/mod.rs | 13 ++++++++++ src/fork.rs | 15 +++++++++++ 4 files changed, 72 insertions(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1655c87a..66247424 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2237,7 +2237,7 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "era_test_node" -version = "0.1.0-alpha.9" +version = "0.1.0-alpha.10" dependencies = [ "anyhow", "bigdecimal", @@ -4593,7 +4593,7 @@ dependencies = [ [[package]] name = "multivm" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "anyhow", "hex", @@ -4603,7 +4603,7 @@ dependencies = [ "tracing", "vise", "zk_evm 1.3.1", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc1)", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", "zksync_contracts", "zksync_state", "zksync_system_constants", @@ -5686,7 +5686,7 @@ dependencies = [ [[package]] name = "prometheus_exporter" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = 
"git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "anyhow", "metrics", @@ -8182,7 +8182,7 @@ dependencies = [ [[package]] name = "vlog" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "chrono", "sentry", @@ -8606,7 +8606,7 @@ dependencies = [ [[package]] name = "zk_evm" version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc1#fe8215a7047d24430ad470cf15a19bedb4d6ba0b" +source = "git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2#fbee20f5bac7d6ca3e22ae69b2077c510a07de4e" dependencies = [ "anyhow", "lazy_static", @@ -8797,7 +8797,7 @@ dependencies = [ [[package]] name = "zksync_basic_types" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "serde", "serde_json", @@ -8807,7 +8807,7 @@ dependencies = [ [[package]] name = "zksync_circuit_breaker" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "anyhow", "async-trait", @@ -8830,7 +8830,7 @@ dependencies = [ [[package]] name = "zksync_commitment_utils" version = "0.1.0" -source = 
"git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "zkevm_test_harness 1.4.0", "zksync_types", @@ -8840,7 +8840,7 @@ dependencies = [ [[package]] name = "zksync_config" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "anyhow", "bigdecimal", @@ -8859,7 +8859,7 @@ dependencies = [ [[package]] name = "zksync_contracts" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "envy", "ethabi 18.0.0", @@ -8873,7 +8873,7 @@ dependencies = [ [[package]] name = "zksync_core" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "actix-cors", "actix-rt", @@ -8937,7 +8937,7 @@ dependencies = [ [[package]] name = "zksync_crypto" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = 
"git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "base64 0.13.1", "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -8952,7 +8952,7 @@ dependencies = [ [[package]] name = "zksync_dal" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "anyhow", "bigdecimal", @@ -8961,6 +8961,7 @@ dependencies = [ "itertools 0.10.5", "num 0.3.1", "once_cell", + "rand 0.8.5", "serde", "serde_json", "sqlx", @@ -8968,6 +8969,7 @@ dependencies = [ "thiserror", "tokio", "tracing", + "url", "vise", "zksync_contracts", "zksync_health_check", @@ -8979,7 +8981,7 @@ dependencies = [ [[package]] name = "zksync_eth_client" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "anyhow", "async-trait", @@ -8999,7 +9001,7 @@ dependencies = [ [[package]] name = "zksync_eth_signer" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "async-trait", "hex", @@ -9018,7 +9020,7 @@ dependencies = [ [[package]] name = "zksync_health_check" version = "0.1.0" -source = 
"git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "async-trait", "futures 0.3.28", @@ -9031,7 +9033,7 @@ dependencies = [ [[package]] name = "zksync_mempool" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "tracing", "zksync_types", @@ -9040,7 +9042,7 @@ dependencies = [ [[package]] name = "zksync_merkle_tree" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "leb128", "once_cell", @@ -9051,12 +9053,13 @@ dependencies = [ "zksync_crypto", "zksync_storage", "zksync_types", + "zksync_utils", ] [[package]] name = "zksync_mini_merkle_tree" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "once_cell", "zksync_basic_types", @@ -9066,7 +9069,7 @@ dependencies = [ [[package]] name = "zksync_object_store" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = 
"git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "anyhow", "async-trait", @@ -9084,7 +9087,7 @@ dependencies = [ [[package]] name = "zksync_prover_utils" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "anyhow", "async-trait", @@ -9104,7 +9107,7 @@ dependencies = [ [[package]] name = "zksync_queued_job_processor" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "anyhow", "async-trait", @@ -9116,7 +9119,7 @@ dependencies = [ [[package]] name = "zksync_state" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "anyhow", "itertools 0.10.5", @@ -9133,7 +9136,7 @@ dependencies = [ [[package]] name = "zksync_storage" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "num_cpus", "once_cell", @@ -9145,7 +9148,7 @@ dependencies = [ [[package]] name = 
"zksync_system_constants" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "anyhow", "bigdecimal", @@ -9163,7 +9166,7 @@ dependencies = [ [[package]] name = "zksync_types" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "chrono", @@ -9180,7 +9183,7 @@ dependencies = [ "serde_with", "strum 0.24.1", "thiserror", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc1)", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", "zkevm_test_harness 1.3.3", "zksync_basic_types", "zksync_contracts", @@ -9192,7 +9195,7 @@ dependencies = [ [[package]] name = "zksync_utils" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "anyhow", "bigdecimal", @@ -9207,14 +9210,14 @@ dependencies = [ "tokio", "tracing", "vlog", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc1)", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", "zksync_basic_types", ] [[package]] name = "zksync_verification_key_generator_and_server" version = "0.1.0" -source = 
"git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "anyhow", "bincode", @@ -9234,7 +9237,7 @@ dependencies = [ [[package]] name = "zksync_web3_decl" version = "0.1.0" -source = "git+https://github.com/matter-labs/zksync-era.git?rev=73a1e8ff564025d06e02c2689da238ae47bb10c3#73a1e8ff564025d06e02c2689da238ae47bb10c3" +source = "git+https://github.com/matter-labs/zksync-era.git?rev=80273264a9512bc1e6f1d1f4372107f9167260b1#80273264a9512bc1e6f1d1f4372107f9167260b1" dependencies = [ "bigdecimal", "chrono", diff --git a/Cargo.toml b/Cargo.toml index 81997b4a..81098bc3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "era_test_node" -version = "0.1.0-alpha.9" +version = "0.1.0-alpha.10" edition = "2018" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" @@ -11,14 +11,14 @@ categories = ["cryptography"] publish = false # We don't want to publish our binaries. 
[dependencies] -zksync_basic_types = { git = "https://github.com/matter-labs/zksync-era.git", rev = "73a1e8ff564025d06e02c2689da238ae47bb10c3" } -zksync_core = { git = "https://github.com/matter-labs/zksync-era.git", rev = "73a1e8ff564025d06e02c2689da238ae47bb10c3" } -multivm = { git = "https://github.com/matter-labs/zksync-era.git", rev = "73a1e8ff564025d06e02c2689da238ae47bb10c3" } -zksync_contracts = { git = "https://github.com/matter-labs/zksync-era.git", rev = "73a1e8ff564025d06e02c2689da238ae47bb10c3" } -zksync_types = { git = "https://github.com/matter-labs/zksync-era.git", rev = "73a1e8ff564025d06e02c2689da238ae47bb10c3" } -zksync_utils = { git = "https://github.com/matter-labs/zksync-era.git", rev = "73a1e8ff564025d06e02c2689da238ae47bb10c3" } -zksync_state = { git = "https://github.com/matter-labs/zksync-era.git", rev = "73a1e8ff564025d06e02c2689da238ae47bb10c3" } -zksync_web3_decl = { git = "https://github.com/matter-labs/zksync-era.git", rev = "73a1e8ff564025d06e02c2689da238ae47bb10c3" } +zksync_basic_types = { git = "https://github.com/matter-labs/zksync-era.git", rev = "80273264a9512bc1e6f1d1f4372107f9167260b1" } +zksync_core = { git = "https://github.com/matter-labs/zksync-era.git", rev = "80273264a9512bc1e6f1d1f4372107f9167260b1" } +multivm = { git = "https://github.com/matter-labs/zksync-era.git", rev = "80273264a9512bc1e6f1d1f4372107f9167260b1" } +zksync_contracts = { git = "https://github.com/matter-labs/zksync-era.git", rev = "80273264a9512bc1e6f1d1f4372107f9167260b1" } +zksync_types = { git = "https://github.com/matter-labs/zksync-era.git", rev = "80273264a9512bc1e6f1d1f4372107f9167260b1" } +zksync_utils = { git = "https://github.com/matter-labs/zksync-era.git", rev = "80273264a9512bc1e6f1d1f4372107f9167260b1" } +zksync_state = { git = "https://github.com/matter-labs/zksync-era.git", rev = "80273264a9512bc1e6f1d1f4372107f9167260b1" } +zksync_web3_decl = { git = "https://github.com/matter-labs/zksync-era.git", rev = 
"80273264a9512bc1e6f1d1f4372107f9167260b1" } sha3 = "0.10.6" diff --git a/src/deps/mod.rs b/src/deps/mod.rs index f6c721bc..2ba82297 100644 --- a/src/deps/mod.rs +++ b/src/deps/mod.rs @@ -68,6 +68,12 @@ impl ReadStorage for &InMemoryStorage { fn load_factory_dep(&mut self, hash: H256) -> Option> { self.factory_deps.get(&hash).cloned() } + + fn get_enumeration_index(&mut self, _key: &StorageKey) -> Option { + // TODO: Update this file to use proper enumeration index value once it's exposed for forks via API + // This should happen as the migration of Boojum completes + Some(0_u64) + } } impl ReadStorage for InMemoryStorage { @@ -82,6 +88,10 @@ impl ReadStorage for InMemoryStorage { fn load_factory_dep(&mut self, hash: H256) -> Option> { (&*self).load_factory_dep(hash) } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + (&*self).get_enumeration_index(key) + } } /// Functionality to read from the VM storage. @@ -103,6 +113,9 @@ pub trait ReadStorage: fmt::Debug { let code_key = get_known_code_key(bytecode_hash); self.read_value(&code_key) != H256::zero() } + + /// Retrieves the enumeration index for a given `key`. + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option; } /// Functionality to write to the VM storage in a batch. diff --git a/src/fork.rs b/src/fork.rs index 04e27dc1..5c0c9e60 100644 --- a/src/fork.rs +++ b/src/fork.rs @@ -148,6 +148,12 @@ impl ForkStorage { local_storage } } + + /// Retrieves the enumeration index for a given `key`. 
+ fn get_enumeration_index_internal(&self, _key: &StorageKey) -> Option { + // TODO: Update this file to use proper enumeration index value once it's exposed for forks via API + Some(0_u64) + } } impl ReadStorage for ForkStorage { @@ -163,6 +169,10 @@ impl ReadStorage for ForkStorage { fn read_value(&mut self, key: &StorageKey) -> zksync_types::StorageValue { self.read_value_internal(key) } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + self.get_enumeration_index_internal(key) + } } impl ReadStorage for &ForkStorage { @@ -178,6 +188,10 @@ impl ReadStorage for &ForkStorage { fn load_factory_dep(&mut self, hash: H256) -> Option> { self.load_factory_dep_internal(hash) } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + self.get_enumeration_index_internal(key) + } } impl ForkStorage { @@ -291,6 +305,7 @@ const SUPPORTED_VERSIONS: &[ProtocolVersionId] = &[ ProtocolVersionId::Version14, ProtocolVersionId::Version15, ProtocolVersionId::Version16, + ProtocolVersionId::Version17, ]; pub fn supported_protocol_versions(version: ProtocolVersionId) -> bool { From 0a5c8337b32410df7ae70b4c1916e4651ca4e77c Mon Sep 17 00:00:00 2001 From: Dustin Brickwood Date: Mon, 6 Nov 2023 10:53:11 -0600 Subject: [PATCH 5/6] chore: add rust toolchain for version mgt, make inner_tx pub (#213) * chore: add rust toolchain for version mgt, update to make inner tx pub again * chore: change version to reflect ci --- rust-toolchain.yaml | 3 +++ src/node/in_memory.rs | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 rust-toolchain.yaml diff --git a/rust-toolchain.yaml b/rust-toolchain.yaml new file mode 100644 index 00000000..bc962f33 --- /dev/null +++ b/rust-toolchain.yaml @@ -0,0 +1,3 @@ +[toolchain] +channel = "nightly-2023-07-23" +components = ["rustfmt", "clippy"] \ No newline at end of file diff --git a/src/node/in_memory.rs b/src/node/in_memory.rs index 874fe16f..6e7d170d 100644 --- a/src/node/in_memory.rs +++ 
b/src/node/in_memory.rs @@ -1253,7 +1253,7 @@ impl InMemoryNode { } /// Executes the given L2 transaction and returns all the VM logs. - fn run_l2_tx_inner( + pub fn run_l2_tx_inner( &self, l2_tx: L2Tx, execution_mode: TxExecutionMode, From a74c39c0d89aec29086524f9a8dd13bf2e4e03f6 Mon Sep 17 00:00:00 2001 From: Nisheeth Barthwal Date: Tue, 7 Nov 2023 23:56:21 +0530 Subject: [PATCH 6/6] fix: rename run_l2_tx_inner, fix panics for lib users (#214) * rename run_l2_tx_inner, fix panics for lib users * add test * undo dep changes * cleanup comments * inline params * cleanup ExternalStorage * fix test messages --- src/node/eth.rs | 2 +- src/node/in_memory.rs | 73 +++++++++++++--- src/testing.rs | 191 +++++++++++++++++++++++++++++++++++++++++- 3 files changed, 252 insertions(+), 14 deletions(-) diff --git a/src/node/eth.rs b/src/node/eth.rs index fa478ca8..2c7cf4b7 100644 --- a/src/node/eth.rs +++ b/src/node/eth.rs @@ -814,7 +814,7 @@ impl EthNamespa /// /// A `BoxFuture` containing a `jsonrpc_core::Result` that resolves to an array of logs, block hashes, or transaction hashes, /// depending on the filter type, which occurred since last poll. - /// * Filters created with `eth_newFilter` return [Log] objects. + /// * Filters created with `eth_newFilter` return [zksync_types::api::Log] objects. /// * Filters created with `eth_newBlockFilter` return block hashes. /// * Filters created with `eth_newPendingTransactionFilter` return transaction hashes. fn get_filter_changes(&self, id: U256) -> RpcResult { diff --git a/src/node/in_memory.rs b/src/node/in_memory.rs index 6e7d170d..fe153c06 100644 --- a/src/node/in_memory.rs +++ b/src/node/in_memory.rs @@ -1253,7 +1253,26 @@ impl InMemoryNode { } /// Executes the given L2 transaction and returns all the VM logs. 
- pub fn run_l2_tx_inner( + /// + /// **NOTE** + /// + /// This function must only rely on data populated initially via [ForkDetails]: + /// * [InMemoryNodeInner::current_timestamp] + /// * [InMemoryNodeInner::current_batch] + /// * [InMemoryNodeInner::current_miniblock] + /// * [InMemoryNodeInner::current_miniblock_hash] + /// * [InMemoryNodeInner::l1_gas_price] + /// + /// And must _NEVER_ rely on data updated in [InMemoryNodeInner] during previous runs: + /// (if used, they must never panic and/or have meaningful defaults) + /// * [InMemoryNodeInner::block_hashes] + /// * [InMemoryNodeInner::blocks] + /// * [InMemoryNodeInner::tx_results] + /// + /// This is because external users of the library may call this function to perform an isolated + /// VM operation with an external storage and get the results back. + /// So any data populated in [Self::run_l2_tx] will not be available for the next invocation. + pub fn run_l2_tx_raw( &self, l2_tx: L2Tx, execution_mode: TxExecutionMode, @@ -1420,17 +1439,14 @@ impl InMemoryNode { let hash = compute_hash(block_ctx.miniblock, l2_tx.hash()); let mut transaction = zksync_types::api::Transaction::from(l2_tx); - let block_hash = inner - .block_hashes - .get(&inner.current_miniblock) - .ok_or(format!( - "Block hash not found for block: {}", - inner.current_miniblock - ))?; - transaction.block_hash = Some(*block_hash); + transaction.block_hash = Some(inner.current_miniblock_hash); transaction.block_number = Some(U64::from(inner.current_miniblock)); - let parent_block_hash = *inner.block_hashes.get(&(block_ctx.miniblock - 1)).unwrap(); + let parent_block_hash = inner + .block_hashes + .get(&(block_ctx.miniblock - 1)) + .cloned() + .unwrap_or_default(); let block = Block { hash, @@ -1491,7 +1507,7 @@ impl InMemoryNode { } let (keys, result, call_traces, block, bytecodes, block_ctx) = - self.run_l2_tx_inner(l2_tx.clone(), execution_mode)?; + self.run_l2_tx_raw(l2_tx.clone(), execution_mode)?; if let ExecutionResult::Halt { reason 
} = result.result { // Halt means that something went really bad with the transaction execution (in most cases invalid signature, @@ -1780,4 +1796,39 @@ mod tests { assert_eq!(first_block.parent_hash, compute_hash(123, H256::zero())); assert_eq!(second_block.parent_hash, first_block.hash); } + + #[tokio::test] + async fn test_run_l2_tx_raw_does_not_panic_on_external_storage_call() { + // Perform a transaction to get storage to an intermediate state + let node = InMemoryNode::::default(); + let tx = testing::TransactionBuilder::new().build(); + node.set_rich_account(tx.common_data.initiator_address); + node.run_l2_tx(tx, TxExecutionMode::VerifyExecute).unwrap(); + let external_storage = node.inner.read().unwrap().fork_storage.clone(); + + // Execute next transaction using a fresh in-memory node and the external fork storage + let mock_db = testing::ExternalStorage { + raw_storage: external_storage.inner.read().unwrap().raw_storage.clone(), + }; + let node = InMemoryNode::new( + Some(ForkDetails { + fork_source: &mock_db, + l1_block: L1BatchNumber(1), + l2_block: Block::default(), + l2_miniblock: 2, + l2_miniblock_hash: Default::default(), + block_timestamp: 1002, + overwrite_chain_id: None, + l1_gas_price: 1000, + }), + None, + Default::default(), + ); + + node.run_l2_tx_raw( + testing::TransactionBuilder::new().build(), + TxExecutionMode::VerifyExecute, + ) + .expect("transaction must pass with external storage"); + } } diff --git a/src/testing.rs b/src/testing.rs index 07346be5..4e7a3609 100644 --- a/src/testing.rs +++ b/src/testing.rs @@ -5,6 +5,7 @@ #![cfg(test)] +use crate::deps::InMemoryStorage; use crate::node::{InMemoryNode, TxExecutionInfo}; use crate::{fork::ForkSource, node::compute_hash}; @@ -17,9 +18,12 @@ use httptest::{ use itertools::Itertools; use multivm::interface::{ExecutionResult, VmExecutionResultAndLogs}; use std::str::FromStr; -use zksync_basic_types::{H160, U64}; -use zksync_types::api::{BridgeAddresses, DebugCall, DebugCallType, Log}; +use 
zksync_basic_types::{AccountTreeId, MiniblockNumber, H160, U64}; +use zksync_types::api::{BlockIdVariant, BridgeAddresses, DebugCall, DebugCallType, Log}; +use zksync_types::block::pack_block_info; +use zksync_types::StorageKey; use zksync_types::{fee::Fee, l2::L2Tx, Address, L2ChainId, Nonce, ProtocolVersionId, H256, U256}; +use zksync_utils::u256_to_h256; /// Configuration for the [MockServer]'s initial block. #[derive(Default, Debug, Clone)] @@ -668,7 +672,121 @@ pub fn assert_bridge_addresses_eq( ); } +/// Represents a read-only fork source that is backed by the provided [InMemoryStorage]. +#[derive(Debug, Clone)] +pub struct ExternalStorage { + pub raw_storage: InMemoryStorage, +} + +impl ForkSource for &ExternalStorage { + fn get_storage_at( + &self, + address: H160, + idx: U256, + _block: Option, + ) -> eyre::Result { + let key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(idx)); + Ok(self + .raw_storage + .state + .get(&key) + .cloned() + .unwrap_or_default()) + } + + fn get_raw_block_transactions( + &self, + _block_number: MiniblockNumber, + ) -> eyre::Result> { + todo!() + } + + fn get_bytecode_by_hash(&self, hash: H256) -> eyre::Result>> { + Ok(self.raw_storage.factory_deps.get(&hash).cloned()) + } + + fn get_transaction_by_hash( + &self, + _hash: H256, + ) -> eyre::Result> { + todo!() + } + + fn get_transaction_details( + &self, + _hash: H256, + ) -> eyre::Result> { + todo!() + } + + fn get_block_by_hash( + &self, + _hash: H256, + _full_transactions: bool, + ) -> eyre::Result>> { + todo!() + } + + fn get_block_by_number( + &self, + _block_number: zksync_types::api::BlockNumber, + _full_transactions: bool, + ) -> eyre::Result>> { + todo!() + } + + fn get_block_details( + &self, + _miniblock: MiniblockNumber, + ) -> eyre::Result> { + todo!() + } + + fn get_block_transaction_count_by_hash(&self, _block_hash: H256) -> eyre::Result> { + todo!() + } + + fn get_block_transaction_count_by_number( + &self, + _block_number: 
zksync_types::api::BlockNumber, + ) -> eyre::Result> { + todo!() + } + + fn get_transaction_by_block_hash_and_index( + &self, + _block_hash: H256, + _index: zksync_basic_types::web3::types::Index, + ) -> eyre::Result> { + todo!() + } + + fn get_transaction_by_block_number_and_index( + &self, + _block_number: zksync_types::api::BlockNumber, + _index: zksync_basic_types::web3::types::Index, + ) -> eyre::Result> { + todo!() + } + + fn get_bridge_contracts(&self) -> eyre::Result { + todo!() + } + + fn get_confirmed_tokens( + &self, + _from: u32, + _limit: u8, + ) -> eyre::Result> { + todo!() + } +} + mod test { + use maplit::hashmap; + use zksync_types::block::unpack_block_info; + use zksync_utils::h256_to_u256; + use super::*; use crate::http_fork_source::HttpForkSource; @@ -814,4 +932,73 @@ mod test { log.topics ); } + + #[test] + fn test_external_storage() { + let input_batch = 1; + let input_l2_block = 2; + let input_timestamp = 3; + let input_bytecode = vec![0x4]; + let batch_key = StorageKey::new( + AccountTreeId::new(zksync_types::SYSTEM_CONTEXT_ADDRESS), + zksync_types::SYSTEM_CONTEXT_BLOCK_INFO_POSITION, + ); + let l2_block_key = StorageKey::new( + AccountTreeId::new(zksync_types::SYSTEM_CONTEXT_ADDRESS), + zksync_types::SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + ); + + let storage = &ExternalStorage { + raw_storage: InMemoryStorage { + state: hashmap! { + batch_key => u256_to_h256(U256::from(input_batch)), + l2_block_key => u256_to_h256(pack_block_info( + input_l2_block, + input_timestamp, + )) + }, + factory_deps: hashmap! 
{ + H256::repeat_byte(0x1) => input_bytecode.clone(), + }, + }, + }; + + let actual_batch = storage + .get_storage_at( + zksync_types::SYSTEM_CONTEXT_ADDRESS, + h256_to_u256(zksync_types::SYSTEM_CONTEXT_BLOCK_INFO_POSITION), + None, + ) + .map(|value| h256_to_u256(value).as_u64()) + .expect("failed getting batch number"); + assert_eq!(input_batch, actual_batch); + + let (actual_l2_block, actual_timestamp) = storage + .get_storage_at( + zksync_types::SYSTEM_CONTEXT_ADDRESS, + h256_to_u256(zksync_types::SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION), + None, + ) + .map(|value| unpack_block_info(h256_to_u256(value))) + .expect("failed getting l2 block info"); + assert_eq!(input_l2_block, actual_l2_block); + assert_eq!(input_timestamp, actual_timestamp); + + let zero_missing_value = storage + .get_storage_at( + zksync_types::SYSTEM_CONTEXT_ADDRESS, + h256_to_u256(H256::repeat_byte(0x1e)), + None, + ) + .map(|value| h256_to_u256(value).as_u64()) + .expect("failed missing value"); + assert_eq!(0, zero_missing_value); + + let actual_bytecode = storage + .get_bytecode_by_hash(H256::repeat_byte(0x1)) + .ok() + .expect("failed getting bytecode") + .expect("missing bytecode"); + assert_eq!(input_bytecode, actual_bytecode); + } }