diff --git a/SUPPORTED_APIS.md b/SUPPORTED_APIS.md
index 0206b109..905d4f0e 100644
--- a/SUPPORTED_APIS.md
+++ b/SUPPORTED_APIS.md
@@ -14,6 +14,8 @@ The `status` options are:
| Namespace | API | Status | Description |
| --- | --- | --- | --- |
+| `ANVIL` | `anvil_setBlockTimestampInterval` | `SUPPORTED` | Sets the block timestamp interval |
+| `ANVIL` | `anvil_removeBlockTimestampInterval` | `SUPPORTED` | Removes the block timestamp interval |
| `ANVIL` | `anvil_setMinGasPrice` | `NOT IMPLEMENTED` | Set the minimum gas price for the node. Unsupported for ZKsync as it is only relevant for pre-EIP1559 chains |
| `ANVIL` | `anvil_setLoggingEnabled` | `SUPPORTED` | Enables or disables logging |
| `ANVIL` | `anvil_snapshot` | `SUPPORTED` | Snapshot the state of the blockchain at the current block |
diff --git a/e2e-tests/test/anvil-apis.test.ts b/e2e-tests/test/anvil-apis.test.ts
index 82cc75b4..2afffdbd 100644
--- a/e2e-tests/test/anvil-apis.test.ts
+++ b/e2e-tests/test/anvil-apis.test.ts
@@ -11,6 +11,47 @@ import * as fs from "node:fs";
const provider = getTestProvider();
+describe("anvil_setBlockTimestampInterval & anvil_removeBlockTimestampInterval", function () {
+ it("Should control timestamp interval between blocks", async function () {
+ // Arrange
+ const interval = 42;
+ let expectedTimestamp: number = await provider.send("config_getCurrentTimestamp", []);
+ expectedTimestamp += interval;
+ const wallet = new Wallet(RichAccounts[0].PrivateKey, provider);
+ const userWallet = Wallet.createRandom().connect(provider);
+
+ // Set interval
+ await provider.send("anvil_setBlockTimestampInterval", [interval]);
+
+ const txResponse = await wallet.sendTransaction({
+ to: userWallet.address,
+ value: ethers.utils.parseEther("0.1"),
+ });
+ const txReceipt = await txResponse.wait();
+
+    // Assert the new block's timestamp is `interval` seconds after the starting timestamp
+ const newBlockTimestamp = (await provider.getBlock(txReceipt.blockNumber)).timestamp;
+ expect(newBlockTimestamp).to.equal(expectedTimestamp);
+
+    // Accommodate for the virtual block
+ expectedTimestamp += interval;
+
+ // Remove interval
+ const result: boolean = await provider.send("anvil_removeBlockTimestampInterval", []);
+    expect(result).to.equal(true);
+
+ const txResponse2 = await wallet.sendTransaction({
+ to: userWallet.address,
+ value: ethers.utils.parseEther("0.1"),
+ });
+ const txReceipt2 = await txResponse2.wait();
+
+    // Assert the new block's timestamp is `1` second after the previous block
+ const newBlockTimestamp2 = (await provider.getBlock(txReceipt2.blockNumber)).timestamp;
+ expect(newBlockTimestamp2).to.equal(expectedTimestamp + 1);
+ });
+});
+
describe("anvil_setLoggingEnabled", function () {
it("Should disable and enable logging", async function () {
// Arrange
diff --git a/src/namespaces/anvil.rs b/src/namespaces/anvil.rs
index 61f13c00..b77e320a 100644
--- a/src/namespaces/anvil.rs
+++ b/src/namespaces/anvil.rs
@@ -6,6 +6,23 @@ use crate::utils::Numeric;
#[rpc]
pub trait AnvilNamespaceT {
+    /// Sets the block timestamp interval. All future blocks' timestamps will
+    /// be spaced the provided number of seconds apart. Does not affect
+    /// the block production interval.
+ ///
+ /// # Arguments
+ ///
+    /// * `seconds` - The interval between two consecutive blocks' timestamps, in seconds
+ #[rpc(name = "anvil_setBlockTimestampInterval")]
+ fn set_block_timestamp_interval(&self, seconds: u64) -> RpcResult<()>;
+
+ /// Removes the block timestamp interval if it exists.
+ ///
+ /// # Returns
+ /// `true` if an existing interval was removed, `false` otherwise
+ #[rpc(name = "anvil_removeBlockTimestampInterval")]
+    fn remove_block_timestamp_interval(&self) -> RpcResult<bool>;
+
/// Set the minimum gas price for the node. Unsupported for ZKsync as it is only relevant for
/// pre-EIP1559 chains.
///
diff --git a/src/node/anvil.rs b/src/node/anvil.rs
index ff208bd0..96ef63a2 100644
--- a/src/node/anvil.rs
+++ b/src/node/anvil.rs
@@ -12,6 +12,15 @@ use crate::{
impl<S: ForkSource + std::fmt::Debug + Clone + Send + Sync + 'static> AnvilNamespaceT
for InMemoryNode<S>
{
+ fn set_block_timestamp_interval(&self, seconds: u64) -> RpcResult<()> {
+ self.time.set_block_timestamp_interval(seconds);
+ Ok(()).into_boxed_future()
+ }
+
+    fn remove_block_timestamp_interval(&self) -> RpcResult<bool> {
+ Ok(self.time.remove_block_timestamp_interval()).into_boxed_future()
+ }
+
fn set_min_gas_price(&self, _gas: U256) -> RpcResult<()> {
tracing::info!("anvil_setMinGasPrice is unsupported as ZKsync is a post-EIP1559 chain");
Err(into_jsrpc_error(Web3Error::MethodNotImplemented)).into_boxed_future()
diff --git a/src/node/block_producer.rs b/src/node/block_producer.rs
index 8f81a15e..4302eb07 100644
--- a/src/node/block_producer.rs
+++ b/src/node/block_producer.rs
@@ -46,7 +46,7 @@ impl Future for BlockProducer {
.contracts(TxExecutionMode::VerifyExecute, impersonating)
.clone();
pin.node
- .seal_block(txs, base_system_contracts)
+ .seal_block(&mut pin.node.time.lock(), txs, base_system_contracts)
.expect("block sealing failed");
}
}
diff --git a/src/node/config_api.rs b/src/node/config_api.rs
index cbc03ccc..4536db88 100644
--- a/src/node/config_api.rs
+++ b/src/node/config_api.rs
@@ -1,5 +1,6 @@
use zksync_web3_decl::error::Web3Error;
+use crate::node::time::ReadTime;
use crate::{
config::show_details::{ShowCalls, ShowGasDetails, ShowStorageLogs, ShowVMDetails},
fork::ForkSource,
@@ -37,7 +38,7 @@ impl Configurat
}
fn config_get_current_timestamp(&self) -> Result<u64> {
- Ok(self.time.last_timestamp())
+ Ok(self.time.current_timestamp())
}
fn config_set_show_calls(&self, value: String) -> Result {
diff --git a/src/node/debug.rs b/src/node/debug.rs
index 0e2ff817..9dbe6be5 100644
--- a/src/node/debug.rs
+++ b/src/node/debug.rs
@@ -144,6 +144,7 @@ impl DebugNames
) -> RpcResult {
let only_top = options.is_some_and(|o| o.tracer_config.only_top_call);
let inner = self.get_inner().clone();
+ let time = self.time.clone();
Box::pin(async move {
if block.is_some() && !matches!(block, Some(BlockId::Number(BlockNumber::Latest))) {
return Err(jsonrpc_core::Error::invalid_params(
@@ -165,7 +166,8 @@ impl DebugNames
let storage = StorageView::new(&inner.fork_storage).into_rc_ptr();
// init vm
- let (mut l1_batch_env, _block_context) = inner.create_l1_batch_env(storage.clone());
+ let (mut l1_batch_env, _block_context) =
+ inner.create_l1_batch_env(&time, storage.clone());
// update the enforced_base_fee within l1_batch_env to match the logic in zksync_core
l1_batch_env.enforced_base_fee = Some(l2_tx.common_data.fee.max_fee_per_gas.as_u64());
diff --git a/src/node/eth.rs b/src/node/eth.rs
index 7ad014ee..388e167a 100644
--- a/src/node/eth.rs
+++ b/src/node/eth.rs
@@ -662,7 +662,7 @@ impl EthNamespa
}
};
-        let result: jsonrpc_core::Result<Fee> = reader.estimate_gas_impl(req);
+        let result: jsonrpc_core::Result<Fee> = reader.estimate_gas_impl(&self.time, req);
match result {
Ok(fee) => Ok(fee.gas_limit).into_boxed_future(),
Err(err) => return futures::future::err(err).boxed(),
@@ -2842,7 +2842,7 @@ mod tests {
inner.current_batch = 1;
inner.current_miniblock = 1;
inner.current_miniblock_hash = H256::repeat_byte(0x1);
- inner.time.set_last_timestamp_unchecked(1);
+ node.time.set_current_timestamp_unchecked(1);
inner
.filters
.add_block_filter()
@@ -2859,7 +2859,6 @@ mod tests {
let storage = inner.fork_storage.inner.read().unwrap();
let expected_snapshot = Snapshot {
- current_timestamp: inner.time.last_timestamp(),
current_batch: inner.current_batch,
current_miniblock: inner.current_miniblock,
current_miniblock_hash: inner.current_miniblock_hash,
@@ -2877,10 +2876,6 @@ mod tests {
};
let actual_snapshot = inner.snapshot().expect("failed taking snapshot");
- assert_eq!(
- expected_snapshot.current_timestamp,
- actual_snapshot.current_timestamp
- );
assert_eq!(
expected_snapshot.current_batch,
actual_snapshot.current_batch
@@ -2948,7 +2943,7 @@ mod tests {
inner.current_batch = 1;
inner.current_miniblock = 1;
inner.current_miniblock_hash = H256::repeat_byte(0x1);
- inner.time.set_last_timestamp_unchecked(1);
+ node.time.set_current_timestamp_unchecked(1);
inner
.filters
.add_block_filter()
@@ -2966,7 +2961,6 @@ mod tests {
let expected_snapshot = {
let storage = inner.fork_storage.inner.read().unwrap();
Snapshot {
- current_timestamp: inner.time.last_timestamp(),
current_batch: inner.current_batch,
current_miniblock: inner.current_miniblock,
current_miniblock_hash: inner.current_miniblock_hash,
@@ -3001,7 +2995,7 @@ mod tests {
inner.current_batch = 2;
inner.current_miniblock = 2;
inner.current_miniblock_hash = H256::repeat_byte(0x2);
- inner.time.set_last_timestamp_unchecked(2);
+ node.time.set_current_timestamp_unchecked(2);
inner
.filters
.add_pending_transaction_filter()
@@ -3022,10 +3016,6 @@ mod tests {
.expect("failed restoring snapshot");
let storage = inner.fork_storage.inner.read().unwrap();
- assert_eq!(
- expected_snapshot.current_timestamp,
- inner.time.last_timestamp()
- );
assert_eq!(expected_snapshot.current_batch, inner.current_batch);
assert_eq!(expected_snapshot.current_miniblock, inner.current_miniblock);
assert_eq!(
diff --git a/src/node/in_memory.rs b/src/node/in_memory.rs
index 230de7aa..8537cc16 100644
--- a/src/node/in_memory.rs
+++ b/src/node/in_memory.rs
@@ -47,7 +47,7 @@ use zksync_utils::{bytecode::hash_bytecode, h256_to_account_address, h256_to_u25
use zksync_web3_decl::error::Web3Error;
use crate::node::impersonate::{ImpersonationManager, ImpersonationState};
-use crate::node::time::TimestampManager;
+use crate::node::time::{AdvanceTime, ReadTime, TimestampManager};
use crate::node::TxPool;
use crate::{
bootloader_debug::{BootloaderDebug, BootloaderDebugTracer},
@@ -231,8 +231,6 @@ impl TransactionResult {
/// S - is the Source of the Fork.
#[derive(Clone)]
pub struct InMemoryNodeInner<S> {
- /// Supplies timestamps that are unique across the system.
- pub time: TimestampManager,
/// The latest batch number that was already generated.
/// Next block will be current_batch + 1
pub current_batch: u32,
@@ -275,7 +273,7 @@ impl InMemoryNodeInner {
pub fn new(
fork: Option<ForkDetails<S>>,
config: &TestNodeConfig,
- time: TimestampManager,
+ time: &TimestampManager,
impersonation: ImpersonationManager,
) -> Self {
let updated_config = config.clone();
@@ -302,10 +300,9 @@ impl InMemoryNodeInner {
f.estimate_gas_scale_factor,
)
};
- time.set_last_timestamp_unchecked(f.block_timestamp);
+ time.set_current_timestamp_unchecked(f.block_timestamp);
InMemoryNodeInner {
- time,
current_batch: f.l1_block.0,
current_miniblock: f.l2_miniblock,
current_miniblock_hash: f.l2_miniblock_hash,
@@ -344,10 +341,9 @@ impl InMemoryNodeInner {
blocks.insert(block_hash, genesis_block);
let fee_input_provider = TestNodeFeeInputProvider::default();
- time.set_last_timestamp_unchecked(NON_FORK_FIRST_BLOCK_TIMESTAMP);
+ time.set_current_timestamp_unchecked(NON_FORK_FIRST_BLOCK_TIMESTAMP);
InMemoryNodeInner {
- time,
current_batch: 0,
current_miniblock: 0,
current_miniblock_hash: block_hash,
@@ -380,31 +376,28 @@ impl InMemoryNodeInner {
/// We compute l1/l2 block details from storage to support fork testing, where the storage
/// can be updated mid execution and no longer matches with the initial node's state.
/// The L1 & L2 timestamps are also compared with node's timestamp to ensure it always increases monotonically.
-    pub fn create_l1_batch_env(
+    pub fn create_l1_batch_env<T: ReadTime>(
&self,
+ time: &T,
storage: StoragePtr,
) -> (L1BatchEnv, BlockContext) {
tracing::debug!("Creating l1 batch env...");
- let (last_l1_block_num, last_l1_block_ts) = load_last_l1_batch(storage.clone())
- .map(|(num, ts)| (num as u32, ts))
- .unwrap_or_else(|| (self.current_batch, self.time.last_timestamp()));
+ let last_l1_block_num = load_last_l1_batch(storage.clone())
+ .map(|(num, _)| num as u32)
+ .unwrap_or(self.current_batch);
let last_l2_block = load_last_l2_block(&storage).unwrap_or_else(|| L2Block {
number: self.current_miniblock as u32,
hash: L2BlockHasher::legacy_hash(L2BlockNumber(self.current_miniblock as u32)),
- timestamp: self.time.last_timestamp(),
+ timestamp: time.current_timestamp(),
});
- let latest_timestamp = std::cmp::max(
- std::cmp::max(last_l1_block_ts, last_l2_block.timestamp),
- self.time.last_timestamp(),
- );
- let block_ctx = BlockContext::from_current(
- last_l1_block_num,
- last_l2_block.number as u64,
- latest_timestamp,
- )
- .new_batch();
+ let block_ctx = BlockContext {
+ hash: H256::zero(),
+ batch: last_l1_block_num.saturating_add(1),
+ miniblock: (last_l2_block.number as u64).saturating_add(1),
+ timestamp: time.peek_next_timestamp(),
+ };
let fee_input = if let Some(fork) = &self
.fork_storage
@@ -476,8 +469,9 @@ impl InMemoryNodeInner {
/// # Returns
///
/// A `Result` with a `Fee` representing the estimated gas related data.
-    pub fn estimate_gas_impl(
+    pub fn estimate_gas_impl<T: ReadTime>(
&self,
+ time: &T,
req: zksync_types::transaction_request::CallRequest,
) -> jsonrpc_core::Result<Fee> {
let mut request_with_gas_per_pubdata_overridden = req;
@@ -547,7 +541,7 @@ impl InMemoryNodeInner {
let storage = storage_view.into_rc_ptr();
let execution_mode = TxExecutionMode::EstimateFee;
- let (mut batch_env, _) = self.create_l1_batch_env(storage.clone());
+ let (mut batch_env, _) = self.create_l1_batch_env(time, storage.clone());
batch_env.fee_input = fee_input;
let system_env = self.create_system_env(system_contracts, execution_mode);
@@ -842,7 +836,6 @@ impl InMemoryNodeInner {
.map_err(|err| format!("failed acquiring read lock on storage: {:?}", err))?;
Ok(Snapshot {
- current_timestamp: self.time.last_timestamp(),
current_batch: self.current_batch,
current_miniblock: self.current_miniblock,
current_miniblock_hash: self.current_miniblock_hash,
@@ -868,8 +861,6 @@ impl InMemoryNodeInner {
.write()
.map_err(|err| format!("failed acquiring write lock on storage: {:?}", err))?;
- self.time
- .set_last_timestamp_unchecked(snapshot.current_timestamp);
self.current_batch = snapshot.current_batch;
self.current_miniblock = snapshot.current_miniblock;
self.current_miniblock_hash = snapshot.current_miniblock_hash;
@@ -887,13 +878,64 @@ impl InMemoryNodeInner {
Ok(())
}
+
+    fn apply_block<T: AdvanceTime>(
+ &mut self,
+ time: &mut T,
+        block: Block<TransactionVariant>,
+ index: u32,
+ ) {
+ // archive current state before we produce new batch/blocks
+ if let Err(err) = self.archive_state() {
+ tracing::error!(
+ "failed archiving state for block {}: {}",
+ self.current_miniblock,
+ err
+ );
+ }
+
+ self.current_miniblock = self.current_miniblock.saturating_add(1);
+ let expected_timestamp = time.advance_timestamp();
+
+ let actual_l1_batch_number = block
+ .l1_batch_number
+ .expect("block must have a l1_batch_number");
+ if actual_l1_batch_number.as_u32() != self.current_batch {
+ panic!(
+ "expected next block to have batch_number {}, got {}",
+ self.current_batch,
+ actual_l1_batch_number.as_u32()
+ );
+ }
+
+ if block.number.as_u64() != self.current_miniblock {
+ panic!(
+ "expected next block to have miniblock {}, got {} | {index}",
+ self.current_miniblock,
+ block.number.as_u64()
+ );
+ }
+
+ if block.timestamp.as_u64() != expected_timestamp {
+ panic!(
+ "expected next block to have timestamp {}, got {} | {index}",
+ expected_timestamp,
+ block.timestamp.as_u64()
+ );
+ }
+
+ let block_hash = block.hash;
+ self.current_miniblock_hash = block_hash;
+ self.block_hashes.insert(block.number.as_u64(), block.hash);
+ self.blocks.insert(block.hash, block);
+ self.filters.notify_new_block(block_hash);
+ }
}
/// Creates a restorable snapshot for the [InMemoryNodeInner]. The snapshot contains all the necessary
/// data required to restore the [InMemoryNodeInner] state to a previous point in time.
#[derive(Debug, Clone, Default)]
pub struct Snapshot {
- pub(crate) current_timestamp: u64,
pub(crate) current_batch: u32,
pub(crate) current_miniblock: u64,
pub(crate) current_miniblock_hash: H256,
@@ -964,7 +1006,7 @@ impl InMemoryNode {
pool: TxPool,
) -> Self {
let system_contracts_options = config.system_contracts_options;
- let inner = InMemoryNodeInner::new(fork, config, time.clone(), impersonation.clone());
+ let inner = InMemoryNodeInner::new(fork, config, &time, impersonation.clone());
InMemoryNode {
inner: Arc::new(RwLock::new(inner)),
snapshots: Default::default(),
@@ -1021,12 +1063,7 @@ impl InMemoryNode {
pub fn reset(&self, fork: Option<ForkDetails<S>>) -> Result<(), String> {
let config = self.get_config()?;
- let inner = InMemoryNodeInner::new(
- fork,
- &config,
- TimestampManager::default(),
- ImpersonationManager::default(),
- );
+ let inner = InMemoryNodeInner::new(fork, &config, &self.time, self.impersonation.clone());
let mut writer = self
.snapshots
@@ -1063,11 +1100,13 @@ impl InMemoryNode {
pub fn apply_txs(&self, txs: Vec<L2Tx>) -> anyhow::Result<()> {
tracing::info!("Running {:?} transactions (one per batch)", txs.len());
+ // Lock time so that the produced blocks are guaranteed to be sequential in time.
+ let mut time = self.time.lock();
for tx in txs {
// Getting contracts is reasonably cheap, so we don't cache them. We may need differing contracts
// depending on whether impersonation should be enabled for a transaction.
let system_contracts = self.system_contracts_for_tx(tx.initiator_account())?;
- self.seal_block(vec![tx], system_contracts)?;
+ self.seal_block(&mut time, vec![tx], system_contracts)?;
}
Ok(())
@@ -1145,7 +1184,7 @@ impl InMemoryNode {
// init vm
- let (batch_env, _) = inner.create_l1_batch_env(storage.clone());
+ let (batch_env, _) = inner.create_l1_batch_env(&self.time, storage.clone());
let system_env = inner.create_system_env(base_contracts, execution_mode);
let mut vm: Vm<_, HistoryDisabled> = Vm::new(batch_env, system_env, storage.clone());
@@ -1684,8 +1723,12 @@ impl InMemoryNode {
Ok(())
}
- pub fn seal_block(
+    // The `AdvanceTime` bound ensures that we have exclusive writeable access to the time
+    // manager, meaning we can construct blocks and apply them without worrying about TOCTOU
+    // with timestamps.
+    pub fn seal_block<T: AdvanceTime>(
&self,
+ time: &mut T,
txs: Vec<L2Tx>,
system_contracts: BaseSystemContracts,
) -> anyhow::Result<L2BlockNumber> {
@@ -1696,7 +1739,7 @@ impl InMemoryNode {
.map_err(|_| anyhow::anyhow!("Failed to acquire read lock"))?;
let storage = StorageView::new(inner.fork_storage.clone()).into_rc_ptr();
let system_env = inner.create_system_env(system_contracts, TxExecutionMode::VerifyExecute);
- let (batch_env, mut block_ctx) = inner.create_l1_batch_env(storage.clone());
+ let (batch_env, mut block_ctx) = inner.create_l1_batch_env(time, storage.clone());
drop(inner);
let mut vm: Vm<_, HistoryDisabled> =
@@ -1775,7 +1818,8 @@ impl InMemoryNode {
gas_used,
logs_bloom,
);
- let mut blocks = vec![block];
+ inner.current_batch = inner.current_batch.saturating_add(1);
+ inner.apply_block(time, block, 0);
// Hack to ensure we don't mine twice the amount of requested empty blocks (i.e. one per
// batch).
@@ -1785,7 +1829,7 @@ impl InMemoryNode {
// we are adding one l2 block at the end of each batch (to handle things like remaining events etc).
// You can look at insert_fictive_l2_block function in VM to see how this fake block is inserted.
let parent_block_hash = block_ctx.hash;
- let block_ctx = block_ctx.new_block();
+ let block_ctx = block_ctx.new_block(time);
let hash = compute_hash(block_ctx.miniblock, []);
let virtual_block = create_block(
@@ -1798,56 +1842,7 @@ impl InMemoryNode {
U256::zero(),
Bloom::zero(),
);
- blocks.push(virtual_block);
- }
-
- inner.current_batch = inner.current_batch.saturating_add(1);
-
- for (i, block) in blocks.into_iter().enumerate() {
- // archive current state before we produce new batch/blocks
- if let Err(err) = inner.archive_state() {
- tracing::error!(
- "failed archiving state for block {}: {}",
- inner.current_miniblock,
- err
- );
- }
-
- inner.current_miniblock = inner.current_miniblock.saturating_add(1);
- let expected_timestamp = inner.time.next_timestamp();
-
- let actual_l1_batch_number = block
- .l1_batch_number
- .expect("block must have a l1_batch_number");
- if actual_l1_batch_number.as_u32() != inner.current_batch {
- panic!(
- "expected next block to have batch_number {}, got {}",
- inner.current_batch,
- actual_l1_batch_number.as_u32()
- );
- }
-
- if block.number.as_u64() != inner.current_miniblock {
- panic!(
- "expected next block to have miniblock {}, got {} | {i}",
- inner.current_miniblock,
- block.number.as_u64()
- );
- }
-
- if block.timestamp.as_u64() != expected_timestamp {
- panic!(
- "expected next block to have timestamp {}, got {} | {i}",
- expected_timestamp,
- block.timestamp.as_u64()
- );
- }
-
- let block_hash = block.hash;
- inner.current_miniblock_hash = block_hash;
- inner.block_hashes.insert(block.number.as_u64(), block.hash);
- inner.blocks.insert(block.hash, block);
- inner.filters.notify_new_block(block_hash);
+ inner.apply_block(time, virtual_block, 1);
}
Ok(L2BlockNumber(block_ctx.miniblock as u32))
@@ -1885,33 +1880,13 @@ pub struct BlockContext {
}
impl BlockContext {
- /// Create the current instance that represents the latest block.
- pub fn from_current(batch: u32, miniblock: u64, timestamp: u64) -> Self {
- Self {
- hash: H256::zero(),
- batch,
- miniblock,
- timestamp,
- }
- }
-
- /// Create the next batch instance that has all parameters incremented by `1`.
- pub fn new_batch(&self) -> Self {
- Self {
- hash: H256::zero(),
- batch: self.batch.saturating_add(1),
- miniblock: self.miniblock.saturating_add(1),
- timestamp: self.timestamp.saturating_add(1),
- }
- }
-
/// Create the next batch instance that uses the same batch number, and has all other parameters incremented by `1`.
-    pub fn new_block(&self) -> BlockContext {
+    pub fn new_block<T: ReadTime>(&self, time: &T) -> BlockContext {
Self {
hash: H256::zero(),
batch: self.batch,
miniblock: self.miniblock.saturating_add(1),
- timestamp: self.timestamp.saturating_add(1),
+ timestamp: time.peek_next_timestamp(),
}
}
}
@@ -1966,7 +1941,7 @@ mod tests {
let inner = node.inner.read().unwrap();
let storage = StorageView::new(inner.fork_storage.clone()).into_rc_ptr();
let system_env = inner.create_system_env(system_contracts, TxExecutionMode::VerifyExecute);
- let (batch_env, block_ctx) = inner.create_l1_batch_env(storage.clone());
+ let (batch_env, block_ctx) = inner.create_l1_batch_env(&node.time, storage.clone());
let vm: Vm<_, HistoryDisabled> = Vm::new(batch_env.clone(), system_env, storage);
(block_ctx, batch_env, vm)
@@ -2063,7 +2038,8 @@ mod tests {
let system_contracts = node
.system_contracts_for_tx(tx.initiator_account())
.unwrap();
- node.seal_block(vec![tx], system_contracts).unwrap();
+ node.seal_block(&mut node.time.lock(), vec![tx], system_contracts)
+ .unwrap();
let external_storage = node.inner.read().unwrap().fork_storage.clone();
// Execute next transaction using a fresh in-memory node and the external fork storage
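Reviewer note: the refactored `seal_block` threads an exclusive time lock through block production instead of reading the clock internally. Below is a minimal sketch of the new calling convention, mirroring the pattern `apply_txs` and `mine_block` use in this diff; `mine_two_blocks` is a hypothetical helper, not part of the change:

```rust
// Hypothetical helper: hold one `AdvanceTime` lock across both seals so no
// other caller can advance the clock in between.
fn mine_two_blocks(node: &InMemoryNode<HttpForkSource>) -> anyhow::Result<()> {
    let bootloader_code = node
        .get_inner()
        .read()
        .map_err(|err| anyhow::anyhow!("failed acquiring lock: {:?}", err))
        .map(|inner| inner.system_contracts.contracts_for_l2_call().clone())?;
    // The guard implements `AdvanceTime`; both blocks draw timestamps from the
    // same locked clock and are therefore guaranteed to be sequential in time.
    let mut time = node.time.lock();
    node.seal_block(&mut time, vec![], bootloader_code.clone())?;
    node.seal_block(&mut time, vec![], bootloader_code)?;
    Ok(())
}
```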
diff --git a/src/node/in_memory_ext.rs b/src/node/in_memory_ext.rs
index 9c7bcf6c..1fb476a1 100644
--- a/src/node/in_memory_ext.rs
+++ b/src/node/in_memory_ext.rs
@@ -46,7 +46,7 @@ impl InMemoryNo
/// The new timestamp value for the InMemoryNodeInner.
pub fn set_next_block_timestamp(&self, timestamp: Numeric) -> Result<()> {
let timestamp: u64 = timestamp.try_into().context("The timestamp is too big")?;
- self.time.advance_timestamp(timestamp - 1)
+ self.time.enforce_next_timestamp(timestamp)
}
/// Set the current timestamp for the node.
@@ -59,7 +59,7 @@ impl InMemoryNo
/// # Returns
/// The difference between the `current_timestamp` and the new timestamp for the InMemoryNodeInner.
pub fn set_time(&self, timestamp: Numeric) -> Result<i128> {
- Ok(self.time.set_last_timestamp_unchecked(
+ Ok(self.time.set_current_timestamp_unchecked(
timestamp.try_into().context("The timestamp is too big")?,
))
}
@@ -76,7 +76,8 @@ impl InMemoryNo
.read()
.map_err(|err| anyhow!("failed acquiring lock: {:?}", err))
.map(|inner| inner.system_contracts.contracts_for_l2_call().clone())?;
- let block_number = self.seal_block(vec![], bootloader_code.clone())?;
+ let block_number =
+ self.seal_block(&mut self.time.lock(), vec![], bootloader_code.clone())?;
tracing::info!("👷 Mined block #{}", block_number);
Ok("0x0".to_string())
}
@@ -207,21 +208,20 @@ impl InMemoryNo
if num_blocks == 0 {
return Ok(());
}
+ if num_blocks > 1 && interval_sec == 0 {
+ anyhow::bail!("Provided interval is `0`; unable to produce {num_blocks} blocks with the same timestamp");
+ }
let bootloader_code = self
.get_inner()
.read()
.map_err(|err| anyhow!("failed acquiring lock: {:?}", err))
.map(|inner| inner.system_contracts.contracts_for_l2_call().clone())?;
- for i in 0..num_blocks {
- if i != 0 {
- // Accounts for the default increment of 1 done in `seal_block`. Note that
- // there is no guarantee that blocks produced by this method will have *exactly*
- // `interval` seconds in-between of their respective timestamps. Instead, we treat
- // it as the minimum amount of time that should have passed in-between of blocks.
- self.time.increase_time(interval_sec.saturating_sub(1));
- }
- self.seal_block(vec![], bootloader_code.clone())?;
+ let mut time = self
+ .time
+ .lock_with_offsets((0..num_blocks).map(|i| i * interval_sec));
+ for _ in 0..num_blocks {
+ self.seal_block(&mut time, vec![], bootloader_code.clone())?;
}
tracing::info!("👷 Mined {} blocks", num_blocks);
@@ -367,8 +367,8 @@ mod tests {
use super::*;
use crate::fork::ForkStorage;
use crate::namespaces::EthNamespaceT;
- use crate::node::time::TimestampManager;
- use crate::node::{InMemoryNodeInner, Snapshot, TxPool};
+ use crate::node::time::{ReadTime, TimestampManager};
+ use crate::node::{ImpersonationManager, InMemoryNodeInner, Snapshot, TxPool};
use crate::{http_fork_source::HttpForkSource, node::InMemoryNode};
use std::str::FromStr;
use std::sync::{Arc, RwLock};
@@ -478,8 +478,9 @@ mod tests {
async fn test_reset() {
let old_snapshots = Arc::new(RwLock::new(vec![Snapshot::default()]));
let old_system_contracts_options = Default::default();
+ let time = TimestampManager::new(123);
+ let impersonation = ImpersonationManager::default();
let old_inner = InMemoryNodeInner::<HttpForkSource> {
- time: TimestampManager::new(123),
current_batch: 100,
current_miniblock: 300,
current_miniblock_hash: H256::random(),
@@ -492,12 +493,10 @@ mod tests {
config: Default::default(),
console_log_handler: Default::default(),
system_contracts: Default::default(),
- impersonation: Default::default(),
+ impersonation: impersonation.clone(),
rich_accounts: Default::default(),
previous_states: Default::default(),
};
- let time = old_inner.time.clone();
- let impersonation = old_inner.impersonation.clone();
let pool = TxPool::new(impersonation.clone());
let node = InMemoryNode::<HttpForkSource> {
@@ -525,7 +524,7 @@ mod tests {
assert_eq!(node.snapshots.read().unwrap().len(), 0);
let inner = node.inner.read().unwrap();
- assert_eq!(inner.time.last_timestamp(), 1000);
+ assert_eq!(node.time.current_timestamp(), 1000);
assert_eq!(inner.current_batch, 0);
assert_eq!(inner.current_miniblock, 0);
assert_ne!(inner.current_miniblock_hash, H256::random());
@@ -659,21 +658,13 @@ mod tests {
let node = InMemoryNode::<HttpForkSource>::default();
let increase_value_seconds = 0u64;
- let timestamp_before = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_before = node.time.current_timestamp();
let expected_response = increase_value_seconds;
let actual_response = node
.increase_time(increase_value_seconds.into())
.expect("failed increasing timestamp");
- let timestamp_after = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_after = node.time.current_timestamp();
assert_eq!(expected_response, actual_response, "erroneous response");
assert_eq!(
@@ -688,22 +679,14 @@ mod tests {
let node = InMemoryNode::<HttpForkSource>::default();
let increase_value_seconds = u64::MAX;
- let timestamp_before = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_before = node.time.current_timestamp();
assert_ne!(0, timestamp_before, "initial timestamp must be non zero",);
let expected_response = increase_value_seconds;
let actual_response = node
.increase_time(increase_value_seconds.into())
.expect("failed increasing timestamp");
- let timestamp_after = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_after = node.time.current_timestamp();
assert_eq!(expected_response, actual_response, "erroneous response");
assert_eq!(
@@ -718,21 +701,13 @@ mod tests {
let node = InMemoryNode::<HttpForkSource>::default();
let increase_value_seconds = 100u64;
- let timestamp_before = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_before = node.time.current_timestamp();
let expected_response = increase_value_seconds;
let actual_response = node
.increase_time(increase_value_seconds.into())
.expect("failed increasing timestamp");
- let timestamp_after = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_after = node.time.current_timestamp();
assert_eq!(expected_response, actual_response, "erroneous response");
assert_eq!(
@@ -747,11 +722,7 @@ mod tests {
let node = InMemoryNode::<HttpForkSource>::default();
let new_timestamp = 10_000u64;
- let timestamp_before = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_before = node.time.current_timestamp();
assert_ne!(
timestamp_before, new_timestamp,
"timestamps must be different"
@@ -759,15 +730,11 @@ mod tests {
node.set_next_block_timestamp(new_timestamp.into())
.expect("failed setting timestamp");
- let timestamp_after = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ node.mine_block().expect("failed to mine a block");
+ let timestamp_after = node.time.current_timestamp();
assert_eq!(
- new_timestamp,
- timestamp_after + 1,
+ new_timestamp, timestamp_after,
"timestamp was not set correctly",
);
}
@@ -776,16 +743,14 @@ mod tests {
async fn test_set_next_block_timestamp_past_fails() {
let node = InMemoryNode::<HttpForkSource>::default();
- let timestamp_before = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_before = node.time.current_timestamp();
let new_timestamp = timestamp_before + 500;
node.set_next_block_timestamp(new_timestamp.into())
.expect("failed setting timestamp");
+ node.mine_block().expect("failed to mine a block");
+
let result = node.set_next_block_timestamp(timestamp_before.into());
assert!(result.is_err(), "expected an error for timestamp in past");
@@ -796,21 +761,13 @@ mod tests {
let node = InMemoryNode::<HttpForkSource>::default();
let new_timestamp = 1000u64;
- let timestamp_before = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_before = node.time.current_timestamp();
assert_eq!(timestamp_before, new_timestamp, "timestamps must be same");
let response = node.set_next_block_timestamp(new_timestamp.into());
assert!(response.is_err());
- let timestamp_after = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_after = node.time.current_timestamp();
assert_eq!(
timestamp_before, timestamp_after,
"timestamp must not change",
@@ -822,22 +779,14 @@ mod tests {
let node = InMemoryNode::<HttpForkSource>::default();
let new_time = 10_000u64;
- let timestamp_before = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_before = node.time.current_timestamp();
assert_ne!(timestamp_before, new_time, "timestamps must be different");
let expected_response = 9000;
let actual_response = node
.set_time(new_time.into())
.expect("failed setting timestamp");
- let timestamp_after = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_after = node.time.current_timestamp();
assert_eq!(expected_response, actual_response, "erroneous response");
assert_eq!(new_time, timestamp_after, "timestamp was not set correctly",);
@@ -848,22 +797,14 @@ mod tests {
let node = InMemoryNode::<HttpForkSource>::default();
let new_time = 10u64;
- let timestamp_before = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_before = node.time.current_timestamp();
assert_ne!(timestamp_before, new_time, "timestamps must be different");
let expected_response = -990;
let actual_response = node
.set_time(new_time.into())
.expect("failed setting timestamp");
- let timestamp_after = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_after = node.time.current_timestamp();
assert_eq!(expected_response, actual_response, "erroneous response");
assert_eq!(new_time, timestamp_after, "timestamp was not set correctly",);
@@ -874,22 +815,14 @@ mod tests {
let node = InMemoryNode::<HttpForkSource>::default();
let new_time = 1000u64;
- let timestamp_before = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_before = node.time.current_timestamp();
assert_eq!(timestamp_before, new_time, "timestamps must be same");
let expected_response = 0;
let actual_response = node
.set_time(new_time.into())
.expect("failed setting timestamp");
- let timestamp_after = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .expect("failed reading timestamp");
+ let timestamp_after = node.time.current_timestamp();
assert_eq!(expected_response, actual_response, "erroneous response");
assert_eq!(
@@ -903,11 +836,7 @@ mod tests {
let node = InMemoryNode::<HttpForkSource>::default();
for new_time in [0, u64::MAX] {
- let timestamp_before = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .unwrap_or_else(|_| panic!("case {}: failed reading timestamp", new_time));
+ let timestamp_before = node.time.current_timestamp();
assert_ne!(
timestamp_before, new_time,
"case {new_time}: timestamps must be different"
@@ -917,11 +846,7 @@ mod tests {
let actual_response = node
.set_time(new_time.into())
.expect("failed setting timestamp");
- let timestamp_after = node
- .get_inner()
- .read()
- .map(|inner| inner.time.last_timestamp())
- .unwrap_or_else(|_| panic!("case {}: failed reading timestamp", new_time));
+ let timestamp_after = node.time.current_timestamp();
assert_eq!(
expected_response, actual_response,
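For reference, the offsets that `mine_blocks` passes above pre-plan one timestamp per requested block. An illustrative sketch of the resulting clock behaviour, using assumed starting values (not part of this diff):

```rust
use crate::node::time::{AdvanceTime, ReadTime, TimestampManager};

// With a current timestamp of 1000 and no interval configured, the next
// timestamp would be 1001; offsets are applied relative to that start.
let time = TimestampManager::new(1000);
let mut lock = time.lock_with_offsets((0..3u64).map(|i| i * 10));
assert_eq!(lock.advance_timestamp(), 1001); // offset 0
assert_eq!(lock.advance_timestamp(), 1011); // offset 10
assert_eq!(lock.advance_timestamp(), 1021); // offset 20
drop(lock);
// The last pre-planned timestamp is persisted in the shared clock.
assert_eq!(time.current_timestamp(), 1021);
```

This is also why `mine_blocks` now rejects `interval_sec == 0` for more than one block: all offsets would collapse onto the same timestamp.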
diff --git a/src/node/time.rs b/src/node/time.rs
index 068f34da..379a6e0b 100644
--- a/src/node/time.rs
+++ b/src/node/time.rs
@@ -1,82 +1,225 @@
use anyhow::anyhow;
-use std::sync::{Arc, RwLock};
+use std::collections::VecDeque;
+use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard};
+
+/// Shared readable view on time.
+pub trait ReadTime {
+    /// Returns the timestamp (in seconds) that the clock is currently on.
+ fn current_timestamp(&self) -> u64;
+
+ /// Peek at what the next call to `advance_timestamp` will return.
+ fn peek_next_timestamp(&self) -> u64;
+}
+
+/// Writeable view on time management. The owner of this view should be able to treat it as
+/// exclusive access to the underlying clock.
+pub trait AdvanceTime: ReadTime {
+ /// Advances clock to the next timestamp and returns that timestamp in seconds.
+ ///
+ /// Subsequent calls to this method return monotonically increasing values. Time difference
+ /// between calls is implementation-specific.
+ fn advance_timestamp(&mut self) -> u64;
+}
/// Manages timestamps (in seconds) across the system.
///
/// Clones always agree on the underlying timestamp and updating one affects all other instances.
#[derive(Clone, Debug, Default)]
pub struct TimestampManager {
- /// The latest timestamp (in seconds) that has already been used.
-    last_timestamp: Arc<RwLock<u64>>,
+    internal: Arc<RwLock<TimestampManagerInternal>>,
}
impl TimestampManager {
- pub fn new(last_timestamp: u64) -> TimestampManager {
+ pub fn new(current_timestamp: u64) -> TimestampManager {
TimestampManager {
- last_timestamp: Arc::new(RwLock::new(last_timestamp)),
+ internal: Arc::new(RwLock::new(TimestampManagerInternal {
+ current_timestamp,
+ next_timestamp: None,
+ interval: None,
+ })),
}
}
- /// Returns the last timestamp (in seconds) that has already been used.
- pub fn last_timestamp(&self) -> u64 {
- *self
- .last_timestamp
+    fn get(&self) -> RwLockReadGuard<TimestampManagerInternal> {
+ self.internal
.read()
.expect("TimestampManager lock is poisoned")
}
- /// Returns the next unique timestamp (in seconds) to be used.
- pub fn next_timestamp(&self) -> u64 {
- let mut guard = self
- .last_timestamp
+    fn get_mut(&self) -> RwLockWriteGuard<TimestampManagerInternal> {
+ self.internal
.write()
- .expect("TimestampManager lock is poisoned");
- let next_timestamp = *guard + 1;
- *guard = next_timestamp;
-
- next_timestamp
+ .expect("TimestampManager lock is poisoned")
}
/// Sets last used timestamp (in seconds) to the provided value and returns the difference
/// between new value and old value (represented as a signed number of seconds).
- pub fn set_last_timestamp_unchecked(&self, timestamp: u64) -> i128 {
- let mut guard = self
- .last_timestamp
- .write()
- .expect("TimestampManager lock is poisoned");
- let diff = (timestamp as i128).saturating_sub(*guard as i128);
- *guard = timestamp;
+ pub fn set_current_timestamp_unchecked(&self, timestamp: u64) -> i128 {
+ let mut this = self.get_mut();
+ let diff = (timestamp as i128).saturating_sub(this.current_timestamp as i128);
+ this.reset_to(timestamp);
diff
}
- /// Advances internal timestamp (in seconds) to the provided value.
+    /// Forces the clock to return the provided value as the next timestamp. The time skip will
+    /// not be performed until the next invocation of `advance_timestamp`.
///
/// Expects provided timestamp to be in the future, returns error otherwise.
- pub fn advance_timestamp(&self, timestamp: u64) -> anyhow::Result<()> {
- let mut guard = self
- .last_timestamp
- .write()
- .expect("TimestampManager lock is poisoned");
- if timestamp < *guard {
+ pub fn enforce_next_timestamp(&self, timestamp: u64) -> anyhow::Result<()> {
+ let mut this = self.get_mut();
+ if timestamp <= this.current_timestamp {
Err(anyhow!(
- "timestamp ({}) must be greater or equal than current timestamp ({})",
+ "timestamp ({}) must be greater than the last used timestamp ({})",
timestamp,
- *guard
+ this.current_timestamp
))
} else {
- *guard = timestamp;
+ this.next_timestamp.replace(timestamp);
Ok(())
}
}
/// Fast-forwards time by the given amount of seconds.
pub fn increase_time(&self, seconds: u64) -> u64 {
- let mut guard = self
- .last_timestamp
- .write()
- .expect("TimestampManager lock is poisoned");
- let next = guard.saturating_add(seconds);
- *guard = next;
+ let mut this = self.get_mut();
+ let next = this.current_timestamp.saturating_add(seconds);
+ this.reset_to(next);
next
}
+
+    /// Sets an interval to use when computing the next timestamp.
+    ///
+    /// If an interval already exists, this will update it; otherwise a new interval will be set
+    /// starting from the current timestamp.
+ pub fn set_block_timestamp_interval(&self, seconds: u64) {
+ self.get_mut().interval.replace(seconds);
+ }
+
+ /// Removes the interval. Returns true if it existed before being removed, false otherwise.
+ pub fn remove_block_timestamp_interval(&self) -> bool {
+ self.get_mut().interval.take().is_some()
+ }
+
+    /// Returns an exclusively owned writeable view on this [`TimestampManager`] instance.
+    ///
+    /// Use this method when you need to ensure that no one else can access [`TimestampManager`]
+    /// during this view's lifetime.
+ pub fn lock(&self) -> impl AdvanceTime + '_ {
+ self.lock_with_offsets([])
+ }
+
+    /// Returns an exclusively owned writeable view on this [`TimestampManager`] instance where
+    /// the first `N` timestamps will be offset by the provided amounts of seconds (where `N` is
+    /// the size of the iterator).
+    ///
+    /// Use this method when you need to ensure that no one else can access [`TimestampManager`]
+    /// during this view's lifetime while also pre-setting the first `N` returned timestamps.
+    pub fn lock_with_offsets<'a, I: IntoIterator<Item = u64>>(
+ &'a self,
+ offsets: I,
+ ) -> impl AdvanceTime + 'a
+ where
+        <I as IntoIterator>::IntoIter: 'a,
+ {
+ let guard = self.get_mut();
+ TimeLockWithOffsets {
+ start_timestamp: guard.peek_next_timestamp(),
+ guard,
+            offsets: offsets.into_iter().collect::<VecDeque<_>>(),
+ }
+ }
+}
+
+impl ReadTime for TimestampManager {
+ fn current_timestamp(&self) -> u64 {
+ (*self.get()).current_timestamp()
+ }
+
+ fn peek_next_timestamp(&self) -> u64 {
+ (*self.get()).peek_next_timestamp()
+ }
+}
+
+#[derive(Debug, Default)]
+struct TimestampManagerInternal {
+ /// The current timestamp (in seconds). This timestamp is considered to be used already: there
+ /// might be a logical event that already happened on that timestamp (e.g. a block was sealed
+ /// with this timestamp).
+ current_timestamp: u64,
+ /// The next timestamp (in seconds) that the clock will be forced to advance to.
+    next_timestamp: Option<u64>,
+ /// The interval to use when determining the next timestamp to advance to.
+    interval: Option<u64>,
+}
+
+impl TimestampManagerInternal {
+ fn reset_to(&mut self, timestamp: u64) {
+ self.next_timestamp.take();
+ self.current_timestamp = timestamp;
+ }
+
+ fn interval(&self) -> u64 {
+ self.interval.unwrap_or(1)
+ }
+}
+
+impl ReadTime for TimestampManagerInternal {
+ fn current_timestamp(&self) -> u64 {
+ self.current_timestamp
+ }
+
+ fn peek_next_timestamp(&self) -> u64 {
+ self.next_timestamp
+ .unwrap_or_else(|| self.current_timestamp.saturating_add(self.interval()))
+ }
+}
+
+impl AdvanceTime for TimestampManagerInternal {
+ fn advance_timestamp(&mut self) -> u64 {
+ let next_timestamp = match self.next_timestamp.take() {
+ Some(next_timestamp) => next_timestamp,
+ None => self.current_timestamp.saturating_add(self.interval()),
+ };
+
+ self.current_timestamp = next_timestamp;
+ next_timestamp
+ }
+}
+
+struct TimeLockWithOffsets<'a> {
+ /// The first timestamp that would have been returned without accounting for offsets
+ start_timestamp: u64,
+ /// Exclusive writable ownership over the corresponding [`TimestampManager`]
+ guard: RwLockWriteGuard<'a, TimestampManagerInternal>,
+ /// A queue of offsets (relative to `start_timestamp`) to be used for next `N` timestamps
+ offsets: VecDeque,
+}
+
+impl ReadTime for TimeLockWithOffsets<'_> {
+ fn current_timestamp(&self) -> u64 {
+ self.guard.current_timestamp()
+ }
+
+ fn peek_next_timestamp(&self) -> u64 {
+ match self.offsets.front() {
+ Some(offset) => self.start_timestamp.saturating_add(*offset),
+ None => self.guard.peek_next_timestamp(),
+ }
+ }
+}
+
+impl AdvanceTime for TimeLockWithOffsets<'_> {
+ fn advance_timestamp(&mut self) -> u64 {
+ match self.offsets.pop_front() {
+ Some(offset) => {
+ let timestamp = self.start_timestamp.saturating_add(offset);
+ // Persist last used timestamp in the underlying state as this instance can be
+ // dropped before we finish iterating all values.
+ self.guard.reset_to(timestamp);
+
+ timestamp
+ }
+ None => self.guard.advance_timestamp(),
+ }
+ }
}
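Taken together, the interval plumbing gives `TimestampManager` the observable behaviour sketched below. This is an illustrative snippet with assumed starting values, not a test from this diff:

```rust
use crate::node::time::{AdvanceTime, ReadTime, TimestampManager};

let time = TimestampManager::new(1000);
assert_eq!(time.peek_next_timestamp(), 1001); // default spacing is 1 second

time.set_block_timestamp_interval(42);
assert_eq!(time.peek_next_timestamp(), 1042); // next block lands 42s later
assert_eq!(time.lock().advance_timestamp(), 1042);

// `enforce_next_timestamp` overrides the interval exactly once.
time.enforce_next_timestamp(2000).unwrap();
assert_eq!(time.lock().advance_timestamp(), 2000);

assert!(time.remove_block_timestamp_interval()); // true: an interval was set
assert_eq!(time.peek_next_timestamp(), 2001);    // back to the 1s default
```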
diff --git a/src/node/zks.rs b/src/node/zks.rs
index fc383ee8..17049bda 100644
--- a/src/node/zks.rs
+++ b/src/node/zks.rs
@@ -47,7 +47,7 @@ impl ZksNamespa
"Failed to acquire read lock for inner node state.",
)))
})
- .and_then(|reader| reader.estimate_gas_impl(req))
+ .and_then(|reader| reader.estimate_gas_impl(&self.time, req))
.into_boxed_future()
}