From e3bb595ea0298c2991c33782b17673bdfe3df0d6 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Thu, 16 Nov 2023 14:14:57 +0100 Subject: [PATCH 01/14] made consensus actor write finalized blocks directly to storage --- node/actors/bft/src/io.rs | 3 - node/actors/bft/src/lib.rs | 14 +-- node/actors/bft/src/replica/block.rs | 19 ++-- node/actors/bft/src/replica/leader_commit.rs | 6 +- node/actors/bft/src/replica/leader_prepare.rs | 6 +- node/actors/bft/src/replica/new_view.rs | 4 +- node/actors/bft/src/replica/state_machine.rs | 83 +++++++--------- node/actors/bft/src/replica/tests.rs | 3 +- node/actors/bft/src/testonly/make.rs | 4 +- node/actors/bft/src/testonly/node.rs | 23 +---- node/actors/bft/src/testonly/run.rs | 98 +++++++++---------- node/actors/executor/src/io.rs | 15 --- node/actors/executor/src/lib.rs | 24 ++--- node/actors/executor/src/tests.rs | 59 +++-------- node/libs/storage/src/lib.rs | 2 +- node/libs/storage/src/replica_state.rs | 35 ++++--- node/tools/src/main.rs | 10 +- 17 files changed, 165 insertions(+), 243 deletions(-) diff --git a/node/actors/bft/src/io.rs b/node/actors/bft/src/io.rs index 3636e051..b29dc758 100644 --- a/node/actors/bft/src/io.rs +++ b/node/actors/bft/src/io.rs @@ -1,7 +1,6 @@ //! Input and output messages for the Consensus actor. These are processed by the executor actor. use zksync_consensus_network::io::{ConsensusInputMessage, ConsensusReq}; -use zksync_consensus_roles::validator; /// All the messages that other actors can send to the Consensus actor. #[derive(Debug)] @@ -15,8 +14,6 @@ pub enum InputMessage { pub enum OutputMessage { /// Message types to the Network actor. Network(ConsensusInputMessage), - /// Message types to the Sync actor. - FinalizedBlock(validator::FinalBlock), } impl From for OutputMessage { diff --git a/node/actors/bft/src/lib.rs b/node/actors/bft/src/lib.rs index dcc0b142..dda99da5 100644 --- a/node/actors/bft/src/lib.rs +++ b/node/actors/bft/src/lib.rs @@ -21,7 +21,7 @@ use inner::ConsensusInner; use tracing::{info, instrument}; use zksync_concurrency::ctx; use zksync_consensus_roles::validator; -use zksync_consensus_storage::FallbackReplicaStateStore; +use zksync_consensus_storage::ReplicaStore; use zksync_consensus_utils::pipe::ActorPipe; mod inner; @@ -53,7 +53,7 @@ impl Consensus { pipe: ActorPipe, secret_key: validator::SecretKey, validator_set: validator::ValidatorSet, - storage: FallbackReplicaStateStore, + storage: ReplicaStore, ) -> anyhow::Result { Ok(Consensus { inner: ConsensusInner { @@ -69,7 +69,7 @@ impl Consensus { /// Starts the Consensus actor. It will start running, processing incoming messages and /// sending output messages. This is a blocking method. #[instrument(level = "trace", ret)] - pub fn run(mut self, ctx: &ctx::Ctx) -> anyhow::Result<()> { + pub async fn run(mut self, ctx: &ctx::Ctx) -> anyhow::Result<()> { info!( "Starting consensus actor {:?}", self.inner.secret_key.public() @@ -78,6 +78,7 @@ impl Consensus { // We need to start the replica before processing inputs. self.replica .start(ctx, &self.inner) + .await .context("replica.start()")?; // This is the infinite loop where the consensus actually runs. The validator waits for either @@ -87,7 +88,7 @@ impl Consensus { .inner .pipe .recv(&ctx.with_deadline(self.replica.timeout_deadline)) - .block() + .await .ok(); // We check if the context is active before processing the input. 
If the context is not active, @@ -114,7 +115,8 @@ impl Consensus { validator::ConsensusMsg::LeaderPrepare(_) | validator::ConsensusMsg::LeaderCommit(_) => { self.replica - .process_input(ctx, &self.inner, Some(req.msg))?; + .process_input(ctx, &self.inner, Some(req.msg)) + .await?; } } // Notify network actor that the message has been processed. @@ -122,7 +124,7 @@ impl Consensus { let _ = req.ack.send(()); } None => { - self.replica.process_input(ctx, &self.inner, None)?; + self.replica.process_input(ctx, &self.inner, None).await?; } } } diff --git a/node/actors/bft/src/replica/block.rs b/node/actors/bft/src/replica/block.rs index 92985b74..db4ae8bb 100644 --- a/node/actors/bft/src/replica/block.rs +++ b/node/actors/bft/src/replica/block.rs @@ -1,6 +1,8 @@ use super::StateMachine; -use crate::{inner::ConsensusInner, io::OutputMessage}; +use crate::inner::ConsensusInner; +use anyhow::Context as _; use tracing::{info, instrument}; +use zksync_concurrency::ctx; use zksync_consensus_roles::validator; impl StateMachine { @@ -8,11 +10,12 @@ impl StateMachine { /// block proposal cache for the matching block, and if we find it we build the block. /// If this method succeeds, it sends the finalized block to the executor. #[instrument(level = "trace", ret)] - pub(crate) fn build_block( + pub(crate) async fn save_block( &mut self, + ctx: &ctx::Ctx, consensus: &ConsensusInner, commit_qc: &validator::CommitQC, - ) { + ) -> anyhow::Result<()> { // TODO(gprusak): for availability of finalized blocks, // replicas should be able to broadcast highest quorums without // the corresponding block (same goes for synchronization). @@ -20,10 +23,10 @@ impl StateMachine { .block_proposal_cache .get(&commit_qc.message.proposal.number) else { - return; + return Ok(()); }; let Some(payload) = cache.get(&commit_qc.message.proposal.payload) else { - return; + return Ok(()); }; let block = validator::FinalBlock { header: commit_qc.message.proposal, @@ -35,7 +38,9 @@ impl StateMachine { "Finalized a block!\nFinal block: {:#?}", block.header.hash() ); - - consensus.pipe.send(OutputMessage::FinalizedBlock(block)); + self.storage + .put_block(ctx, &block) + .await + .context("store.put_block()") } } diff --git a/node/actors/bft/src/replica/leader_commit.rs b/node/actors/bft/src/replica/leader_commit.rs index ba0fd3e6..5eccb92d 100644 --- a/node/actors/bft/src/replica/leader_commit.rs +++ b/node/actors/bft/src/replica/leader_commit.rs @@ -41,7 +41,7 @@ impl StateMachine { /// Processes a leader commit message. We can approve this leader message even if we /// don't have the block proposal stored. It is enough to see the justification. #[instrument(level = "trace", err)] - pub(crate) fn process_leader_commit( + pub(crate) async fn process_leader_commit( &mut self, ctx: &ctx::Ctx, consensus: &ConsensusInner, @@ -86,7 +86,8 @@ impl StateMachine { // ----------- All checks finished. Now we process the message. -------------- // Try to create a finalized block with this CommitQC and our block proposal cache. - self.build_block(consensus, &message.justification); + self.save_block(ctx, consensus, &message.justification) + .await?; // Update the state machine. We don't update the view and phase (or backup our state) here // because we will do it when we start the new view. @@ -97,6 +98,7 @@ impl StateMachine { // Start a new view. But first we skip to the view of this message. 
self.view = view; self.start_new_view(ctx, consensus) + .await .context("start_new_view()")?; Ok(()) diff --git a/node/actors/bft/src/replica/leader_prepare.rs b/node/actors/bft/src/replica/leader_prepare.rs index 47b21744..ffed5991 100644 --- a/node/actors/bft/src/replica/leader_prepare.rs +++ b/node/actors/bft/src/replica/leader_prepare.rs @@ -108,7 +108,7 @@ pub(crate) enum Error { impl StateMachine { /// Processes a leader prepare message. #[instrument(level = "trace", ret)] - pub(crate) fn process_leader_prepare( + pub(crate) async fn process_leader_prepare( &mut self, ctx: &ctx::Ctx, consensus: &ConsensusInner, @@ -189,7 +189,7 @@ impl StateMachine { // Try to create a finalized block with this CommitQC and our block proposal cache. // This gives us another chance to finalize a block that we may have missed before. - self.build_block(consensus, &highest_qc); + self.save_block(ctx, consensus, &highest_qc).await?; // ----------- Checking the block proposal -------------- @@ -276,7 +276,7 @@ impl StateMachine { } // Backup our state. - self.backup_state(ctx).context("backup_state()")?; + self.backup_state(ctx).await.context("backup_state()")?; // Send the replica message to the leader. let output_message = ConsensusInputMessage { diff --git a/node/actors/bft/src/replica/new_view.rs b/node/actors/bft/src/replica/new_view.rs index f9571f2c..9ccc34c9 100644 --- a/node/actors/bft/src/replica/new_view.rs +++ b/node/actors/bft/src/replica/new_view.rs @@ -9,7 +9,7 @@ use zksync_consensus_roles::validator; impl StateMachine { /// This blocking method is used whenever we start a new view. #[instrument(level = "trace", err)] - pub(crate) fn start_new_view( + pub(crate) async fn start_new_view( &mut self, ctx: &ctx::Ctx, consensus: &ConsensusInner, @@ -27,7 +27,7 @@ impl StateMachine { .retain(|k, _| k > &self.high_qc.message.proposal.number); // Backup our state. - self.backup_state(ctx).context("backup_state")?; + self.backup_state(ctx).await.context("backup_state")?; // Send the replica message to the next leader. let output_message = ConsensusInputMessage { diff --git a/node/actors/bft/src/replica/state_machine.rs b/node/actors/bft/src/replica/state_machine.rs index a3a3e983..dcc2f480 100644 --- a/node/actors/bft/src/replica/state_machine.rs +++ b/node/actors/bft/src/replica/state_machine.rs @@ -2,10 +2,10 @@ use crate::{metrics, ConsensusInner}; use anyhow::Context as _; use std::collections::{BTreeMap, HashMap}; use tracing::instrument; -use zksync_concurrency::{ctx, metrics::LatencyHistogramExt as _, scope, time}; +use zksync_concurrency::{ctx, metrics::LatencyHistogramExt as _, time}; use zksync_consensus_roles::validator; use zksync_consensus_storage as storage; -use zksync_consensus_storage::{FallbackReplicaStateStore, StorageError}; +use zksync_consensus_storage::ReplicaStore; /// The StateMachine struct contains the state of the replica. This is the most complex state machine and is responsible /// for validating and voting on blocks. When participating in consensus we are always a replica. @@ -24,17 +24,15 @@ pub(crate) struct StateMachine { BTreeMap>, /// The deadline to receive an input message. pub(crate) timeout_deadline: time::Deadline, - /// A reference to the storage module. We use it to backup the replica state. - pub(crate) storage: FallbackReplicaStateStore, + /// A reference to the storage module. We use it to backup the replica state and store + /// finalized blocks. + pub(crate) storage: ReplicaStore, } impl StateMachine { /// Creates a new StateMachine struct. 
We try to recover a past state from the storage module, /// otherwise we initialize the state machine with whatever head block we have. - pub(crate) async fn new( - ctx: &ctx::Ctx, - storage: FallbackReplicaStateStore, - ) -> anyhow::Result { + pub(crate) async fn new(ctx: &ctx::Ctx, storage: ReplicaStore) -> anyhow::Result { let backup = storage.replica_state(ctx).await?; let mut block_proposal_cache: BTreeMap<_, HashMap<_, _>> = BTreeMap::new(); for proposal in backup.proposals { @@ -59,13 +57,14 @@ impl StateMachine { /// we are able to process inputs. If we are in the genesis block, then we start a new view, /// this will kick start the consensus algorithm. Otherwise, we just start the timer. #[instrument(level = "trace", ret)] - pub(crate) fn start( + pub(crate) async fn start( &mut self, ctx: &ctx::Ctx, consensus: &ConsensusInner, ) -> anyhow::Result<()> { if self.view == validator::ViewNumber(0) { self.start_new_view(ctx, consensus) + .await .context("start_new_view") } else { self.reset_timer(ctx); @@ -77,7 +76,7 @@ impl StateMachine { /// the main entry point for the state machine. We need read-access to the inner consensus struct. /// As a result, we can modify our state machine or send a message to the executor. #[instrument(level = "trace", ret)] - pub(crate) fn process_input( + pub(crate) async fn process_input( &mut self, ctx: &ctx::Ctx, consensus: &ConsensusInner, @@ -86,38 +85,42 @@ impl StateMachine { let Some(signed_msg) = input else { tracing::warn!("We timed out before receiving a message."); // Start new view. - self.start_new_view(ctx, consensus)?; + self.start_new_view(ctx, consensus).await?; return Ok(()); }; let now = ctx.now(); let label = match &signed_msg.msg { validator::ConsensusMsg::LeaderPrepare(_) => { - let res = - match self.process_leader_prepare(ctx, consensus, signed_msg.cast().unwrap()) { - Err(super::leader_prepare::Error::Internal(err)) => { - return Err(err).context("process_leader_prepare()") - } - Err(err) => { - tracing::warn!("process_leader_prepare(): {err:#}"); - Err(()) - } - Ok(()) => Ok(()), - }; + let res = match self + .process_leader_prepare(ctx, consensus, signed_msg.cast().unwrap()) + .await + { + Err(super::leader_prepare::Error::Internal(err)) => { + return Err(err).context("process_leader_prepare()") + } + Err(err) => { + tracing::warn!("process_leader_prepare(): {err:#}"); + Err(()) + } + Ok(()) => Ok(()), + }; metrics::ConsensusMsgLabel::LeaderPrepare.with_result(&res) } validator::ConsensusMsg::LeaderCommit(_) => { - let res = - match self.process_leader_commit(ctx, consensus, signed_msg.cast().unwrap()) { - Err(super::leader_commit::Error::Internal(err)) => { - return Err(err).context("process_leader_commit()") - } - Err(err) => { - tracing::warn!("process_leader_commit(): {err:#}"); - Err(()) - } - Ok(()) => Ok(()), - }; + let res = match self + .process_leader_commit(ctx, consensus, signed_msg.cast().unwrap()) + .await + { + Err(super::leader_commit::Error::Internal(err)) => { + return Err(err).context("process_leader_commit()") + } + Err(err) => { + tracing::warn!("process_leader_commit(): {err:#}"); + Err(()) + } + Ok(()) => Ok(()), + }; metrics::ConsensusMsgLabel::LeaderCommit.with_result(&res) } _ => unreachable!(), @@ -127,7 +130,7 @@ impl StateMachine { } /// Backups the replica state to disk. 
- pub(crate) fn backup_state(&self, ctx: &ctx::Ctx) -> anyhow::Result<()> { + pub(crate) async fn backup_state(&self, ctx: &ctx::Ctx) -> anyhow::Result<()> { let mut proposals = vec![]; for (number, payloads) in &self.block_proposal_cache { proposals.extend(payloads.values().map(|p| storage::Proposal { @@ -142,17 +145,7 @@ impl StateMachine { high_qc: self.high_qc.clone(), proposals, }; - - let store_result = scope::run_blocking!(ctx, |ctx, s| { - let backup_future = self.storage.put_replica_state(ctx, &backup); - s.spawn(backup_future).join(ctx).block()?; - Ok(()) - }); - match store_result { - Ok(()) => { /* Everything went fine */ } - Err(StorageError::Canceled(_)) => tracing::trace!("Storing replica state was canceled"), - Err(StorageError::Database(err)) => return Err(err), - } + self.storage.put_replica_state(ctx, &backup).await?; Ok(()) } } diff --git a/node/actors/bft/src/replica/tests.rs b/node/actors/bft/src/replica/tests.rs index 8fb3aedc..337a73c8 100644 --- a/node/actors/bft/src/replica/tests.rs +++ b/node/actors/bft/src/replica/tests.rs @@ -21,10 +21,11 @@ async fn start_new_view_not_leader() { consensus.replica.high_qc.message.view = ViewNumber(0); scope::run!(ctx, |ctx, s| { - s.spawn_blocking(|| { + s.spawn(async { consensus .replica .start_new_view(ctx, &consensus.inner) + .await .unwrap(); Ok(()) }) diff --git a/node/actors/bft/src/testonly/make.rs b/node/actors/bft/src/testonly/make.rs index ca99edec..568096f3 100644 --- a/node/actors/bft/src/testonly/make.rs +++ b/node/actors/bft/src/testonly/make.rs @@ -7,7 +7,7 @@ use crate::{ use std::sync::Arc; use zksync_concurrency::ctx; use zksync_consensus_roles::validator; -use zksync_consensus_storage::{FallbackReplicaStateStore, InMemoryStorage}; +use zksync_consensus_storage::{InMemoryStorage, ReplicaStore}; use zksync_consensus_utils::pipe::{self, DispatcherPipe}; /// This creates a mock Consensus struct for unit tests. @@ -27,7 +27,7 @@ pub async fn make_consensus( consensus_pipe, key.clone(), validator_set.clone(), - FallbackReplicaStateStore::from_store(Arc::new(storage)), + ReplicaStore::from_store(Arc::new(storage)), ); let consensus = consensus .await diff --git a/node/actors/bft/src/testonly/node.rs b/node/actors/bft/src/testonly/node.rs index ae8b6f68..d696a325 100644 --- a/node/actors/bft/src/testonly/node.rs +++ b/node/actors/bft/src/testonly/node.rs @@ -1,19 +1,13 @@ use super::Fuzz; use crate::io; use rand::Rng; -use zksync_concurrency::{ctx, ctx::channel, scope}; +use std::sync::Arc; +use zksync_concurrency::{ctx, scope}; use zksync_consensus_network as network; use zksync_consensus_network::io::ConsensusInputMessage; -use zksync_consensus_roles::validator; +use zksync_consensus_storage::InMemoryStorage; use zksync_consensus_utils::pipe::DispatcherPipe; -/// A struct containing metrics information. Right now it's just a finalized block. -#[derive(Debug)] -pub(super) struct Metrics { - pub(crate) validator: validator::PublicKey, - pub(crate) finalized_block: validator::FinalBlock, -} - /// Enum representing the behavior of the node. 
#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub(crate) enum Behavior { @@ -33,6 +27,7 @@ pub(crate) enum Behavior { pub(super) struct Node { pub(crate) net: network::testonly::Instance, pub(crate) behavior: Behavior, + pub(crate) storage: Arc, } impl Node { @@ -42,15 +37,12 @@ impl Node { ctx: &ctx::Ctx, consensus_pipe: DispatcherPipe, network_pipe: DispatcherPipe, - metrics: channel::UnboundedSender, ) -> anyhow::Result<()> { - let key = self.net.consensus_config().key.public(); let rng = &mut ctx.rng(); let mut net_recv = network_pipe.recv; let net_send = network_pipe.send; let mut con_recv = consensus_pipe.recv; let con_send = consensus_pipe.send; - scope::run!(ctx, |ctx, s| async { s.spawn(async { while let Ok(network_message) = net_recv.recv(ctx).await { @@ -69,13 +61,6 @@ impl Node { // Get the next message from the channel. Our response depends on what type of replica we are. while let Ok(msg) = con_recv.recv(ctx).await { match msg { - io::OutputMessage::FinalizedBlock(block) => { - // Send the finalized block to the watcher. - metrics.send(Metrics { - validator: key.clone(), - finalized_block: block, - }) - } io::OutputMessage::Network(mut message) => { let message_to_send = match self.behavior { Behavior::Offline => continue, diff --git a/node/actors/bft/src/testonly/run.rs b/node/actors/bft/src/testonly/run.rs index d1eb3eae..9581a556 100644 --- a/node/actors/bft/src/testonly/run.rs +++ b/node/actors/bft/src/testonly/run.rs @@ -1,15 +1,12 @@ -use super::{Behavior, Metrics, Node}; +use super::{Behavior, Node}; use crate::{testonly, Consensus}; use anyhow::Context; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; use tracing::Instrument as _; -use zksync_concurrency::{ctx, ctx::channel, oneshot, scope, signal}; +use zksync_concurrency::{ctx, oneshot, scope, signal, sync}; use zksync_consensus_network as network; use zksync_consensus_roles::validator; -use zksync_consensus_storage::{FallbackReplicaStateStore, InMemoryStorage}; +use zksync_consensus_storage::{BlockStore, InMemoryStorage, ReplicaStore}; use zksync_consensus_utils::pipe; #[derive(Clone, Copy)] @@ -30,63 +27,61 @@ impl Test { /// Run a test with the given parameters. pub(crate) async fn run(&self, ctx: &ctx::Ctx) -> anyhow::Result<()> { let rng = &mut ctx.rng(); - let nodes: Vec<_> = network::testonly::Instance::new(rng, self.nodes.len(), 1) + let nodes: Vec<_> = network::testonly::Instance::new(rng, self.nodes.len(), 1); + let keys: Vec<_> = nodes + .iter() + .map(|node| node.consensus_config().key.clone()) + .collect(); + let (genesis_block, _) = testonly::make_genesis(&keys, validator::Payload(vec![])); + let nodes: Vec<_> = nodes .into_iter() .enumerate() .map(|(i, net)| Node { net, behavior: self.nodes[i], + storage: Arc::new(InMemoryStorage::new(genesis_block.clone())), }) .collect(); - scope::run!(ctx, |ctx, s| async { - let (metrics_send, mut metrics_recv) = channel::unbounded(); - s.spawn_bg(run_nodes(ctx, self.network, &nodes, metrics_send)); - - // Get only the honest replicas. - let honest: HashSet<_> = nodes - .iter() - .filter(|node| node.behavior == Behavior::Honest) - .map(|node| node.net.consensus_config().key.public()) - .collect(); - assert!(!honest.is_empty()); - let mut finalized: HashMap = - HashMap::new(); - let mut observers: HashMap> = - HashMap::new(); - let mut fully_observed = 0; + // Get only the honest replicas. 
+ let honest: Vec<_> = nodes + .iter() + .filter(|node| node.behavior == Behavior::Honest) + .collect(); + assert!(!honest.is_empty()); - while fully_observed < self.blocks_to_finalize { - let metric = metrics_recv.recv(ctx).await?; - if !honest.contains(&metric.validator) { - continue; - } - let block = metric.finalized_block; - let hash = block.header.hash(); - assert_eq!(*finalized.entry(block.header.number).or_insert(hash), hash); - let observers = observers.entry(block.header.number).or_default(); - if observers.insert(metric.validator.clone()) && observers.len() == honest.len() { - fully_observed += 1; - } + // Run the nodes until all honest nodes store enough finalized blocks. + scope::run!(ctx, |ctx, s| async { + s.spawn_bg(run_nodes(ctx, self.network, &nodes)); + for n in &honest { + s.spawn(async { + sync::wait_for( + ctx, + &mut n.storage.subscribe_to_block_writes(), + |block_number| block_number.0 >= self.blocks_to_finalize as u64, + ) + .await?; + Ok(()) + }); } Ok(()) }) - .await + .await?; + + // Check that the stored blocks are consistent. + for i in 0..self.blocks_to_finalize as u64 + 1 { + let i = validator::BlockNumber(i); + let want = honest[0].storage.block(ctx, i).await?; + for n in &honest[1..] { + assert_eq!(want, n.storage.block(ctx, i).await?); + } + } + Ok(()) } } /// Run a set of nodes. -async fn run_nodes( - ctx: &ctx::Ctx, - network: Network, - nodes: &[Node], - metrics: channel::UnboundedSender, -) -> anyhow::Result<()> { - let keys: Vec<_> = nodes - .iter() - .map(|node| node.net.consensus_config().key.clone()) - .collect(); - let (genesis_block, _) = testonly::make_genesis(&keys, validator::Payload(vec![])); +async fn run_nodes(ctx: &ctx::Ctx, network: Network, nodes: &[Node]) -> anyhow::Result<()> { let network_ready = signal::Once::new(); let mut network_pipes = HashMap::new(); let mut network_send = HashMap::new(); @@ -102,8 +97,7 @@ async fn run_nodes( network_pipes.insert(validator_key.public(), network_actor_pipe); s.spawn( async { - let storage = InMemoryStorage::new(genesis_block.clone()); - let storage = FallbackReplicaStateStore::from_store(Arc::new(storage)); + let storage = ReplicaStore::from_store(node.storage.clone()); let consensus = Consensus::new( ctx, @@ -117,8 +111,8 @@ async fn run_nodes( scope::run!(ctx, |ctx, s| async { network_ready.recv(ctx).await?; - s.spawn_blocking(|| consensus.run(ctx).context("consensus.run()")); - node.run_executor(ctx, consensus_pipe, network_pipe, metrics.clone()) + s.spawn(async { consensus.run(ctx).await.context("consensus.run()") }); + node.run_executor(ctx, consensus_pipe, network_pipe) .await .context("executor.run()") }) diff --git a/node/actors/executor/src/io.rs b/node/actors/executor/src/io.rs index 76b1df00..5379092f 100644 --- a/node/actors/executor/src/io.rs +++ b/node/actors/executor/src/io.rs @@ -1,6 +1,4 @@ //! Module to manage the communication between actors. It simply converts and forwards messages from and to each different actor. 
- -use crate::metrics; use tracing::instrument; use zksync_concurrency::{ ctx::{self, channel}, @@ -12,7 +10,6 @@ use zksync_consensus_bft::io::{ use zksync_consensus_network::io::{ InputMessage as NetworkInputMessage, OutputMessage as NetworkOutputMessage, }; -use zksync_consensus_roles::validator::FinalBlock; use zksync_consensus_sync_blocks::io::{ InputMessage as SyncBlocksInputMessage, OutputMessage as SyncBlocksOutputMessage, }; @@ -28,7 +25,6 @@ pub(super) struct Dispatcher { sync_blocks_output: channel::UnboundedReceiver, network_input: channel::UnboundedSender, network_output: channel::UnboundedReceiver, - blocks_sender: channel::UnboundedSender, } impl Dispatcher { @@ -37,7 +33,6 @@ impl Dispatcher { consensus_pipe: DispatcherPipe, sync_blocks_pipe: DispatcherPipe, network_pipe: DispatcherPipe, - blocks_sender: channel::UnboundedSender, ) -> Self { Dispatcher { consensus_input: consensus_pipe.send, @@ -46,7 +41,6 @@ impl Dispatcher { sync_blocks_output: sync_blocks_pipe.recv, network_input: network_pipe.send, network_output: network_pipe.recv, - blocks_sender, } } @@ -61,15 +55,6 @@ impl Dispatcher { ConsensusOutputMessage::Network(message) => { self.network_input.send(message.into()); } - ConsensusOutputMessage::FinalizedBlock(block) => { - let number_metric = &metrics::METRICS.finalized_block_number; - let current_number = number_metric.get(); - number_metric.set(current_number.max(block.header.number.0)); - // This works because this is the only place where `finalized_block_number` - // is modified, and there should be a single running `Dispatcher`. - - self.blocks_sender.send(block); - } } } Ok(()) diff --git a/node/actors/executor/src/lib.rs b/node/actors/executor/src/lib.rs index a1d779ca..2e99e180 100644 --- a/node/actors/executor/src/lib.rs +++ b/node/actors/executor/src/lib.rs @@ -2,12 +2,12 @@ use crate::io::Dispatcher; use anyhow::Context as _; -use std::{mem, sync::Arc}; -use zksync_concurrency::{ctx, ctx::channel, net, scope}; +use std::sync::Arc; +use zksync_concurrency::{ctx, net, scope}; use zksync_consensus_bft::{misc::consensus_threshold, Consensus}; use zksync_consensus_network as network; -use zksync_consensus_roles::{node, validator, validator::FinalBlock}; -use zksync_consensus_storage::{FallbackReplicaStateStore, ReplicaStateStore, WriteBlockStore}; +use zksync_consensus_roles::{node, validator}; +use zksync_consensus_storage::{ReplicaStateStore, ReplicaStore, WriteBlockStore}; use zksync_consensus_sync_blocks::SyncBlocks; use zksync_consensus_utils::pipe; @@ -29,8 +29,6 @@ struct ValidatorExecutor { key: validator::SecretKey, /// Store for replica state. replica_state_store: Arc, - /// Sender of blocks finalized by the consensus algorithm. - blocks_sender: channel::UnboundedSender, } impl ValidatorExecutor { @@ -85,7 +83,6 @@ impl Executor { config: ConsensusConfig, key: validator::SecretKey, replica_state_store: Arc, - blocks_sender: channel::UnboundedSender, ) -> anyhow::Result<()> { let public = &config.key; anyhow::ensure!( @@ -104,7 +101,6 @@ impl Executor { config, key, replica_state_store, - blocks_sender, }); } else { tracing::info!( @@ -142,31 +138,25 @@ impl Executor { } /// Runs this executor to completion. This should be spawned on a separate task. - pub async fn run(mut self, ctx: &ctx::Ctx) -> anyhow::Result<()> { + pub async fn run(self, ctx: &ctx::Ctx) -> anyhow::Result<()> { let network_config = self.network_config(); // Generate the communication pipes. We have one for each actor. 
let (consensus_actor_pipe, consensus_dispatcher_pipe) = pipe::new(); let (sync_blocks_actor_pipe, sync_blocks_dispatcher_pipe) = pipe::new(); let (network_actor_pipe, network_dispatcher_pipe) = pipe::new(); - let blocks_sender = if let Some(validator) = &mut self.validator { - mem::replace(&mut validator.blocks_sender, channel::unbounded().0) - } else { - channel::unbounded().0 - }; // Create the IO dispatcher. let mut dispatcher = Dispatcher::new( consensus_dispatcher_pipe, sync_blocks_dispatcher_pipe, network_dispatcher_pipe, - blocks_sender, ); // Create each of the actors. let validator_set = &self.executor_config.validators; let consensus = if let Some(validator) = self.validator { let consensus_storage = - FallbackReplicaStateStore::new(validator.replica_state_store, self.storage.clone()); + ReplicaStore::new(validator.replica_state_store, self.storage.clone()); let consensus = Consensus::new( ctx, consensus_actor_pipe, @@ -208,7 +198,7 @@ impl Executor { .context("Network stopped") }); if let Some(consensus) = consensus { - s.spawn_blocking(|| consensus.run(ctx).context("Consensus stopped")); + s.spawn(async { consensus.run(ctx).await.context("Consensus stopped") }); } sync_blocks.run(ctx).await.context("Syncing blocks stopped") }) diff --git a/node/actors/executor/src/tests.rs b/node/actors/executor/src/tests.rs index 5e540f50..cd8f6d8e 100644 --- a/node/actors/executor/src/tests.rs +++ b/node/actors/executor/src/tests.rs @@ -3,45 +3,15 @@ use super::*; use crate::testonly::FullValidatorConfig; use zksync_concurrency::{sync, testonly::abort_on_panic}; use zksync_consensus_roles::validator::{BlockNumber, Payload}; -use zksync_consensus_storage::{BlockStore, InMemoryStorage, StorageError}; - -async fn store_final_blocks( - ctx: &ctx::Ctx, - mut blocks_receiver: channel::UnboundedReceiver, - storage: Arc, -) -> anyhow::Result<()> { - while let Ok(block) = blocks_receiver.recv(ctx).await { - tracing::trace!(number = %block.header.number, "Finalized new block"); - if let Err(err) = storage.put_block(ctx, &block).await { - if matches!(err, StorageError::Canceled(_)) { - break; - } else { - return Err(err.into()); - } - } - } - Ok(()) -} +use zksync_consensus_storage::{BlockStore, InMemoryStorage}; impl FullValidatorConfig { - fn into_executor( - self, - storage: Arc, - ) -> ( - Executor, - channel::UnboundedReceiver, - ) { - let (blocks_sender, blocks_receiver) = channel::unbounded(); + fn into_executor(self, storage: Arc) -> Executor { let mut executor = Executor::new(self.node_config, self.node_key, storage.clone()).unwrap(); executor - .set_validator( - self.consensus_config, - self.validator_key, - storage, - blocks_sender, - ) + .set_validator(self.consensus_config, self.validator_key, storage) .unwrap(); - (executor, blocks_receiver) + executor } } @@ -55,19 +25,16 @@ async fn executing_single_validator() { let genesis_block = &validator.node_config.genesis_block; let storage = InMemoryStorage::new(genesis_block.clone()); let storage = Arc::new(storage); - let (executor, mut blocks_receiver) = validator.into_executor(storage); + let executor = validator.into_executor(storage.clone()); scope::run!(ctx, |ctx, s| async { s.spawn_bg(executor.run(ctx)); - - let mut expected_block_number = BlockNumber(1); - while expected_block_number < BlockNumber(5) { - let final_block = blocks_receiver.recv(ctx).await?; - tracing::trace!(number = %final_block.header.number, "Finalized new block"); - assert_eq!(final_block.header.number, expected_block_number); - expected_block_number = 
expected_block_number.next(); - } - anyhow::Ok(()) + let want = BlockNumber(5); + sync::wait_for(ctx, &mut storage.subscribe_to_block_writes(), |n| { + n >= &want + }) + .await?; + Ok(()) }) .await .unwrap(); @@ -89,7 +56,7 @@ async fn executing_validator_and_external_node() { let external_node_storage = Arc::new(external_node_storage); let mut en_subscriber = external_node_storage.subscribe_to_block_writes(); - let (validator, blocks_receiver) = validator.into_executor(validator_storage.clone()); + let validator = validator.into_executor(validator_storage.clone()); let external_node = Executor::new( external_node.node_config, external_node.node_key, @@ -100,8 +67,6 @@ async fn executing_validator_and_external_node() { scope::run!(ctx, |ctx, s| async { s.spawn_bg(validator.run(ctx)); s.spawn_bg(external_node.run(ctx)); - s.spawn_bg(store_final_blocks(ctx, blocks_receiver, validator_storage)); - for _ in 0..5 { let number = *sync::changed(ctx, &mut en_subscriber).await?; tracing::trace!(%number, "External node received block"); diff --git a/node/libs/storage/src/lib.rs b/node/libs/storage/src/lib.rs index 0afd516d..9b0947f4 100644 --- a/node/libs/storage/src/lib.rs +++ b/node/libs/storage/src/lib.rs @@ -16,7 +16,7 @@ mod types; pub use crate::rocksdb::RocksdbStorage; pub use crate::{ in_memory::InMemoryStorage, - replica_state::FallbackReplicaStateStore, + replica_state::ReplicaStore, traits::{BlockStore, ReplicaStateStore, WriteBlockStore}, types::{Proposal, ReplicaState, StorageError, StorageResult}, }; diff --git a/node/libs/storage/src/replica_state.rs b/node/libs/storage/src/replica_state.rs index e10dc547..8d4da417 100644 --- a/node/libs/storage/src/replica_state.rs +++ b/node/libs/storage/src/replica_state.rs @@ -1,7 +1,7 @@ //! `FallbackReplicaStateStore` type. use crate::{ - traits::{BlockStore, ReplicaStateStore}, + traits::{ReplicaStateStore, WriteBlockStore}, types::{ReplicaState, StorageResult}, }; use std::sync::Arc; @@ -22,35 +22,35 @@ impl From for ReplicaState { /// [`ReplicaStateStore`] wrapper that falls back to a specified block store. #[derive(Debug, Clone)] -pub struct FallbackReplicaStateStore { - base: Arc, - fallback: Arc, +pub struct ReplicaStore { + state: Arc, + blocks: Arc, } -impl FallbackReplicaStateStore { +impl ReplicaStore { /// Creates a store from a type implementing both replica state and block storage. pub fn from_store(store: Arc) -> Self where - S: ReplicaStateStore + BlockStore + 'static, + S: ReplicaStateStore + WriteBlockStore + 'static, { Self { - base: store.clone(), - fallback: store, + state: store.clone(), + blocks: store, } } /// Creates a new replica state store with a fallback. - pub fn new(base: Arc, fallback: Arc) -> Self { - Self { base, fallback } + pub fn new(state: Arc, blocks: Arc) -> Self { + Self { state, blocks } } /// Gets the replica state. If it's not present, falls back to recover it from the fallback block store. 
pub async fn replica_state(&self, ctx: &ctx::Ctx) -> StorageResult { - let replica_state = self.base.replica_state(ctx).await?; + let replica_state = self.state.replica_state(ctx).await?; if let Some(replica_state) = replica_state { Ok(replica_state) } else { - let head_block = self.fallback.head_block(ctx).await?; + let head_block = self.blocks.head_block(ctx).await?; Ok(ReplicaState::from(head_block.justification)) } } @@ -61,6 +61,15 @@ impl FallbackReplicaStateStore { ctx: &ctx::Ctx, replica_state: &ReplicaState, ) -> StorageResult<()> { - self.base.put_replica_state(ctx, replica_state).await + self.state.put_replica_state(ctx, replica_state).await + } + + /// Puts a block into this storage. + pub async fn put_block( + &self, + ctx: &ctx::Ctx, + block: &validator::FinalBlock, + ) -> StorageResult<()> { + self.blocks.put_block(ctx, block).await } } diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs index b781a248..9f5447b0 100644 --- a/node/tools/src/main.rs +++ b/node/tools/src/main.rs @@ -12,7 +12,7 @@ use tracing::metadata::LevelFilter; use tracing_subscriber::{prelude::*, Registry}; use vise_exporter::MetricsExporter; use zksync_concurrency::{ - ctx::{self, channel}, + ctx::{self}, scope, time, }; use zksync_consensus_executor::Executor; @@ -112,14 +112,8 @@ async fn main() -> anyhow::Result<()> { let mut executor = Executor::new(configs.executor, configs.node_key, storage.clone()) .context("Executor::new()")?; if let Some((consensus_config, validator_key)) = configs.consensus { - let blocks_sender = channel::unbounded().0; // Just drop finalized blocks executor - .set_validator( - consensus_config, - validator_key, - storage.clone(), - blocks_sender, - ) + .set_validator(consensus_config, validator_key, storage.clone()) .context("Executor::set_validator()")?; } From ba3bbda9706624a590b61550d4b64e49c339271c Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Thu, 16 Nov 2023 17:59:23 +0100 Subject: [PATCH 02/14] restored metrics --- node/actors/bft/src/metrics.rs | 2 ++ node/actors/bft/src/replica/block.rs | 7 ++++++- node/actors/executor/src/lib.rs | 1 - node/actors/executor/src/metrics.rs | 15 --------------- 4 files changed, 8 insertions(+), 17 deletions(-) delete mode 100644 node/actors/executor/src/metrics.rs diff --git a/node/actors/bft/src/metrics.rs b/node/actors/bft/src/metrics.rs index 8fef767e..6412860e 100644 --- a/node/actors/bft/src/metrics.rs +++ b/node/actors/bft/src/metrics.rs @@ -68,6 +68,8 @@ pub(crate) struct ConsensusMetrics { /// Latency of processing messages by the leader. #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] pub(crate) leader_processing_latency: Family>, + /// Number of the last finalized block observed by the node. + pub(crate) finalized_block_number: Gauge, } /// Global instance of [`ConsensusMetrics`]. 
diff --git a/node/actors/bft/src/replica/block.rs b/node/actors/bft/src/replica/block.rs index db4ae8bb..66a4562b 100644 --- a/node/actors/bft/src/replica/block.rs +++ b/node/actors/bft/src/replica/block.rs @@ -41,6 +41,11 @@ impl StateMachine { self.storage .put_block(ctx, &block) .await - .context("store.put_block()") + .context("store.put_block()")?; + + let number_metric = &crate::metrics::METRICS.finalized_block_number; + let current_number = number_metric.get(); + number_metric.set(current_number.max(block.header.number.0)); + Ok(()) } } diff --git a/node/actors/executor/src/lib.rs b/node/actors/executor/src/lib.rs index 2e99e180..e61abdbc 100644 --- a/node/actors/executor/src/lib.rs +++ b/node/actors/executor/src/lib.rs @@ -13,7 +13,6 @@ use zksync_consensus_utils::pipe; mod config; mod io; -mod metrics; pub mod testonly; #[cfg(test)] mod tests; diff --git a/node/actors/executor/src/metrics.rs b/node/actors/executor/src/metrics.rs deleted file mode 100644 index e404722f..00000000 --- a/node/actors/executor/src/metrics.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! Metrics defined by the executor module. - -use vise::{Gauge, Metrics}; - -/// Metrics defined by the executor module. -#[derive(Debug, Metrics)] -#[metrics(prefix = "executor")] -pub(crate) struct ExecutorMetrics { - /// Number of the last finalized block observed by the node. - pub(crate) finalized_block_number: Gauge, -} - -/// Global instance of [`ExecutorMetrics`]. -#[vise::register] -pub(crate) static METRICS: vise::Global = vise::Global::new(); From 035b100c27d2a8bd27b56c6ac9c3ea2b186703dd Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Thu, 16 Nov 2023 19:06:43 +0100 Subject: [PATCH 03/14] extracted payload provider and payload verification --- node/Cargo.lock | 1 + node/actors/bft/Cargo.toml | 1 + node/actors/bft/src/leader/replica_commit.rs | 2 +- node/actors/bft/src/leader/replica_prepare.rs | 17 +++++----- node/actors/bft/src/leader/state_machine.rs | 33 +++++++++++++------ node/actors/bft/src/lib.rs | 23 +++++++++---- node/actors/bft/src/testonly/make.rs | 21 ++++++++++-- node/actors/bft/src/testonly/run.rs | 3 +- node/actors/executor/src/lib.rs | 10 +++--- node/actors/executor/src/tests.rs | 8 ++++- node/libs/storage/src/replica_state.rs | 2 +- node/libs/storage/src/traits.rs | 11 ++++++- node/tools/src/main.rs | 7 +++- 13 files changed, 103 insertions(+), 36 deletions(-) diff --git a/node/Cargo.lock b/node/Cargo.lock index 7ed5fe20..1db9f77a 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -2581,6 +2581,7 @@ name = "zksync_consensus_bft" version = "0.1.0" dependencies = [ "anyhow", + "async-trait", "once_cell", "rand 0.8.5", "thiserror", diff --git a/node/actors/bft/Cargo.toml b/node/actors/bft/Cargo.toml index 82110ac5..f6bedd5d 100644 --- a/node/actors/bft/Cargo.toml +++ b/node/actors/bft/Cargo.toml @@ -16,6 +16,7 @@ zksync_consensus_utils.workspace = true zksync_protobuf.workspace = true anyhow.workspace = true +async-trait.workspace = true once_cell.workspace = true rand.workspace = true thiserror.workspace = true diff --git a/node/actors/bft/src/leader/replica_commit.rs b/node/actors/bft/src/leader/replica_commit.rs index fa7ea5cf..4538fe66 100644 --- a/node/actors/bft/src/leader/replica_commit.rs +++ b/node/actors/bft/src/leader/replica_commit.rs @@ -45,7 +45,7 @@ pub(crate) enum Error { } impl StateMachine { - #[instrument(level = "trace", ret)] + #[instrument(level = "trace", skip(self), ret)] pub(crate) fn process_replica_commit( &mut self, ctx: &ctx::Ctx, diff --git 
a/node/actors/bft/src/leader/replica_prepare.rs b/node/actors/bft/src/leader/replica_prepare.rs index e15c1626..c16621d5 100644 --- a/node/actors/bft/src/leader/replica_prepare.rs +++ b/node/actors/bft/src/leader/replica_prepare.rs @@ -1,6 +1,5 @@ use super::StateMachine; use crate::{inner::ConsensusInner, metrics}; -use rand::Rng; use std::collections::HashMap; use tracing::instrument; use zksync_concurrency::ctx; @@ -54,11 +53,14 @@ pub(crate) enum Error { /// Invalid `HighQC` message. #[error("invalid high QC: {0:#}")] InvalidHighQC(#[source] anyhow::Error), + /// Internal error. Unlike other error types, this one isn't supposed to be easily recoverable. + #[error("internal error: {0:#}")] + Internal(#[from] anyhow::Error), } impl StateMachine { - #[instrument(level = "trace", ret)] - pub(crate) fn process_replica_prepare( + #[instrument(level = "trace", skip(self), ret)] + pub(crate) async fn process_replica_prepare( &mut self, ctx: &ctx::Ctx, consensus: &ConsensusInner, @@ -181,11 +183,10 @@ impl StateMachine { Some(proposal) if proposal != highest_qc.message.proposal => (proposal, None), // The previous block was finalized, so we can propose a new block. _ => { - // TODO(bruno): For now we just create a block with a random payload. After we integrate with - // the execution layer we should have a call here to the mempool to get a real payload. - let mut payload = validator::Payload(vec![0; ConsensusInner::PAYLOAD_MAX_SIZE]); - ctx.rng().fill(&mut payload.0[..]); - + let payload = self + .payload_source + .propose(ctx, highest_qc.message.proposal.number.next()) + .await?; metrics::METRICS .leader_proposal_payload_size .observe(payload.0.len()); diff --git a/node/actors/bft/src/leader/state_machine.rs b/node/actors/bft/src/leader/state_machine.rs index 4380de83..32b652b3 100644 --- a/node/actors/bft/src/leader/state_machine.rs +++ b/node/actors/bft/src/leader/state_machine.rs @@ -1,6 +1,8 @@ -use crate::{metrics, ConsensusInner}; +use crate::{metrics, ConsensusInner, PayloadSource}; +use anyhow::Context as _; use std::{ collections::{BTreeMap, HashMap}, + sync::Arc, unreachable, }; use tracing::instrument; @@ -10,8 +12,9 @@ use zksync_consensus_roles::validator; /// The StateMachine struct contains the state of the leader. This is a simple state machine. We just store /// replica messages and produce leader messages (including proposing blocks) when we reach the threshold for /// those messages. When participating in consensus we are not the leader most of the time. -#[derive(Debug)] pub(crate) struct StateMachine { + /// Payload provider for the new blocks. + pub(crate) payload_source: Arc, /// The current view number. This might not match the replica's view number, we only have this here /// to make the leader advance monotonically in time and stop it from accepting messages from the past. pub(crate) view: validator::ViewNumber, @@ -36,9 +39,10 @@ pub(crate) struct StateMachine { impl StateMachine { /// Creates a new StateMachine struct. - #[instrument(level = "trace", ret)] - pub fn new(ctx: &ctx::Ctx) -> Self { + #[instrument(level = "trace", skip(payload_source))] + pub fn new(ctx: &ctx::Ctx, payload_source: Arc) -> Self { StateMachine { + payload_source, view: validator::ViewNumber(0), phase: validator::Phase::Prepare, phase_start: ctx.now(), @@ -51,21 +55,29 @@ impl StateMachine { /// Process an input message (leaders don't time out waiting for a message). This is the /// main entry point for the state machine. We need read-access to the inner consensus struct. 
/// As a result, we can modify our state machine or send a message to the executor. - #[instrument(level = "trace", ret)] - pub(crate) fn process_input( + #[instrument(level = "trace", skip(self), ret)] + pub(crate) async fn process_input( &mut self, ctx: &ctx::Ctx, consensus: &ConsensusInner, input: validator::Signed, - ) { + ) -> anyhow::Result<()> { let now = ctx.now(); let label = match &input.msg { validator::ConsensusMsg::ReplicaPrepare(_) => { - let res = self + let res = match self .process_replica_prepare(ctx, consensus, input.cast().unwrap()) - .map_err(|err| { + .await + { + Ok(()) => Ok(()), + Err(super::replica_prepare::Error::Internal(err)) => { + return Err(err).context("process_replica_prepare()") + } + Err(err) => { tracing::warn!("process_replica_prepare: {err:#}"); - }); + Err(()) + } + }; metrics::ConsensusMsgLabel::ReplicaPrepare.with_result(&res) } validator::ConsensusMsg::ReplicaCommit(_) => { @@ -79,5 +91,6 @@ impl StateMachine { _ => unreachable!(), }; metrics::METRICS.leader_processing_latency[&label].observe_latency(ctx.now() - now); + Ok(()) } } diff --git a/node/actors/bft/src/lib.rs b/node/actors/bft/src/lib.rs index dda99da5..0115ec54 100644 --- a/node/actors/bft/src/lib.rs +++ b/node/actors/bft/src/lib.rs @@ -14,10 +14,10 @@ //! - [Notes on modern consensus algorithms](https://timroughgarden.github.io/fob21/andy.pdf) //! - [Blog post comparing several consensus algorithms](https://decentralizedthoughts.github.io/2023-04-01-hotstuff-2/) //! - Blog posts explaining [safety](https://seafooler.com/2022/01/24/understanding-safety-hotstuff/) and [responsiveness](https://seafooler.com/2022/04/02/understanding-responsiveness-hotstuff/) - use crate::io::{InputMessage, OutputMessage}; use anyhow::Context as _; use inner::ConsensusInner; +use std::sync::Arc; use tracing::{info, instrument}; use zksync_concurrency::ctx; use zksync_consensus_roles::validator; @@ -34,8 +34,18 @@ pub mod testonly; #[cfg(test)] mod tests; +/// Payload provider for the new blocks. +#[async_trait::async_trait] +pub trait PayloadSource: Send + Sync + 'static { + /// Propose a payload for the block `block_number`. + async fn propose( + &self, + ctx: &ctx::Ctx, + block_number: validator::BlockNumber, + ) -> anyhow::Result; +} + /// The Consensus struct implements the consensus algorithm and is the main entry point for the consensus actor. -#[derive(Debug)] pub struct Consensus { /// The inner struct contains the data that is shared between the consensus state machines. pub(crate) inner: ConsensusInner, @@ -47,13 +57,14 @@ pub struct Consensus { impl Consensus { /// Creates a new Consensus struct. - #[instrument(level = "trace", ret)] + #[instrument(level = "trace", skip(payload_source))] pub async fn new( ctx: &ctx::Ctx, pipe: ActorPipe, secret_key: validator::SecretKey, validator_set: validator::ValidatorSet, storage: ReplicaStore, + payload_source: Arc, ) -> anyhow::Result { Ok(Consensus { inner: ConsensusInner { @@ -62,13 +73,13 @@ impl Consensus { validator_set, }, replica: replica::StateMachine::new(ctx, storage).await?, - leader: leader::StateMachine::new(ctx), + leader: leader::StateMachine::new(ctx, payload_source), }) } /// Starts the Consensus actor. It will start running, processing incoming messages and /// sending output messages. This is a blocking method. 
- #[instrument(level = "trace", ret)] + #[instrument(level = "trace", skip(self) ret)] pub async fn run(mut self, ctx: &ctx::Ctx) -> anyhow::Result<()> { info!( "Starting consensus actor {:?}", @@ -110,7 +121,7 @@ impl Consensus { match &req.msg.msg { validator::ConsensusMsg::ReplicaPrepare(_) | validator::ConsensusMsg::ReplicaCommit(_) => { - self.leader.process_input(ctx, &self.inner, req.msg); + self.leader.process_input(ctx, &self.inner, req.msg).await?; } validator::ConsensusMsg::LeaderPrepare(_) | validator::ConsensusMsg::LeaderCommit(_) => { diff --git a/node/actors/bft/src/testonly/make.rs b/node/actors/bft/src/testonly/make.rs index 568096f3..0190fbdd 100644 --- a/node/actors/bft/src/testonly/make.rs +++ b/node/actors/bft/src/testonly/make.rs @@ -1,15 +1,31 @@ //! This module contains utilities that are only meant for testing purposes. - use crate::{ io::{InputMessage, OutputMessage}, - Consensus, + Consensus, ConsensusInner, PayloadSource, }; +use rand::Rng as _; use std::sync::Arc; use zksync_concurrency::ctx; use zksync_consensus_roles::validator; use zksync_consensus_storage::{InMemoryStorage, ReplicaStore}; use zksync_consensus_utils::pipe::{self, DispatcherPipe}; +/// Provides payload consisting of random bytes. +pub struct RandomPayloadSource; + +#[async_trait::async_trait] +impl PayloadSource for RandomPayloadSource { + async fn propose( + &self, + ctx: &ctx::Ctx, + _block_number: validator::BlockNumber, + ) -> anyhow::Result { + let mut payload = validator::Payload(vec![0; ConsensusInner::PAYLOAD_MAX_SIZE]); + ctx.rng().fill(&mut payload.0[..]); + Ok(payload) + } +} + /// This creates a mock Consensus struct for unit tests. pub async fn make_consensus( ctx: &ctx::Ctx, @@ -28,6 +44,7 @@ pub async fn make_consensus( key.clone(), validator_set.clone(), ReplicaStore::from_store(Arc::new(storage)), + Arc::new(RandomPayloadSource), ); let consensus = consensus .await diff --git a/node/actors/bft/src/testonly/run.rs b/node/actors/bft/src/testonly/run.rs index 9581a556..3a4cda57 100644 --- a/node/actors/bft/src/testonly/run.rs +++ b/node/actors/bft/src/testonly/run.rs @@ -1,4 +1,4 @@ -use super::{Behavior, Node}; +use super::{Behavior, Node, RandomPayloadSource}; use crate::{testonly, Consensus}; use anyhow::Context; use std::{collections::HashMap, sync::Arc}; @@ -105,6 +105,7 @@ async fn run_nodes(ctx: &ctx::Ctx, network: Network, nodes: &[Node]) -> anyhow:: node.net.consensus_config().key.clone(), validator_set, storage, + Arc::new(RandomPayloadSource), ) .await .context("consensus")?; diff --git a/node/actors/executor/src/lib.rs b/node/actors/executor/src/lib.rs index e61abdbc..ccb9978c 100644 --- a/node/actors/executor/src/lib.rs +++ b/node/actors/executor/src/lib.rs @@ -1,10 +1,9 @@ //! Library files for the executor. We have it separate from the binary so that we can use these files in the tools crate. - use crate::io::Dispatcher; use anyhow::Context as _; use std::sync::Arc; use zksync_concurrency::{ctx, net, scope}; -use zksync_consensus_bft::{misc::consensus_threshold, Consensus}; +use zksync_consensus_bft::{misc::consensus_threshold, Consensus, PayloadSource}; use zksync_consensus_network as network; use zksync_consensus_roles::{node, validator}; use zksync_consensus_storage::{ReplicaStateStore, ReplicaStore, WriteBlockStore}; @@ -20,7 +19,6 @@ mod tests; pub use self::config::{proto, ConsensusConfig, ExecutorConfig, GossipConfig}; /// Validator-related part of [`Executor`]. -#[derive(Debug)] struct ValidatorExecutor { /// Consensus network configuration. 
config: ConsensusConfig, @@ -28,6 +26,8 @@ struct ValidatorExecutor { key: validator::SecretKey, /// Store for replica state. replica_state_store: Arc, + /// Payload proposer for new blocks. + payload_source: Arc, } impl ValidatorExecutor { @@ -42,7 +42,6 @@ impl ValidatorExecutor { } /// Executor allowing to spin up all actors necessary for a consensus node. -#[derive(Debug)] pub struct Executor { /// General-purpose executor configuration. executor_config: ExecutorConfig, @@ -82,6 +81,7 @@ impl Executor { config: ConsensusConfig, key: validator::SecretKey, replica_state_store: Arc, + payload_source: Arc, ) -> anyhow::Result<()> { let public = &config.key; anyhow::ensure!( @@ -100,6 +100,7 @@ impl Executor { config, key, replica_state_store, + payload_source, }); } else { tracing::info!( @@ -162,6 +163,7 @@ impl Executor { validator.key.clone(), validator_set.clone(), consensus_storage, + validator.payload_source, ) .await .context("consensus")?; diff --git a/node/actors/executor/src/tests.rs b/node/actors/executor/src/tests.rs index 23c63e07..ee9f432d 100644 --- a/node/actors/executor/src/tests.rs +++ b/node/actors/executor/src/tests.rs @@ -6,6 +6,7 @@ use rand::Rng; use std::iter; use test_casing::test_casing; use zksync_concurrency::{sync, testonly::abort_on_panic, time}; +use zksync_consensus_bft::testonly::RandomPayloadSource; use zksync_consensus_roles::validator::{BlockNumber, FinalBlock, Payload}; use zksync_consensus_storage::{BlockStore, InMemoryStorage}; @@ -34,7 +35,12 @@ impl FullValidatorConfig { fn into_executor(self, storage: Arc) -> Executor { let mut executor = Executor::new(self.node_config, self.node_key, storage.clone()).unwrap(); executor - .set_validator(self.consensus_config, self.validator_key, storage) + .set_validator( + self.consensus_config, + self.validator_key, + storage, + Arc::new(RandomPayloadSource), + ) .unwrap(); executor } diff --git a/node/libs/storage/src/replica_state.rs b/node/libs/storage/src/replica_state.rs index 8d4da417..eb13e6e8 100644 --- a/node/libs/storage/src/replica_state.rs +++ b/node/libs/storage/src/replica_state.rs @@ -20,7 +20,7 @@ impl From for ReplicaState { } } -/// [`ReplicaStateStore`] wrapper that falls back to a specified block store. +/// ReplicaStorage. #[derive(Debug, Clone)] pub struct ReplicaStore { state: Arc, diff --git a/node/libs/storage/src/traits.rs b/node/libs/storage/src/traits.rs index 9aa9298f..76a1b160 100644 --- a/node/libs/storage/src/traits.rs +++ b/node/libs/storage/src/traits.rs @@ -3,7 +3,7 @@ use crate::{types::ReplicaState, StorageResult}; use async_trait::async_trait; use std::{fmt, ops, sync::Arc}; use zksync_concurrency::{ctx, sync::watch}; -use zksync_consensus_roles::validator::{BlockNumber, FinalBlock}; +use zksync_consensus_roles::validator::{BlockNumber, FinalBlock, Payload}; /// Storage of L2 blocks. /// @@ -86,6 +86,15 @@ impl BlockStore for Arc { /// Implementations **must** propagate context cancellation using [`StorageError::Canceled`]. #[async_trait] pub trait WriteBlockStore: BlockStore { + /// Verify that `payload` is a correct proposal for the block `block_number`. + async fn verify_payload( + &self, + _ctx: &ctx::Ctx, + _block_number: BlockNumber, + _payload: &Payload, + ) -> anyhow::Result<()> { + Ok(()) + } /// Puts a block into this storage. 
async fn put_block(&self, ctx: &ctx::Ctx, block: &FinalBlock) -> StorageResult<()>; } diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs index 9f5447b0..5d8afa64 100644 --- a/node/tools/src/main.rs +++ b/node/tools/src/main.rs @@ -113,7 +113,12 @@ async fn main() -> anyhow::Result<()> { .context("Executor::new()")?; if let Some((consensus_config, validator_key)) = configs.consensus { executor - .set_validator(consensus_config, validator_key, storage.clone()) + .set_validator( + consensus_config, + validator_key, + storage.clone(), + Arc::new(zksync_consensus_bft::testonly::RandomPayloadSource), + ) .context("Executor::set_validator()")?; } From a2abdd473a67aaf29def05d176c5cf0e9da6184f Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Mon, 27 Nov 2023 13:28:49 +0100 Subject: [PATCH 04/14] made inmem storage infallible --- node/actors/executor/src/tests.rs | 5 ++-- node/libs/storage/src/in_memory.rs | 40 +++++++++++++----------------- 2 files changed, 19 insertions(+), 26 deletions(-) diff --git a/node/actors/executor/src/tests.rs b/node/actors/executor/src/tests.rs index 0f37e558..c34c48c5 100644 --- a/node/actors/executor/src/tests.rs +++ b/node/actors/executor/src/tests.rs @@ -78,8 +78,7 @@ async fn executor_misconfiguration(name: &str, mutation: fn(&mut FinalBlock)) { mutation(genesis_block); let storage = Arc::new(InMemoryStorage::new(genesis_block.clone())); let err = Executor::new(ctx, validator.node_config, validator.node_key, storage) - .await - .unwrap_err(); + .await.err().unwrap(); tracing::info!(%err, "received expected validation error"); } @@ -95,7 +94,7 @@ async fn genesis_block_mismatch() { let storage = Arc::new(InMemoryStorage::new(genesis_block.clone())); let err = Executor::new(ctx, validator.node_config, validator.node_key, storage) .await - .unwrap_err(); + .err().unwrap(); tracing::info!(%err, "received expected validation error"); } diff --git a/node/libs/storage/src/in_memory.rs b/node/libs/storage/src/in_memory.rs index 9c81e123..f272ce70 100644 --- a/node/libs/storage/src/in_memory.rs +++ b/node/libs/storage/src/in_memory.rs @@ -9,7 +9,7 @@ use async_trait::async_trait; use std::{collections::BTreeMap, ops}; use zksync_concurrency::{ ctx, - sync::{self, watch, Mutex}, + sync::{watch, Mutex}, }; use zksync_consensus_roles::validator::{BlockNumber, FinalBlock}; @@ -90,36 +90,32 @@ impl InMemoryStorage { #[async_trait] impl BlockStore for InMemoryStorage { - async fn head_block(&self, ctx: &ctx::Ctx) -> StorageResult { - Ok(sync::lock(ctx, &self.blocks).await?.head_block().clone()) + async fn head_block(&self, _ctx: &ctx::Ctx) -> StorageResult { + Ok(self.blocks.lock().await.head_block().clone()) } - async fn first_block(&self, ctx: &ctx::Ctx) -> StorageResult { - Ok(sync::lock(ctx, &self.blocks).await?.first_block().clone()) + async fn first_block(&self, _ctx: &ctx::Ctx) -> StorageResult { + Ok(self.blocks.lock().await.first_block().clone()) } - async fn last_contiguous_block_number(&self, ctx: &ctx::Ctx) -> StorageResult { - Ok(sync::lock(ctx, &self.blocks) - .await? 
- .last_contiguous_block_number) + async fn last_contiguous_block_number(&self, _ctx: &ctx::Ctx) -> StorageResult { + Ok(self.blocks.lock().await.last_contiguous_block_number) } async fn block( &self, - ctx: &ctx::Ctx, + _ctx: &ctx::Ctx, number: BlockNumber, ) -> StorageResult> { - Ok(sync::lock(ctx, &self.blocks).await?.block(number).cloned()) + Ok(self.blocks.lock().await.block(number).cloned()) } async fn missing_block_numbers( &self, - ctx: &ctx::Ctx, + _ctx: &ctx::Ctx, range: ops::Range, ) -> StorageResult> { - Ok(sync::lock(ctx, &self.blocks) - .await? - .missing_block_numbers(range)) + Ok(self.blocks.lock().await.missing_block_numbers(range)) } fn subscribe_to_block_writes(&self) -> watch::Receiver { @@ -129,10 +125,8 @@ impl BlockStore for InMemoryStorage { #[async_trait] impl WriteBlockStore for InMemoryStorage { - async fn put_block(&self, ctx: &ctx::Ctx, block: &FinalBlock) -> StorageResult<()> { - sync::lock(ctx, &self.blocks) - .await? - .put_block(block.clone()); + async fn put_block(&self, _ctx: &ctx::Ctx, block: &FinalBlock) -> StorageResult<()> { + self.blocks.lock().await.put_block(block.clone()); self.blocks_sender.send_replace(block.header.number); Ok(()) } @@ -140,16 +134,16 @@ impl WriteBlockStore for InMemoryStorage { #[async_trait] impl ReplicaStateStore for InMemoryStorage { - async fn replica_state(&self, ctx: &ctx::Ctx) -> StorageResult> { - Ok(sync::lock(ctx, &self.replica_state).await?.clone()) + async fn replica_state(&self, _ctx: &ctx::Ctx) -> StorageResult> { + Ok(self.replica_state.lock().await.clone()) } async fn put_replica_state( &self, - ctx: &ctx::Ctx, + _ctx: &ctx::Ctx, replica_state: &ReplicaState, ) -> StorageResult<()> { - *sync::lock(ctx, &self.replica_state).await? = Some(replica_state.clone()); + *self.replica_state.lock().await = Some(replica_state.clone()); Ok(()) } } From b8e56ba091ce605df602e19b27d7694d74d20461 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Sun, 3 Dec 2023 18:00:03 +0100 Subject: [PATCH 05/14] WIP strongly typed bft unit tests --- node/actors/bft/src/leader/replica_prepare.rs | 18 +- node/actors/bft/src/leader/state_machine.rs | 9 +- node/actors/bft/src/leader/tests.rs | 321 +++++++----------- node/actors/bft/src/lib.rs | 22 +- node/actors/bft/src/replica/leader_prepare.rs | 11 + node/actors/bft/src/replica/tests.rs | 24 ++ node/actors/bft/src/testonly/make.rs | 2 +- node/actors/bft/src/testonly/ut_harness.rs | 287 ++++++---------- node/libs/storage/src/replica_state.rs | 5 + node/libs/storage/src/traits.rs | 15 +- 10 files changed, 310 insertions(+), 404 deletions(-) diff --git a/node/actors/bft/src/leader/replica_prepare.rs b/node/actors/bft/src/leader/replica_prepare.rs index 9ec6c742..10e6eeb8 100644 --- a/node/actors/bft/src/leader/replica_prepare.rs +++ b/node/actors/bft/src/leader/replica_prepare.rs @@ -2,7 +2,7 @@ use super::StateMachine; use crate::{inner::ConsensusInner, metrics}; use std::collections::HashMap; use tracing::instrument; -use zksync_concurrency::ctx; +use zksync_concurrency::{ctx, error::Wrap}; use zksync_consensus_network::io::{ConsensusInputMessage, Target}; use zksync_consensus_roles::validator; @@ -54,8 +54,20 @@ pub(crate) enum Error { #[error("invalid high QC: {0:#}")] InvalidHighQC(#[source] anyhow::Error), /// Internal error. Unlike other error types, this one isn't supposed to be easily recoverable. 
- #[error("internal error: {0:#}")] - Internal(#[from] anyhow::Error), + #[error(transparent)] + Internal(#[from] ctx::Error), +} + +impl Wrap for Error { + fn with_wrap C>( + self, + f: F, + ) -> Self { + match self { + Error::Internal(err) => Error::Internal(err.with_wrap(f)), + err => err, + } + } } impl StateMachine { diff --git a/node/actors/bft/src/leader/state_machine.rs b/node/actors/bft/src/leader/state_machine.rs index 32b652b3..37505d40 100644 --- a/node/actors/bft/src/leader/state_machine.rs +++ b/node/actors/bft/src/leader/state_machine.rs @@ -1,12 +1,11 @@ use crate::{metrics, ConsensusInner, PayloadSource}; -use anyhow::Context as _; use std::{ collections::{BTreeMap, HashMap}, sync::Arc, unreachable, }; use tracing::instrument; -use zksync_concurrency::{ctx, metrics::LatencyHistogramExt as _, time}; +use zksync_concurrency::{ctx, metrics::LatencyHistogramExt as _, time, error::Wrap as _}; use zksync_consensus_roles::validator; /// The StateMachine struct contains the state of the leader. This is a simple state machine. We just store @@ -61,17 +60,17 @@ impl StateMachine { ctx: &ctx::Ctx, consensus: &ConsensusInner, input: validator::Signed, - ) -> anyhow::Result<()> { + ) -> ctx::Result<()> { let now = ctx.now(); let label = match &input.msg { validator::ConsensusMsg::ReplicaPrepare(_) => { let res = match self .process_replica_prepare(ctx, consensus, input.cast().unwrap()) - .await + .await.wrap("process_replica_prepare()") { Ok(()) => Ok(()), Err(super::replica_prepare::Error::Internal(err)) => { - return Err(err).context("process_replica_prepare()") + return Err(err); } Err(err) => { tracing::warn!("process_replica_prepare: {err:#}"); diff --git a/node/actors/bft/src/leader/tests.rs b/node/actors/bft/src/leader/tests.rs index 3883823e..abaec805 100644 --- a/node/actors/bft/src/leader/tests.rs +++ b/node/actors/bft/src/leader/tests.rs @@ -1,85 +1,58 @@ use super::{ replica_commit::Error as ReplicaCommitError, replica_prepare::Error as ReplicaPrepareError, }; +use zksync_concurrency::ctx; use crate::testonly::ut_harness::UTHarness; use assert_matches::assert_matches; use rand::Rng; use zksync_consensus_roles::validator::{ - self, CommitQC, ConsensusMsg, LeaderCommit, LeaderPrepare, Phase, PrepareQC, ProtocolVersion, - ReplicaCommit, ReplicaPrepare, ViewNumber, + self, LeaderCommit, LeaderPrepare, Phase, PrepareQC, ProtocolVersion, + ViewNumber, }; #[tokio::test] async fn replica_prepare_sanity() { - let mut util = UTHarness::new_many().await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new_many(ctx).await; util.set_view(util.owner_as_view_leader()); - - let replica_prepare = util.new_current_replica_prepare(|_| {}).cast().unwrap().msg; - util.dispatch_replica_prepare_many( - vec![replica_prepare; util.consensus_threshold()], - util.keys(), - ) - .await; + util.new_procedural_leader_prepare_many(ctx).await; } #[tokio::test] async fn replica_prepare_sanity_yield_leader_prepare() { - let mut util = UTHarness::new_one().await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; let replica_prepare = util.new_current_replica_prepare(|_| {}); - util.dispatch_replica_prepare_one(replica_prepare.clone()) - .await - .unwrap(); - let leader_prepare = util - .recv_signed() - .await - .unwrap() - .cast::() - .unwrap() - .msg; - - let replica_prepare = replica_prepare.cast::().unwrap().msg; + let leader_prepare = util.process_replica_prepare(ctx,replica_prepare.clone()).await.unwrap().unwrap(); assert_matches!( - 
leader_prepare, + leader_prepare.msg, LeaderPrepare { protocol_version, view, proposal, - proposal_payload: _, justification, + .. } => { - assert_eq!(protocol_version, replica_prepare.protocol_version); - assert_eq!(view, replica_prepare.view); - assert_eq!(proposal.parent, replica_prepare.high_vote.proposal.hash()); - assert_eq!(justification, util.new_prepare_qc(|msg| *msg = replica_prepare)); + assert_eq!(protocol_version, replica_prepare.msg.protocol_version); + assert_eq!(view, replica_prepare.msg.view); + assert_eq!(proposal.parent, replica_prepare.msg.high_vote.proposal.hash()); + assert_eq!(justification, util.new_prepare_qc(|msg| *msg = replica_prepare.msg)); } ); } #[tokio::test] async fn replica_prepare_sanity_yield_leader_prepare_reproposal() { - let mut util = UTHarness::new_many().await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new_many(ctx).await; util.set_view(util.owner_as_view_leader()); - let replica_prepare: ReplicaPrepare = - util.new_unfinalized_replica_prepare().cast().unwrap().msg; - util.dispatch_replica_prepare_many( - vec![replica_prepare.clone(); util.consensus_threshold()], - util.keys(), - ) - .await; - let leader_prepare = util - .recv_signed() - .await - .unwrap() - .cast::() - .unwrap() - .msg; + let replica_prepare = util.new_unfinalized_replica_prepare().msg; + let leader_prepare = util.process_replica_prepare_many(ctx, vec![replica_prepare.clone(); util.consensus_threshold()], util.keys()).await.unwrap(); assert_matches!( - leader_prepare, + leader_prepare.msg, LeaderPrepare { protocol_version, view, @@ -104,14 +77,14 @@ async fn replica_prepare_sanity_yield_leader_prepare_reproposal() { #[tokio::test] async fn replica_prepare_old_view() { - let mut util = UTHarness::new_one().await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; util.set_replica_view(ViewNumber(1)); util.set_leader_view(ViewNumber(2)); util.set_leader_phase(Phase::Prepare); let replica_prepare = util.new_current_replica_prepare(|_| {}); - let res = util.dispatch_replica_prepare_one(replica_prepare).await; + let res = util.process_replica_prepare(ctx,replica_prepare).await; assert_matches!( res, Err(ReplicaPrepareError::Old { @@ -123,12 +96,12 @@ async fn replica_prepare_old_view() { #[tokio::test] async fn replica_prepare_during_commit() { - let mut util = UTHarness::new_one().await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; util.set_leader_phase(Phase::Commit); let replica_prepare = util.new_current_replica_prepare(|_| {}); - let res = util.dispatch_replica_prepare_one(replica_prepare).await; + let res = util.process_replica_prepare(ctx,replica_prepare).await; assert_matches!( res, Err(ReplicaPrepareError::Old { @@ -140,54 +113,47 @@ async fn replica_prepare_during_commit() { #[tokio::test] async fn replica_prepare_not_leader_in_view() { - let mut util = UTHarness::new_with(2).await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,2).await; let current_view_leader = util.view_leader(util.current_replica_view()); assert_ne!(current_view_leader, util.owner_key().public()); let replica_prepare = util.new_current_replica_prepare(|_| {}); - let res = util - .dispatch_replica_prepare_one(replica_prepare.clone()) - .await; + let res = util.process_replica_prepare(ctx,replica_prepare).await; assert_matches!(res, Err(ReplicaPrepareError::NotLeaderInView)); } #[tokio::test] async fn replica_prepare_already_exists() { - let mut 
util = UTHarness::new_with(2).await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,2).await; let view = ViewNumber(2); util.set_replica_view(view); util.set_leader_view(view); assert_eq!(util.view_leader(view), util.owner_key().public()); let replica_prepare = util.new_current_replica_prepare(|_| {}); - assert!(util - .dispatch_replica_prepare_one(replica_prepare.clone()) - .await - .is_err()); - let res = util - .dispatch_replica_prepare_one(replica_prepare.clone()) - .await; + assert!(util.process_replica_prepare(ctx,replica_prepare.clone()).await.is_err()); + let res = util.process_replica_prepare(ctx,replica_prepare.clone()).await; assert_matches!( - res, - Err(ReplicaPrepareError::Exists { existing_message }) => { - assert_eq!(existing_message, replica_prepare.cast().unwrap().msg); + res, + Err(ReplicaPrepareError::Exists { existing_message }) => { + assert_eq!(existing_message, replica_prepare.msg); } ); } #[tokio::test] async fn replica_prepare_num_received_below_threshold() { - let mut util = UTHarness::new_with(2).await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx, 2).await; let view = ViewNumber(2); util.set_replica_view(view); util.set_leader_view(view); assert_eq!(util.view_leader(view), util.owner_key().public()); let replica_prepare = util.new_current_replica_prepare(|_| {}); - let res = util.dispatch_replica_prepare_one(replica_prepare).await; + let res = util.process_replica_prepare(ctx,replica_prepare).await; assert_matches!( res, Err(ReplicaPrepareError::NumReceivedBelowThreshold { @@ -199,37 +165,33 @@ async fn replica_prepare_num_received_below_threshold() { #[tokio::test] async fn replica_prepare_invalid_sig() { - let mut util = UTHarness::new_one().await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; let mut replica_prepare = util.new_current_replica_prepare(|_| {}); - replica_prepare.sig = util.rng().gen(); - let res = util.dispatch_replica_prepare_one(replica_prepare).await; + replica_prepare.sig = ctx.rng().gen(); + let res = util.process_replica_prepare(ctx,replica_prepare).await; assert_matches!(res, Err(ReplicaPrepareError::InvalidSignature(_))); } #[tokio::test] async fn replica_prepare_invalid_commit_qc() { - let mut util = UTHarness::new_one().await; - - let junk = util.rng().gen::(); - let replica_prepare = util.new_current_replica_prepare(|msg| msg.high_qc = junk); - let res = util.dispatch_replica_prepare_one(replica_prepare).await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; + let replica_prepare = util.new_current_replica_prepare(|msg| msg.high_qc = ctx.rng().gen()); + let res = util.process_replica_prepare(ctx, replica_prepare).await; assert_matches!(res, Err(ReplicaPrepareError::InvalidHighQC(..))); } #[tokio::test] async fn replica_prepare_high_qc_of_current_view() { - let mut util = UTHarness::new_one().await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; let view = ViewNumber(1); let qc_view = ViewNumber(1); - util.set_view(view); - let qc = util.new_commit_qc(|msg| { - msg.view = qc_view; - }); + let qc = util.new_commit_qc(|msg| msg.view = qc_view); let replica_prepare = util.new_current_replica_prepare(|msg| msg.high_qc = qc); - let res = util.dispatch_replica_prepare_one(replica_prepare).await; + let res = util.process_replica_prepare(ctx,replica_prepare).await; assert_matches!( res, Err(ReplicaPrepareError::HighQCOfFutureView 
{ high_qc_view, current_view }) => { @@ -241,18 +203,15 @@ async fn replica_prepare_high_qc_of_current_view() { #[tokio::test] async fn replica_prepare_high_qc_of_future_view() { - let mut util = UTHarness::new_one().await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; let view = ViewNumber(1); let qc_view = ViewNumber(2); - util.set_view(view); - let qc = util.new_commit_qc(|msg| { - msg.view = qc_view; - }); - + let qc = util.new_commit_qc(|msg| msg.view = qc_view); let replica_prepare = util.new_current_replica_prepare(|msg| msg.high_qc = qc); - let res = util.dispatch_replica_prepare_one(replica_prepare).await; + let res = util.process_replica_prepare(ctx,replica_prepare).await; assert_matches!( res, Err(ReplicaPrepareError::HighQCOfFutureView{ high_qc_view, current_view }) => { @@ -265,21 +224,21 @@ async fn replica_prepare_high_qc_of_future_view() { #[ignore = "fails/unsupported"] #[tokio::test] async fn replica_prepare_non_validator_signer() { - let mut util = UTHarness::new_with(2).await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,2).await; let view = ViewNumber(2); util.set_view(view); assert_eq!(util.view_leader(view), util.key_at(0).public()); - let replica_prepare = util.new_current_replica_prepare(|_| {}); assert!(util - .dispatch_replica_prepare_one(replica_prepare.clone()) + .process_replica_prepare(ctx,replica_prepare.clone()) .await .is_err()); - let non_validator: validator::SecretKey = util.rng().gen(); + let non_validator: validator::SecretKey = ctx.rng().gen(); let replica_prepare = non_validator.sign_msg(replica_prepare.msg); - util.dispatch_replica_prepare_one(replica_prepare) + util.process_replica_prepare(ctx,replica_prepare) .await .unwrap(); // PANICS: @@ -288,67 +247,38 @@ async fn replica_prepare_non_validator_signer() { #[tokio::test] async fn replica_commit_sanity() { - let mut util = UTHarness::new_many().await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new_many(ctx).await; util.set_view(util.owner_as_view_leader()); - - let replica_commit = util - .new_procedural_replica_commit_many() - .await - .cast() - .unwrap() - .msg; - util.dispatch_replica_commit_many( - vec![replica_commit; util.consensus_threshold()], - util.keys(), - ) - .unwrap(); + util.new_procedural_leader_commit_many(ctx).await; } #[tokio::test] async fn replica_commit_sanity_yield_leader_commit() { - let mut util = UTHarness::new_one().await; - - let replica_commit = util.new_procedural_replica_commit_one().await; - util.dispatch_replica_commit_one(replica_commit.clone()) - .unwrap(); - let leader_commit = util - .recv_signed() - .await - .unwrap() - .cast::() - .unwrap() - .msg; - - let replica_commit = replica_commit.cast::().unwrap().msg; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; + let replica_commit = util.new_procedural_replica_commit(ctx).await; + let leader_commit = util.process_replica_commit(ctx,replica_commit.clone()).await.unwrap().unwrap(); assert_matches!( - leader_commit, + leader_commit.msg, LeaderCommit { protocol_version, justification, } => { - assert_eq!(protocol_version, replica_commit.protocol_version); - assert_eq!(justification, util.new_commit_qc(|msg| *msg = replica_commit)); + assert_eq!(protocol_version, replica_commit.msg.protocol_version); + assert_eq!(justification, util.new_commit_qc(|msg| *msg = replica_commit.msg)); } ); } #[tokio::test] async fn replica_commit_old() { - let mut util = 
UTHarness::new_one().await; - - let mut replica_commit = util - .new_procedural_replica_commit_one() - .await - .cast::() - .unwrap() - .msg; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; + let mut replica_commit = util.new_procedural_replica_commit(ctx).await.msg; replica_commit.view = util.current_replica_view().prev(); - let replica_commit = util - .owner_key() - .sign_msg(ConsensusMsg::ReplicaCommit(replica_commit)); - - let res = util.dispatch_replica_commit_one(replica_commit); + let replica_commit = util.owner_key().sign_msg(replica_commit); + let res = util.process_replica_commit(ctx,replica_commit).await; assert_matches!( res, Err(ReplicaCommitError::Old { current_view, current_phase }) => { @@ -360,73 +290,58 @@ async fn replica_commit_old() { #[tokio::test] async fn replica_commit_not_leader_in_view() { - let mut util = UTHarness::new_with(2).await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,2).await; let current_view_leader = util.view_leader(util.current_replica_view()); assert_ne!(current_view_leader, util.owner_key().public()); let replica_commit = util.new_current_replica_commit(|_| {}); - let res = util.dispatch_replica_commit_one(replica_commit); + let res = util.process_replica_commit(ctx,replica_commit).await; assert_matches!(res, Err(ReplicaCommitError::NotLeaderInView)); } #[tokio::test] async fn replica_commit_already_exists() { - let mut util = UTHarness::new_with(2).await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,2).await; let view = ViewNumber(2); util.set_replica_view(view); util.set_leader_view(view); assert_eq!(util.view_leader(view), util.owner_key().public()); - let replica_prepare_one = util.new_current_replica_prepare(|_| {}); - assert!(util - .dispatch_replica_prepare_one(replica_prepare_one.clone()) - .await - .is_err()); - let replica_prepare_two = util.key_at(1).sign_msg(replica_prepare_one.msg); - util.dispatch_replica_prepare_one(replica_prepare_two) - .await - .unwrap(); - - let leader_prepare = util.recv_signed().await.unwrap(); - util.dispatch_leader_prepare(leader_prepare).await.unwrap(); - - let replica_commit = util.recv_signed().await.unwrap(); - let _ = util.dispatch_replica_commit_one(replica_commit.clone()); - let res = util.dispatch_replica_commit_one(replica_commit.clone()); + let replica_prepare1 = util.new_current_replica_prepare(|_| {}); + assert!(util.process_replica_prepare(ctx,replica_prepare1.clone()).await.is_err()); + let replica_prepare2 = util.key_at(1).sign_msg(replica_prepare1.msg); + let leader_prepare = util.process_replica_prepare(ctx,replica_prepare2).await.unwrap().unwrap(); + let replica_commit = util.process_leader_prepare(ctx,leader_prepare).await.unwrap(); + assert!(util.process_replica_commit(ctx,replica_commit.clone()).await.is_err()); + let res = util.process_replica_commit(ctx,replica_commit.clone()).await; assert_matches!( res, Err(ReplicaCommitError::DuplicateMessage { existing_message }) => { - assert_eq!(existing_message, replica_commit.cast::().unwrap().msg) + assert_eq!(existing_message, replica_commit.msg) } ); } #[tokio::test] async fn replica_commit_num_received_below_threshold() { - let mut util = UTHarness::new_with(2).await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,2).await; let view = ViewNumber(2); util.set_replica_view(view); util.set_leader_view(view); assert_eq!(util.view_leader(view), util.owner_key().public()); - let 
replica_prepare_one = util.new_current_replica_prepare(|_| {}); - assert!(util - .dispatch_replica_prepare_one(replica_prepare_one.clone()) - .await - .is_err()); - let replica_prepare_two = util.key_at(1).sign_msg(replica_prepare_one.msg); - util.dispatch_replica_prepare_one(replica_prepare_two) - .await - .unwrap(); - - let leader_prepare = util.recv_signed().await.unwrap(); - util.dispatch_leader_prepare(leader_prepare).await.unwrap(); - - let replica_commit = util.recv_signed().await.unwrap(); - let res = util.dispatch_replica_commit_one(replica_commit.clone()); + let replica_prepare = util.new_current_replica_prepare(|_| {}); + assert!(util.process_replica_prepare(ctx,replica_prepare.clone()).await.is_err()); + let replica_prepare = util.key_at(1).sign_msg(replica_prepare.msg); + let leader_prepare = util.process_replica_prepare(ctx,replica_prepare).await.unwrap().unwrap(); + let replica_commit = util.process_leader_prepare(ctx,leader_prepare).await.unwrap(); + let res = util.process_replica_commit(ctx,replica_commit.clone()).await; assert_matches!( res, Err(ReplicaCommitError::NumReceivedBelowThreshold { @@ -438,58 +353,50 @@ async fn replica_commit_num_received_below_threshold() { #[tokio::test] async fn replica_commit_invalid_sig() { - let mut util = UTHarness::new_one().await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; let mut replica_commit = util.new_current_replica_commit(|_| {}); replica_commit.sig = util.rng().gen(); - let res = util.dispatch_replica_commit_one(replica_commit); + let res = util.process_replica_commit(ctx,replica_commit).await; assert_matches!(res, Err(ReplicaCommitError::InvalidSignature(..))); } #[tokio::test] async fn replica_commit_unexpected_proposal() { - let mut util = UTHarness::new_one().await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; let replica_commit = util.new_current_replica_commit(|_| {}); - let res = util.dispatch_replica_commit_one(replica_commit); + let res = util.process_replica_commit(ctx,replica_commit).await; assert_matches!(res, Err(ReplicaCommitError::UnexpectedProposal)); } #[ignore = "fails/unsupported"] #[tokio::test] async fn replica_commit_protocol_version_mismatch() { - let mut util = UTHarness::new_with(2).await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,2).await; let view = ViewNumber(2); util.set_replica_view(view); util.set_leader_view(view); assert_eq!(util.view_leader(view), util.owner_key().public()); - let replica_prepare_one = util.new_current_replica_prepare(|_| {}); + let replica_prepare = util.new_current_replica_prepare(|_| {}); assert!(util - .dispatch_replica_prepare_one(replica_prepare_one.clone()) + .process_replica_prepare(ctx,replica_prepare.clone()) .await .is_err()); - let replica_prepare_two = util.key_at(1).sign_msg(replica_prepare_one.msg); - util.dispatch_replica_prepare_one(replica_prepare_two) - .await - .unwrap(); + let replica_prepare = util.key_at(1).sign_msg(replica_prepare.msg); + let leader_prepare = util.process_replica_prepare(ctx,replica_prepare).await.unwrap().unwrap(); + let replica_commit = util.process_leader_prepare(ctx,leader_prepare).await.unwrap(); + assert!(util.process_replica_commit(ctx,replica_commit.clone()).await.is_err()); - let leader_prepare = util.recv_signed().await.unwrap(); - util.dispatch_leader_prepare(leader_prepare).await.unwrap(); + let mut replica_commit2 = replica_commit.msg; + replica_commit2.protocol_version = 
ProtocolVersion(replica_commit.msg.protocol_version.0 + 1); - let replica_commit = util.recv_signed().await.unwrap(); - let _ = util.dispatch_replica_commit_one(replica_commit.clone()); - - let mut replica_commit_two = replica_commit.cast::().unwrap().msg; - replica_commit_two.protocol_version = - ProtocolVersion(replica_commit_two.protocol_version.0 + 1); - - let replica_commit_two = util - .key_at(1) - .sign_msg(ConsensusMsg::ReplicaCommit(replica_commit_two)); - util.dispatch_replica_commit_one(replica_commit_two) - .unwrap(); + let replica_commit2 = util.key_at(1).sign_msg(replica_commit2); + util.process_replica_commit(ctx,replica_commit).await.unwrap(); // PANICS: // "Couldn't create justification from valid replica messages!: CommitQC can only be created from votes for the same message." } diff --git a/node/actors/bft/src/lib.rs b/node/actors/bft/src/lib.rs index a19af805..b4e83940 100644 --- a/node/actors/bft/src/lib.rs +++ b/node/actors/bft/src/lib.rs @@ -42,7 +42,7 @@ pub trait PayloadSource: Send + Sync + 'static { &self, ctx: &ctx::Ctx, block_number: validator::BlockNumber, - ) -> anyhow::Result; + ) -> ctx::Result; } /// The Consensus struct implements the consensus algorithm and is the main entry point for the consensus actor. @@ -110,7 +110,7 @@ impl Consensus { return Ok(()); } - match input { + let res = match input { Some(InputMessage::Network(req)) => { if req.msg.msg.protocol_version() != self.inner.protocol_version { tracing::warn!( @@ -120,25 +120,33 @@ impl Consensus { ); continue; } - match &req.msg.msg { + let res = match &req.msg.msg { validator::ConsensusMsg::ReplicaPrepare(_) | validator::ConsensusMsg::ReplicaCommit(_) => { - self.leader.process_input(ctx, &self.inner, req.msg).await?; + self.leader.process_input(ctx, &self.inner, req.msg).await } validator::ConsensusMsg::LeaderPrepare(_) | validator::ConsensusMsg::LeaderCommit(_) => { self.replica .process_input(ctx, &self.inner, Some(req.msg)) - .await?; + .await } - } + }; + // Notify network actor that the message has been processed. // Ignore sending error. let _ = req.ack.send(()); + res } None => { - self.replica.process_input(ctx, &self.inner, None).await?; + self.replica.process_input(ctx, &self.inner, None).await } + }; + if let Err(err) = res { + return match err { + ctx::Error::Canceled(_) => Ok(()), + ctx::Error::Internal(err) => Err(err), + }; } } } diff --git a/node/actors/bft/src/replica/leader_prepare.rs b/node/actors/bft/src/replica/leader_prepare.rs index 51fa919b..74ec0fee 100644 --- a/node/actors/bft/src/replica/leader_prepare.rs +++ b/node/actors/bft/src/replica/leader_prepare.rs @@ -90,6 +90,9 @@ pub(crate) enum Error { /// Proposal header corresponding to the payload. header: validator::BlockHeader, }, + /// Invalid payload. + #[error("invalid payload: {0:#}")] + ProposalInvalidPayload(#[source] anyhow::Error), /// Re-proposal without quorum. #[error("block re-proposal without quorum for the re-proposal")] ReproposalWithoutQuorum, @@ -247,6 +250,14 @@ impl StateMachine { header: message.proposal, }); } + + // Payload should be valid. + if let Err(err) = self.storage.verify_payload(ctx, message.proposal.number, payload).await { + return Err(match err { + err @ ctx::Error::Canceled(_) => Error::Internal(err), + ctx::Error::Internal(err) => Error::ProposalInvalidPayload(err), + }); + } } // The leader is re-proposing a past block. 
None => { diff --git a/node/actors/bft/src/replica/tests.rs b/node/actors/bft/src/replica/tests.rs index 1af6d4cb..c1263863 100644 --- a/node/actors/bft/src/replica/tests.rs +++ b/node/actors/bft/src/replica/tests.rs @@ -154,6 +154,30 @@ async fn leader_prepare_old_view() { ); } +/* +/// Tests that `WriteBlockStore::verify_payload` is applied before signing a vote. +#[tokio::test] +async fn leader_prepare_invalid_payload() { + let mut util = UTHarness::new_one().await; + let leader_prepare = util.new_procedural_leader_prepare_one().await; + let mut leader_prepare = util + .new_procedural_leader_prepare_one() + .await + .cast::() + .unwrap() + .msg; + let leader_prepare = util + .owner_key() + .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare)); + + + let res = util.dispatch_leader_prepare(leader_prepare).await; + assert_matches!( + res, + Err(LeaderPrepareError::ProposalInvalidPayload(..)), + ); +}*/ + #[tokio::test] async fn leader_prepare_invalid_sig() { let mut util = UTHarness::new_one().await; diff --git a/node/actors/bft/src/testonly/make.rs b/node/actors/bft/src/testonly/make.rs index 42d94bca..74e9a1b6 100644 --- a/node/actors/bft/src/testonly/make.rs +++ b/node/actors/bft/src/testonly/make.rs @@ -19,7 +19,7 @@ impl PayloadSource for RandomPayloadSource { &self, ctx: &ctx::Ctx, _block_number: validator::BlockNumber, - ) -> anyhow::Result { + ) -> ctx::Result { let mut payload = validator::Payload(vec![0; ConsensusInner::PAYLOAD_MAX_SIZE]); ctx.rng().fill(&mut payload.0[..]); Ok(payload) diff --git a/node/actors/bft/src/testonly/ut_harness.rs b/node/actors/bft/src/testonly/ut_harness.rs index 4e8d4b21..278a5e40 100644 --- a/node/actors/bft/src/testonly/ut_harness.rs +++ b/node/actors/bft/src/testonly/ut_harness.rs @@ -1,17 +1,14 @@ use crate::{ io::{InputMessage, OutputMessage}, leader::{ReplicaCommitError, ReplicaPrepareError}, - replica::{LeaderCommitError, LeaderPrepareError}, + replica::{LeaderPrepareError, LeaderCommitError}, Consensus, }; +use zksync_consensus_utils::enum_util::{Variant}; use assert_matches::assert_matches; use rand::{rngs::StdRng, Rng}; use std::cmp::Ordering; -use zksync_concurrency::{ - ctx, - ctx::{Canceled, Ctx}, - scope, -}; +use zksync_concurrency::ctx; use zksync_consensus_network::io::ConsensusInputMessage; use zksync_consensus_roles::validator::{ self, BlockHeader, CommitQC, ConsensusMsg, LeaderCommit, LeaderPrepare, Payload, Phase, @@ -26,29 +23,21 @@ use zksync_consensus_utils::pipe::DispatcherPipe; /// It should be instantiated once for every test case. #[cfg(test)] pub(crate) struct UTHarness { - ctx: Ctx, - rng: StdRng, - consensus: Consensus, + pub consensus: Consensus, pipe: DispatcherPipe, keys: Vec, } impl UTHarness { - /// Creates a new `UTHarness` with one validator. - pub(crate) async fn new_one() -> UTHarness { - UTHarness::new_with(1).await - } - /// Creates a new `UTHarness` with minimally-significant validator set size. - pub(crate) async fn new_many() -> UTHarness { + pub(crate) async fn new_many(ctx: &ctx::Ctx) -> UTHarness { let num_validators = 6; - assert_matches!(crate::misc::faulty_replicas(num_validators), res if res > 0); - UTHarness::new_with(num_validators).await + assert!(crate::misc::faulty_replicas(num_validators) > 0); + UTHarness::new(ctx,num_validators).await } /// Creates a new `UTHarness` with the specified validator set size. 
- pub(crate) async fn new_with(num_validators: usize) -> UTHarness { - let ctx = ctx::test_root(&ctx::RealClock); + pub(crate) async fn new(ctx: &ctx::Ctx, num_validators: usize) -> UTHarness { let mut rng = ctx.rng(); let keys: Vec<_> = (0..num_validators).map(|_| rng.gen()).collect(); let (genesis, val_set) = crate::testonly::make_genesis( @@ -61,14 +50,7 @@ impl UTHarness { consensus.leader.view = ViewNumber(1); consensus.replica.view = ViewNumber(1); - - UTHarness { - ctx, - rng, - consensus, - pipe, - keys, - } + UTHarness { consensus, pipe, keys } } pub(crate) fn consensus_threshold(&self) -> usize { @@ -120,7 +102,7 @@ impl UTHarness { self.consensus.replica.view = view } - pub(crate) fn new_unfinalized_replica_prepare(&self) -> Signed { + pub(crate) fn new_unfinalized_replica_prepare(&self) -> Signed { self.new_current_replica_prepare(|msg| { let mut high_vote = ReplicaCommit { protocol_version: validator::ProtocolVersion::EARLIEST, @@ -138,20 +120,15 @@ impl UTHarness { pub(crate) fn new_current_replica_prepare( &self, mutate_fn: impl FnOnce(&mut ReplicaPrepare), - ) -> Signed { + ) -> Signed { let mut msg = ReplicaPrepare { protocol_version: validator::ProtocolVersion::EARLIEST, view: self.consensus.replica.view, high_vote: self.consensus.replica.high_vote, high_qc: self.consensus.replica.high_qc.clone(), }; - mutate_fn(&mut msg); - - self.consensus - .inner - .secret_key - .sign_msg(ConsensusMsg::ReplicaPrepare(msg)) + self.consensus.inner.secret_key.sign_msg(msg) } pub(crate) fn new_rnd_leader_prepare( @@ -182,25 +159,20 @@ impl UTHarness { pub(crate) fn new_current_replica_commit( &self, mutate_fn: impl FnOnce(&mut ReplicaCommit), - ) -> Signed { + ) -> Signed { let mut msg = ReplicaCommit { protocol_version: validator::ProtocolVersion::EARLIEST, view: self.consensus.replica.view, proposal: self.consensus.replica.high_qc.message.proposal, }; - mutate_fn(&mut msg); - - self.consensus - .inner - .secret_key - .sign_msg(ConsensusMsg::ReplicaCommit(msg)) + self.consensus.inner.secret_key.sign_msg(msg) } pub(crate) fn new_rnd_leader_commit( &mut self, mutate_fn: impl FnOnce(&mut LeaderCommit), - ) -> Signed { + ) -> Signed { let mut msg = LeaderCommit { protocol_version: validator::ProtocolVersion::EARLIEST, justification: self.rng().gen(), @@ -211,91 +183,97 @@ impl UTHarness { self.consensus .inner .secret_key - .sign_msg(ConsensusMsg::LeaderCommit(msg)) + .sign_msg(msg) } - pub(crate) async fn new_procedural_leader_prepare_one(&mut self) -> Signed { + pub(crate) async fn new_procedural_leader_prepare(&mut self, ctx: &ctx::Ctx) -> Signed { let replica_prepare = self.new_current_replica_prepare(|_| {}); - self.dispatch_replica_prepare_one(replica_prepare.clone()) - .await - .unwrap(); - self.recv_signed().await.unwrap() + self.process_replica_prepare(ctx, replica_prepare).await.unwrap().unwrap() } - pub(crate) async fn new_procedural_leader_prepare_many(&mut self) -> Signed { + pub(crate) async fn new_procedural_leader_prepare_many(&mut self, ctx: &ctx::Ctx) -> Signed { let replica_prepare = self.new_current_replica_prepare(|_| {}).cast().unwrap().msg; - self.dispatch_replica_prepare_many( + self.process_replica_prepare_many( + ctx, vec![replica_prepare; self.consensus_threshold()], self.keys(), ) - .await; - self.recv_signed().await.unwrap() + .await.unwrap() } - pub(crate) async fn new_procedural_replica_commit_one(&mut self) -> Signed { - let replica_prepare = self.new_current_replica_prepare(|_| {}); - self.dispatch_replica_prepare_one(replica_prepare.clone()) - .await - 
.unwrap(); - let leader_prepare = self.recv_signed().await.unwrap(); - self.dispatch_leader_prepare(leader_prepare).await.unwrap(); - self.recv_signed().await.unwrap() + pub(crate) async fn new_procedural_replica_commit(&mut self, ctx: &ctx::Ctx) -> Signed { + let leader_prepare = self.new_procedural_leader_prepare(ctx).await; + self.process_leader_prepare(ctx,leader_prepare).await.unwrap() } - pub(crate) async fn new_procedural_replica_commit_many(&mut self) -> Signed { - let leader_prepare = self.new_procedural_leader_prepare_many().await; - self.dispatch_leader_prepare(leader_prepare).await.unwrap(); - self.recv_signed().await.unwrap() + pub(crate) async fn new_procedural_replica_commit_many(&mut self, ctx: &ctx::Ctx) -> Signed { + let leader_prepare = self.new_procedural_leader_prepare_many(ctx).await; + self.process_leader_prepare(ctx,leader_prepare).await.unwrap() } - pub(crate) async fn new_procedural_leader_commit_one(&mut self) -> Signed { - let replica_prepare = self.new_current_replica_prepare(|_| {}); - self.dispatch_replica_prepare_one(replica_prepare.clone()) - .await - .unwrap(); - let leader_prepare = self.recv_signed().await.unwrap(); - self.dispatch_leader_prepare(leader_prepare).await.unwrap(); - let replica_commit = self.recv_signed().await.unwrap(); - self.dispatch_replica_commit_one(replica_commit).unwrap(); - self.recv_signed().await.unwrap() + pub(crate) async fn new_procedural_leader_commit(&mut self, ctx: &ctx::Ctx) -> Signed { + let replica_commit = self.new_procedural_replica_commit(ctx).await; + self.process_replica_commit(ctx, replica_commit).await.unwrap().unwrap() } - pub(crate) async fn new_procedural_leader_commit_many(&mut self) -> Signed { - let replica_commit = self - .new_procedural_replica_commit_many() - .await - .cast() - .unwrap() - .msg; - self.dispatch_replica_commit_many( - vec![replica_commit; self.consensus_threshold()], + pub(crate) async fn new_procedural_leader_commit_many(&mut self, ctx: &ctx::Ctx) -> Signed { + let replica_commit = self.new_procedural_replica_commit_many(ctx).await; + self.process_replica_commit_many( + ctx, + vec![replica_commit.msg; self.consensus_threshold()], self.keys(), - ) - .unwrap(); - self.recv_signed().await.unwrap() + ).await.unwrap() + } + + pub(crate) async fn process_leader_prepare( + &mut self, + ctx: &ctx::Ctx, + msg: Signed, + ) -> Result, LeaderPrepareError> { + self + .consensus + .replica + .process_leader_prepare(&ctx, &self.consensus.inner, msg) + .await?; + Ok(self.try_recv().unwrap()) + } + + pub(crate) async fn process_leader_commit( + &mut self, + ctx: &ctx::Ctx, + msg: Signed, + ) -> Result<(), LeaderCommitError> { + self + .consensus + .replica + .process_leader_commit(ctx, &self.consensus.inner, msg) + .await } #[allow(clippy::result_large_err)] - pub(crate) async fn dispatch_replica_prepare_one( + pub(crate) async fn process_replica_prepare( &mut self, - msg: Signed, - ) -> Result<(), ReplicaPrepareError> { + ctx: &ctx::Ctx, + msg: Signed, + ) -> Result>, ReplicaPrepareError> { self.consensus .leader - .process_replica_prepare(&self.ctx, &self.consensus.inner, msg.cast().unwrap()) - .await + .process_replica_prepare(ctx, &self.consensus.inner, msg) + .await?; + Ok(self.try_recv()) } - pub(crate) async fn dispatch_replica_prepare_many( + pub(crate) async fn process_replica_prepare_many( &mut self, + ctx: &ctx::Ctx, messages: Vec, keys: Vec, - ) { + ) -> Option> { + let mut out = None; for (i, (msg, key)) in messages.into_iter().zip(keys).enumerate() { - let signed = 
key.sign_msg(ConsensusMsg::ReplicaPrepare(msg)); - let res = self.dispatch_replica_prepare_one(signed).await; + let res = self.process_replica_prepare(ctx,key.sign_msg(msg)).await; match (i + 1).cmp(&self.consensus_threshold()) { - Ordering::Equal => res.unwrap(), + Ordering::Equal => { out = Some(res.unwrap().unwrap()); }, Ordering::Less => assert_matches!( res, Err(ReplicaPrepareError::NumReceivedBelowThreshold { @@ -309,99 +287,56 @@ impl UTHarness { Ordering::Greater => assert_matches!(res, Err(ReplicaPrepareError::Old { .. })), } } + out } - pub(crate) fn dispatch_replica_commit_one( + pub(crate) async fn process_replica_commit( &mut self, - msg: Signed, - ) -> Result<(), ReplicaCommitError> { + ctx: &ctx::Ctx, + msg: Signed, + ) -> Result>, ReplicaCommitError> { self.consensus.leader.process_replica_commit( - &self.ctx, + &ctx, &self.consensus.inner, - msg.cast().unwrap(), - ) + msg, + )?; + Ok(self.try_recv()) } - pub(crate) fn dispatch_replica_commit_many( + pub(crate) async fn process_replica_commit_many( &mut self, + ctx: &ctx::Ctx, messages: Vec, keys: Vec, - ) -> Result<(), ReplicaCommitError> { - let len = messages.len(); - let consensus_threshold = self.consensus_threshold(); - messages - .into_iter() - .zip(keys) - .map(|(msg, key)| { - let signed = key.sign_msg(ConsensusMsg::ReplicaCommit(msg)); - self.dispatch_replica_commit_one(signed) - }) - .fold((0, None), |(i, _), res| { - let i = i + 1; - if i < len { - assert_matches!( - res, - Err(ReplicaCommitError::NumReceivedBelowThreshold { - num_messages, - threshold, - }) => { - assert_eq!(num_messages, i); - assert_eq!(threshold, consensus_threshold) - } - ); - } - (i, Some(res)) - }) - .1 - .unwrap() - } - - pub(crate) async fn dispatch_leader_prepare( - &mut self, - msg: Signed, - ) -> Result<(), LeaderPrepareError> { - scope::run!(&self.ctx, |ctx, s| { - s.spawn(async { - let res = self - .consensus - .replica - .process_leader_prepare(ctx, &self.consensus.inner, msg.cast().unwrap()) - .await; - Ok(res) - }) - .join(ctx) - }) - .await - .unwrap() - } - - pub(crate) async fn dispatch_leader_commit( - &mut self, - msg: Signed, - ) -> Result<(), LeaderCommitError> { - scope::run!(&self.ctx, |ctx, s| { - s.spawn(async { - let res = self - .consensus - .replica - .process_leader_commit(ctx, &self.consensus.inner, msg.cast().unwrap()) - .await; - Ok(res) - }) - .join(ctx) - }) - .await - .unwrap() + ) -> Option> { + let mut out = None; + for (i, (msg, key)) in messages.into_iter().zip(keys).enumerate() { + let res = self.process_replica_commit(ctx,key.sign_msg(msg)).await; + match (i + 1).cmp(&self.consensus_threshold()) { + Ordering::Equal => { out = Some(res.unwrap().unwrap()); }, + Ordering::Less => assert_matches!( + res, + Err(ReplicaCommitError::NumReceivedBelowThreshold { + num_messages, + threshold, + }) => { + assert_eq!(num_messages, i); + assert_eq!(threshold, self.consensus_threshold()) + } + ), + Ordering::Greater => assert_matches!(res, Err(ReplicaCommitError::Old { .. })), + } + } + out } - pub(crate) async fn recv_signed(&mut self) -> Result, Canceled> { + pub(crate) fn try_recv>(&mut self) -> Option> { self.pipe - .recv(&self.ctx) - .await - .map(|output_message| match output_message { + .try_recv() + .map(|message| match message { OutputMessage::Network(ConsensusInputMessage { - message: signed, .. - }) => signed, + message, .. 
+ }) => message.cast().unwrap(), }) } diff --git a/node/libs/storage/src/replica_state.rs b/node/libs/storage/src/replica_state.rs index 98824fa2..8c85fed1 100644 --- a/node/libs/storage/src/replica_state.rs +++ b/node/libs/storage/src/replica_state.rs @@ -64,6 +64,11 @@ impl ReplicaStore { self.state.put_replica_state(ctx, replica_state).await } + /// Verify that `payload` is a correct proposal for the block `block_number`. + pub async fn verify_payload(&self, ctx: &ctx::Ctx, block_number: validator::BlockNumber, payload: &validator::Payload) -> ctx::Result<()> { + self.blocks.verify_payload(ctx,block_number,payload).await + } + /// Puts a block into this storage. pub async fn put_block( &self, diff --git a/node/libs/storage/src/traits.rs b/node/libs/storage/src/traits.rs index be06909f..b2b184eb 100644 --- a/node/libs/storage/src/traits.rs +++ b/node/libs/storage/src/traits.rs @@ -78,16 +78,21 @@ impl BlockStore for Arc { /// Mutable storage of L2 blocks. /// -/// Implementations **must** propagate context cancellation using [`StorageError::Canceled`]. +/// Implementations **must** propagate context cancellation using [`ctx::Error::Canceled`]. #[async_trait] pub trait WriteBlockStore: BlockStore { /// Verify that `payload` is a correct proposal for the block `block_number`. async fn verify_payload( &self, - _ctx: &ctx::Ctx, - _block_number: BlockNumber, - _payload: &Payload, - ) -> anyhow::Result<()> { + ctx: &ctx::Ctx, + block_number: BlockNumber, + payload: &Payload, + ) -> ctx::Result<()> { + if let Some(block) = self.block(ctx,block_number).await? { + if &block.payload!=payload { + Err(anyhow::anyhow!("block already stored with different payload"))?; + } + } Ok(()) } /// Puts a block into this storage. From 6362ca8280f42e9b6e750c35c368dc18778a8c76 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Sun, 3 Dec 2023 18:05:00 +0100 Subject: [PATCH 06/14] WIP strongly typed bft unit tests --- node/actors/bft/src/leader/tests.rs | 10 +- node/actors/bft/src/replica/tests.rs | 459 +++++++-------------- node/actors/bft/src/testonly/ut_harness.rs | 58 +-- 3 files changed, 159 insertions(+), 368 deletions(-) diff --git a/node/actors/bft/src/leader/tests.rs b/node/actors/bft/src/leader/tests.rs index abaec805..429f01b0 100644 --- a/node/actors/bft/src/leader/tests.rs +++ b/node/actors/bft/src/leader/tests.rs @@ -355,9 +355,8 @@ async fn replica_commit_num_received_below_threshold() { async fn replica_commit_invalid_sig() { let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; - let mut replica_commit = util.new_current_replica_commit(|_| {}); - replica_commit.sig = util.rng().gen(); + replica_commit.sig = ctx.rng().gen(); let res = util.process_replica_commit(ctx,replica_commit).await; assert_matches!(res, Err(ReplicaCommitError::InvalidSignature(..))); } @@ -392,10 +391,9 @@ async fn replica_commit_protocol_version_mismatch() { let replica_commit = util.process_leader_prepare(ctx,leader_prepare).await.unwrap(); assert!(util.process_replica_commit(ctx,replica_commit.clone()).await.is_err()); - let mut replica_commit2 = replica_commit.msg; - replica_commit2.protocol_version = ProtocolVersion(replica_commit.msg.protocol_version.0 + 1); - - let replica_commit2 = util.key_at(1).sign_msg(replica_commit2); + let mut replica_commit = replica_commit.msg; + replica_commit.protocol_version = ProtocolVersion(replica_commit.protocol_version.0 + 1); + let replica_commit = util.key_at(1).sign_msg(replica_commit); 
util.process_replica_commit(ctx,replica_commit).await.unwrap(); // PANICS: // "Couldn't create justification from valid replica messages!: CommitQC can only be created from votes for the same message." diff --git a/node/actors/bft/src/replica/tests.rs b/node/actors/bft/src/replica/tests.rs index c1263863..ceb897dd 100644 --- a/node/actors/bft/src/replica/tests.rs +++ b/node/actors/bft/src/replica/tests.rs @@ -1,100 +1,78 @@ use super::{ leader_commit::Error as LeaderCommitError, leader_prepare::Error as LeaderPrepareError, }; +use zksync_concurrency::ctx; use crate::{inner::ConsensusInner, leader::ReplicaPrepareError, testonly::ut_harness::UTHarness}; use assert_matches::assert_matches; use rand::Rng; -use std::cell::RefCell; use zksync_consensus_roles::validator::{ - BlockHeaderHash, ConsensusMsg, LeaderCommit, LeaderPrepare, Payload, PrepareQC, ReplicaCommit, + LeaderPrepare, Payload, ReplicaCommit, ReplicaPrepare, ViewNumber, }; #[tokio::test] async fn leader_prepare_sanity() { - let mut util = UTHarness::new_many().await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new_many(ctx).await; util.set_view(util.owner_as_view_leader()); - - let leader_prepare = util.new_procedural_leader_prepare_many().await; - util.dispatch_leader_prepare(leader_prepare).await.unwrap(); + let leader_prepare = util.new_procedural_leader_prepare_many(ctx).await; + util.process_leader_prepare(ctx,leader_prepare).await.unwrap(); } #[tokio::test] async fn leader_prepare_reproposal_sanity() { - let mut util = UTHarness::new_many().await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new_many(ctx).await; util.set_view(util.owner_as_view_leader()); - - let replica_prepare: ReplicaPrepare = - util.new_unfinalized_replica_prepare().cast().unwrap().msg; - util.dispatch_replica_prepare_many( + let replica_prepare = util.new_unfinalized_replica_prepare().msg; + let leader_prepare = util.process_replica_prepare_many(ctx, vec![replica_prepare.clone(); util.consensus_threshold()], util.keys(), ) - .await; - let leader_prepare_signed = util.recv_signed().await.unwrap(); + .await.unwrap(); - let leader_prepare = leader_prepare_signed - .clone() - .cast::() - .unwrap() - .msg; assert_matches!( - leader_prepare, + &leader_prepare.msg, LeaderPrepare {proposal_payload, .. 
} => { - assert_eq!(proposal_payload, None); + assert!(proposal_payload.is_none()); } ); - util.dispatch_leader_prepare(leader_prepare_signed) + util.process_leader_prepare(ctx,leader_prepare) .await .unwrap(); } #[tokio::test] async fn leader_prepare_sanity_yield_replica_commit() { - let mut util = UTHarness::new_one().await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; - let leader_prepare = util.new_procedural_leader_prepare_one().await; - util.dispatch_leader_prepare(leader_prepare.clone()) + let leader_prepare = util.new_procedural_leader_prepare(ctx).await; + let replica_commit = util.process_leader_prepare(ctx,leader_prepare.clone()) .await .unwrap(); - let replica_commit = util - .recv_signed() - .await - .unwrap() - .cast::() - .unwrap() - .msg; - - let leader_prepare = leader_prepare.cast::().unwrap().msg; - assert_matches!( - replica_commit, + assert_eq!( + replica_commit.msg, ReplicaCommit { - protocol_version, - view, - proposal, - } => { - assert_eq!(protocol_version, leader_prepare.protocol_version); - assert_eq!(view, leader_prepare.view); - assert_eq!(proposal, leader_prepare.proposal); + protocol_version: leader_prepare.msg.protocol_version, + view: leader_prepare.msg.view, + proposal: leader_prepare.msg.proposal, } ); } #[tokio::test] async fn leader_prepare_invalid_leader() { - let mut util = UTHarness::new_with(2).await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,2).await; let view = ViewNumber(2); util.set_view(view); assert_eq!(util.view_leader(view), util.key_at(0).public()); - let replica_prepare_one = util.new_current_replica_prepare(|_| {}); - let res = util - .dispatch_replica_prepare_one(replica_prepare_one.clone()) - .await; + let replica_prepare = util.new_current_replica_prepare(|_| {}); + let res = util.process_replica_prepare(ctx,replica_prepare.clone()).await; assert_matches!( res, Err(ReplicaPrepareError::NumReceivedBelowThreshold { @@ -103,23 +81,16 @@ async fn leader_prepare_invalid_leader() { }) ); - let replica_prepare_two = util.key_at(1).sign_msg(replica_prepare_one.msg); - util.dispatch_replica_prepare_one(replica_prepare_two) - .await - .unwrap(); - let msg = util.recv_signed().await.unwrap(); - let mut leader_prepare = msg.cast::().unwrap().msg; - + let replica_prepare = util.key_at(1).sign_msg(replica_prepare.msg); + let mut leader_prepare = util.process_replica_prepare(ctx,replica_prepare).await.unwrap().unwrap().msg; leader_prepare.view = leader_prepare.view.next(); assert_ne!( util.view_leader(leader_prepare.view), util.key_at(0).public() ); - let leader_prepare = util - .owner_key() - .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare)); - let res = util.dispatch_leader_prepare(leader_prepare).await; + let leader_prepare = util.owner_key().sign_msg(leader_prepare); + let res = util.process_leader_prepare(ctx,leader_prepare).await; assert_matches!( res, Err(LeaderPrepareError::InvalidLeader { correct_leader, received_leader }) => { @@ -131,20 +102,12 @@ async fn leader_prepare_invalid_leader() { #[tokio::test] async fn leader_prepare_old_view() { - let mut util = UTHarness::new_one().await; - - let mut leader_prepare = util - .new_procedural_leader_prepare_one() - .await - .cast::() - .unwrap() - .msg; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; + let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; leader_prepare.view = util.current_replica_view().prev(); - let leader_prepare = util - 
.owner_key() - .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare)); - - let res = util.dispatch_leader_prepare(leader_prepare).await; + let leader_prepare = util.owner_key().sign_msg(leader_prepare); + let res = util.process_leader_prepare(ctx,leader_prepare).await; assert_matches!( res, Err(LeaderPrepareError::Old { current_view, current_phase }) => { @@ -171,7 +134,7 @@ async fn leader_prepare_invalid_payload() { .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare)); - let res = util.dispatch_leader_prepare(leader_prepare).await; + let res = util.process_leader_prepare(ctx,leader_prepare).await; assert_matches!( res, Err(LeaderPrepareError::ProposalInvalidPayload(..)), @@ -180,138 +143,78 @@ async fn leader_prepare_invalid_payload() { #[tokio::test] async fn leader_prepare_invalid_sig() { - let mut util = UTHarness::new_one().await; - - let mut leader_prepare = util.new_rnd_leader_prepare(|_| {}); - leader_prepare.sig = util.rng().gen(); - - let res = util.dispatch_leader_prepare(leader_prepare).await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; + let mut leader_prepare = util.new_rnd_leader_prepare(&mut ctx.rng(),|_| {}); + leader_prepare.sig = ctx.rng().gen(); + let res = util.process_leader_prepare(ctx,leader_prepare).await; assert_matches!(res, Err(LeaderPrepareError::InvalidSignature(..))); } #[tokio::test] async fn leader_prepare_invalid_prepare_qc() { - let mut util = UTHarness::new_one().await; - - let mut leader_prepare = util - .new_procedural_leader_prepare_one() - .await - .cast::() - .unwrap() - .msg; - leader_prepare.justification = util.rng().gen::(); - let leader_prepare = util - .owner_key() - .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare)); - - let res = util.dispatch_leader_prepare(leader_prepare).await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; + let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; + leader_prepare.justification = ctx.rng().gen(); + let leader_prepare = util.owner_key().sign_msg(leader_prepare); + let res = util.process_leader_prepare(ctx,leader_prepare).await; assert_matches!( res, - Err(LeaderPrepareError::InvalidPrepareQC(err)) => { - assert_eq!(err.to_string(), "PrepareQC contains messages for different views!") - } + Err(LeaderPrepareError::InvalidPrepareQC(_)) ); } #[tokio::test] async fn leader_prepare_invalid_high_qc() { - let mut util = UTHarness::new_one().await; - - let mut replica_prepare = util - .new_current_replica_prepare(|_| {}) - .cast::() - .unwrap() - .msg; - replica_prepare.high_qc = util.rng().gen(); - - let mut leader_prepare = util - .new_procedural_leader_prepare_one() - .await - .cast::() - .unwrap() - .msg; - - let high_qc = util.rng().gen(); - leader_prepare.justification = util.new_prepare_qc(|msg| msg.high_qc = high_qc); - let leader_prepare = util - .owner_key() - .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare)); - - let res = util.dispatch_leader_prepare(leader_prepare).await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; + let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; + leader_prepare.justification = util.new_prepare_qc(|msg| msg.high_qc = ctx.rng().gen()); + let leader_prepare = util.owner_key().sign_msg(leader_prepare); + let res = util.process_leader_prepare(ctx,leader_prepare).await; assert_matches!(res, Err(LeaderPrepareError::InvalidHighQC(_))); } #[tokio::test] async fn 
leader_prepare_proposal_oversized_payload() { - let mut util = UTHarness::new_one().await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; let payload_oversize = ConsensusInner::PAYLOAD_MAX_SIZE + 1; let payload_vec = vec![0; payload_oversize]; - - let mut leader_prepare = util - .new_procedural_leader_prepare_one() - .await - .cast::() - .unwrap() - .msg; + let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; leader_prepare.proposal_payload = Some(Payload(payload_vec)); - let leader_prepare_signed = util - .owner_key() - .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare.clone())); - - let res = util.dispatch_leader_prepare(leader_prepare_signed).await; + let leader_prepare = util.owner_key().sign_msg(leader_prepare); + let res = util.process_leader_prepare(ctx,leader_prepare.clone()).await; assert_matches!( res, Err(LeaderPrepareError::ProposalOversizedPayload{ payload_size, header }) => { assert_eq!(payload_size, payload_oversize); - assert_eq!(header, leader_prepare.proposal); + assert_eq!(header, leader_prepare.msg.proposal); } ); } #[tokio::test] async fn leader_prepare_proposal_mismatched_payload() { - let mut util = UTHarness::new_one().await; - - let mut leader_prepare = util - .new_procedural_leader_prepare_one() - .await - .cast::() - .unwrap() - .msg; - leader_prepare.proposal_payload = Some(util.rng().gen()); - let leader_prepare_signed = util - .owner_key() - .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare.clone())); - - let res = util.dispatch_leader_prepare(leader_prepare_signed).await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; + let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; + leader_prepare.proposal_payload = Some(ctx.rng().gen()); + let leader_prepare = util.owner_key().sign_msg(leader_prepare); + let res = util.process_leader_prepare(ctx,leader_prepare).await; assert_matches!(res, Err(LeaderPrepareError::ProposalMismatchedPayload)); } #[tokio::test] async fn leader_prepare_proposal_when_previous_not_finalized() { - let mut util = UTHarness::new_one().await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; let replica_prepare = util.new_current_replica_prepare(|_| {}); - util.dispatch_replica_prepare_one(replica_prepare.clone()) - .await - .unwrap(); - - let mut leader_prepare = util - .recv_signed() - .await - .unwrap() - .cast::() - .unwrap() - .msg; - - let high_vote = util.rng().gen(); - leader_prepare.justification = util.new_prepare_qc(|msg| msg.high_vote = high_vote); - - let leader_prepare_signed = util - .owner_key() - .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare.clone())); - - let res = util.dispatch_leader_prepare(leader_prepare_signed).await; + let mut leader_prepare = util.process_replica_prepare(ctx,replica_prepare).await.unwrap().unwrap().msg; + leader_prepare.justification = util.new_prepare_qc(|msg| msg.high_vote = ctx.rng().gen()); + let leader_prepare = util.owner_key().sign_msg(leader_prepare); + let res = util.process_leader_prepare(ctx,leader_prepare).await; assert_matches!( res, Err(LeaderPrepareError::ProposalWhenPreviousNotFinalized) @@ -320,32 +223,13 @@ async fn leader_prepare_proposal_when_previous_not_finalized() { #[tokio::test] async fn leader_prepare_proposal_invalid_parent_hash() { - let mut util = UTHarness::new_one().await; - - let replica_prepare_signed = util.new_current_replica_prepare(|_| {}); - let replica_prepare = 
replica_prepare_signed - .clone() - .cast::() - .unwrap() - .msg; - util.dispatch_replica_prepare_one(replica_prepare_signed.clone()) - .await - .unwrap(); - let mut leader_prepare = util - .recv_signed() - .await - .unwrap() - .cast::() - .unwrap() - .msg; - - let junk: BlockHeaderHash = util.rng().gen(); - leader_prepare.proposal.parent = junk; - let leader_prepare_signed = util - .owner_key() - .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare.clone())); - - let res = util.dispatch_leader_prepare(leader_prepare_signed).await; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; + let replica_prepare = util.new_current_replica_prepare(|_| {}); + let mut leader_prepare = util.process_replica_prepare(ctx,replica_prepare.clone()).await.unwrap().unwrap().msg; + leader_prepare.proposal.parent = ctx.rng().gen(); + let leader_prepare = util.owner_key().sign_msg(leader_prepare); + let res = util.process_leader_prepare(ctx,leader_prepare.clone()).await; assert_matches!( res, Err(LeaderPrepareError::ProposalInvalidParentHash { @@ -353,195 +237,128 @@ async fn leader_prepare_proposal_invalid_parent_hash() { received_parent_hash, header }) => { - assert_eq!(correct_parent_hash, replica_prepare.high_vote.proposal.hash()); - assert_eq!(received_parent_hash, junk); - assert_eq!(header, leader_prepare.proposal); + assert_eq!(correct_parent_hash, replica_prepare.msg.high_vote.proposal.hash()); + assert_eq!(received_parent_hash, leader_prepare.msg.proposal.parent); + assert_eq!(header, leader_prepare.msg.proposal); } ); } #[tokio::test] async fn leader_prepare_proposal_non_sequential_number() { - let mut util = UTHarness::new_one().await; - - let replica_prepare_signed = util.new_current_replica_prepare(|_| {}); - let replica_prepare = replica_prepare_signed - .clone() - .cast::() - .unwrap() - .msg; - util.dispatch_replica_prepare_one(replica_prepare_signed) - .await - .unwrap(); - let mut leader_prepare = util - .recv_signed() - .await - .unwrap() - .cast::() - .unwrap() - .msg; - - let correct_num = replica_prepare.high_vote.proposal.number.next(); + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; + let replica_prepare = util.new_current_replica_prepare(|_| {}); + let mut leader_prepare = util.process_replica_prepare(ctx,replica_prepare.clone()).await.unwrap().unwrap().msg; + let correct_num = replica_prepare.msg.high_vote.proposal.number.next(); assert_eq!(correct_num, leader_prepare.proposal.number); let non_seq_num = correct_num.next(); leader_prepare.proposal.number = non_seq_num; - let leader_prepare_signed = util - .owner_key() - .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare.clone())); - - let res = util.dispatch_leader_prepare(leader_prepare_signed).await; + let leader_prepare = util.owner_key().sign_msg(leader_prepare); + let res = util.process_leader_prepare(ctx,leader_prepare.clone()).await; assert_matches!( res, Err(LeaderPrepareError::ProposalNonSequentialNumber { correct_number, received_number, header }) => { assert_eq!(correct_number, correct_num); assert_eq!(received_number, non_seq_num); - assert_eq!(header, leader_prepare.proposal); + assert_eq!(header, leader_prepare.msg.proposal); } ); } #[tokio::test] async fn leader_prepare_reproposal_without_quorum() { - let mut util = UTHarness::new_many().await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let mut util = UTHarness::new_many(ctx).await; util.set_view(util.owner_as_view_leader()); - - let mut leader_prepare 
= util - .new_procedural_leader_prepare_many() - .await - .cast::() - .unwrap() - .msg; - - let rng = RefCell::new(util.new_rng()); - leader_prepare.justification = util.new_prepare_qc_many(&|msg: &mut ReplicaPrepare| { - let mut rng = rng.borrow_mut(); - msg.high_vote = rng.gen(); - }); + let mut leader_prepare = util.new_procedural_leader_prepare_many(ctx).await.msg; + leader_prepare.justification = util.new_prepare_qc_many(|msg| msg.high_vote = rng.gen()); leader_prepare.proposal_payload = None; - - let leader_prepare = util - .owner_key() - .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare)); - let res = util.dispatch_leader_prepare(leader_prepare).await; + let leader_prepare = util.owner_key().sign_msg(leader_prepare); + let res = util.process_leader_prepare(ctx,leader_prepare).await; assert_matches!(res, Err(LeaderPrepareError::ReproposalWithoutQuorum)); } #[tokio::test] async fn leader_prepare_reproposal_when_finalized() { - let mut util = UTHarness::new_one().await; - - let mut leader_prepare = util - .new_procedural_leader_prepare_one() - .await - .cast::() - .unwrap() - .msg; + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; + let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; leader_prepare.proposal_payload = None; - let leader_prepare_signed = util - .owner_key() - .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare.clone())); - - let res = util.dispatch_leader_prepare(leader_prepare_signed).await; + let leader_prepare = util.owner_key().sign_msg(leader_prepare); + let res = util.process_leader_prepare(ctx,leader_prepare).await; assert_matches!(res, Err(LeaderPrepareError::ReproposalWhenFinalized)); } #[tokio::test] async fn leader_prepare_reproposal_invalid_block() { - let mut util = UTHarness::new_one().await; - - let mut leader_prepare: LeaderPrepare = util - .new_procedural_leader_prepare_one() - .await - .cast() - .unwrap() - .msg; - - let high_vote = util.rng().gen(); - leader_prepare.justification = util.new_prepare_qc(|msg: &mut ReplicaPrepare| { - msg.high_vote = high_vote; - }); + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; + let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; + leader_prepare.justification = util.new_prepare_qc(|msg| msg.high_vote = ctx.rng().gen()); leader_prepare.proposal_payload = None; - - let leader_prepare = util - .owner_key() - .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare)); - - let res = util.dispatch_leader_prepare(leader_prepare).await; + let leader_prepare = util.owner_key().sign_msg(leader_prepare); + let res = util.process_leader_prepare(ctx,leader_prepare).await; assert_matches!(res, Err(LeaderPrepareError::ReproposalInvalidBlock)); } #[tokio::test] async fn leader_commit_sanity() { - let mut util = UTHarness::new_many().await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new_many(ctx).await; util.set_view(util.owner_as_view_leader()); - - let leader_commit = util.new_procedural_leader_commit_many().await; - util.dispatch_leader_commit(leader_commit).await.unwrap(); + let leader_commit = util.new_procedural_leader_commit_many(ctx).await; + util.process_leader_commit(ctx,leader_commit).await.unwrap(); } #[tokio::test] async fn leader_commit_sanity_yield_replica_prepare() { - let mut util = UTHarness::new_one().await; - - let leader_commit = util.new_procedural_leader_commit_one().await; - util.dispatch_leader_commit(leader_commit.clone()) - .await - .unwrap(); 
- let replica_prepare = util - .recv_signed() - .await - .unwrap() - .cast::() - .unwrap() - .msg; - - let leader_commit = leader_commit.cast::().unwrap().msg; - assert_matches!( - replica_prepare, + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,1).await; + let leader_commit = util.new_procedural_leader_commit(ctx).await; + let replica_prepare = util.process_leader_commit(ctx,leader_commit.clone()).await.unwrap(); + assert_eq!( + replica_prepare.msg, ReplicaPrepare { - protocol_version, - view, - high_vote, - high_qc, - } => { - assert_eq!(protocol_version, leader_commit.protocol_version); - assert_eq!(view, leader_commit.justification.message.view.next()); - assert_eq!(high_vote, leader_commit.justification.message); - assert_eq!(high_qc, leader_commit.justification) + protocol_version: leader_commit.msg.protocol_version, + view: leader_commit.msg.justification.message.view.next(), + high_vote: leader_commit.msg.justification.message.clone(), + high_qc: leader_commit.msg.justification, } ); } #[tokio::test] async fn leader_commit_invalid_leader() { - let mut util = UTHarness::new_with(2).await; - + let ctx = &ctx::test_root(&ctx::RealClock); + let mut util = UTHarness::new(ctx,2).await; let current_view_leader = util.view_leader(util.current_replica_view()); assert_ne!(current_view_leader, util.owner_key().public()); - let leader_commit = util.new_rnd_leader_commit(|_| {}); - let res = util.dispatch_leader_commit(leader_commit).await; + let leader_commit = util.new_rnd_leader_commit(&mut ctx.rng(),|_| {}); + let res = util.process_leader_commit(ctx,leader_commit).await; assert_matches!(res, Err(LeaderCommitError::InvalidLeader { .. })); } #[tokio::test] async fn leader_commit_invalid_sig() { - let mut util = UTHarness::new_one().await; - - let mut leader_commit = util.new_rnd_leader_commit(|_| {}); - leader_commit.sig = util.rng().gen(); - let res = util.dispatch_leader_commit(leader_commit).await; + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let mut util = UTHarness::new(ctx,1).await; + let mut leader_commit = util.new_rnd_leader_commit(rng,|_| {}); + leader_commit.sig = rng.gen(); + let res = util.process_leader_commit(ctx,leader_commit).await; assert_matches!(res, Err(LeaderCommitError::InvalidSignature { .. })); } #[tokio::test] async fn leader_commit_invalid_commit_qc() { - let mut util = UTHarness::new_one().await; - - let leader_commit = util.new_rnd_leader_commit(|_| {}); - let res = util.dispatch_leader_commit(leader_commit).await; + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let mut util = UTHarness::new(ctx,1).await; + let leader_commit = util.new_rnd_leader_commit(rng,|_| {}); + let res = util.process_leader_commit(ctx,leader_commit).await; assert_matches!(res, Err(LeaderCommitError::InvalidJustification { .. 
})); } diff --git a/node/actors/bft/src/testonly/ut_harness.rs b/node/actors/bft/src/testonly/ut_harness.rs index 278a5e40..397d8dc1 100644 --- a/node/actors/bft/src/testonly/ut_harness.rs +++ b/node/actors/bft/src/testonly/ut_harness.rs @@ -6,12 +6,12 @@ use crate::{ }; use zksync_consensus_utils::enum_util::{Variant}; use assert_matches::assert_matches; -use rand::{rngs::StdRng, Rng}; +use rand::{Rng}; use std::cmp::Ordering; use zksync_concurrency::ctx; use zksync_consensus_network::io::ConsensusInputMessage; use zksync_consensus_roles::validator::{ - self, BlockHeader, CommitQC, ConsensusMsg, LeaderCommit, LeaderPrepare, Payload, Phase, + self, BlockHeader, CommitQC, LeaderCommit, LeaderPrepare, Payload, Phase, PrepareQC, ReplicaCommit, ReplicaPrepare, SecretKey, Signed, ViewNumber, }; use zksync_consensus_utils::pipe::DispatcherPipe; @@ -77,14 +77,6 @@ impl UTHarness { self.keys.clone() } - pub(crate) fn rng(&mut self) -> &mut StdRng { - &mut self.rng - } - - pub(crate) fn new_rng(&self) -> StdRng { - self.ctx.rng() - } - pub(crate) fn set_view(&mut self, view: ViewNumber) { self.set_replica_view(view); self.set_leader_view(view); @@ -133,9 +125,10 @@ impl UTHarness { pub(crate) fn new_rnd_leader_prepare( &mut self, + rng: &mut impl Rng, mutate_fn: impl FnOnce(&mut LeaderPrepare), - ) -> Signed { - let payload: Payload = self.rng().gen(); + ) -> Signed { + let payload: Payload = rng.gen(); let mut msg = LeaderPrepare { protocol_version: validator::ProtocolVersion::EARLIEST, view: self.consensus.leader.view, @@ -145,15 +138,12 @@ impl UTHarness { payload: payload.hash(), }, proposal_payload: Some(payload), - justification: self.rng().gen(), + justification: rng.gen(), }; mutate_fn(&mut msg); - self.consensus - .inner - .secret_key - .sign_msg(ConsensusMsg::LeaderPrepare(msg)) + self.consensus.inner.secret_key.sign_msg(msg) } pub(crate) fn new_current_replica_commit( @@ -171,19 +161,15 @@ impl UTHarness { pub(crate) fn new_rnd_leader_commit( &mut self, + rng: &mut impl Rng, mutate_fn: impl FnOnce(&mut LeaderCommit), ) -> Signed { let mut msg = LeaderCommit { protocol_version: validator::ProtocolVersion::EARLIEST, - justification: self.rng().gen(), + justification: rng.gen(), }; - mutate_fn(&mut msg); - - self.consensus - .inner - .secret_key - .sign_msg(msg) + self.consensus.inner.secret_key.sign_msg(msg) } pub(crate) async fn new_procedural_leader_prepare(&mut self, ctx: &ctx::Ctx) -> Signed { @@ -242,12 +228,13 @@ impl UTHarness { &mut self, ctx: &ctx::Ctx, msg: Signed, - ) -> Result<(), LeaderCommitError> { + ) -> Result, LeaderCommitError> { self .consensus .replica .process_leader_commit(ctx, &self.consensus.inner, msg) - .await + .await?; + Ok(self.try_recv().unwrap()) } #[allow(clippy::result_large_err)] @@ -370,25 +357,18 @@ impl UTHarness { pub(crate) fn new_prepare_qc(&self, mutate_fn: impl FnOnce(&mut ReplicaPrepare)) -> PrepareQC { let validator_set = validator::ValidatorSet::new(self.keys.iter().map(|k| k.public())).unwrap(); - - let msg: ReplicaPrepare = self - .new_current_replica_prepare(mutate_fn) - .cast() - .unwrap() - .msg; - + let msg = self.new_current_replica_prepare(mutate_fn).msg; let signed_messages: Vec<_> = self .keys .iter() .map(|sk| sk.sign_msg(msg.clone())) .collect(); - PrepareQC::from(&signed_messages, &validator_set).unwrap() } pub(crate) fn new_prepare_qc_many( &mut self, - mutate_fn: &dyn Fn(&mut ReplicaPrepare), + mutate_fn: impl FnMut(&mut ReplicaPrepare), ) -> PrepareQC { let validator_set = validator::ValidatorSet::new(self.keys.iter().map(|k| 
k.public())).unwrap(); @@ -397,12 +377,8 @@ impl UTHarness { .keys .iter() .map(|sk| { - let msg: ReplicaPrepare = self - .new_current_replica_prepare(|msg| mutate_fn(msg)) - .cast() - .unwrap() - .msg; - sk.sign_msg(msg.clone()) + let msg = self.new_current_replica_prepare(|msg| mutate_fn(msg)).msg; + sk.sign_msg(msg) }) .collect(); From 18c0077faac720a3cd5db6e6852a804d457c4ef4 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Sun, 3 Dec 2023 18:50:20 +0100 Subject: [PATCH 07/14] tests pass --- node/actors/bft/src/leader/tests.rs | 46 +++++-- node/actors/bft/src/replica/tests.rs | 44 ++++--- node/actors/bft/src/testonly/ut_harness.rs | 132 ++++++--------------- 3 files changed, 94 insertions(+), 128 deletions(-) diff --git a/node/actors/bft/src/leader/tests.rs b/node/actors/bft/src/leader/tests.rs index 429f01b0..19f10e8e 100644 --- a/node/actors/bft/src/leader/tests.rs +++ b/node/actors/bft/src/leader/tests.rs @@ -12,14 +12,16 @@ use zksync_consensus_roles::validator::{ #[tokio::test] async fn replica_prepare_sanity() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new_many(ctx).await; util.set_view(util.owner_as_view_leader()); - util.new_procedural_leader_prepare_many(ctx).await; + util.new_procedural_leader_prepare(ctx).await; } #[tokio::test] async fn replica_prepare_sanity_yield_leader_prepare() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; @@ -44,12 +46,13 @@ async fn replica_prepare_sanity_yield_leader_prepare() { #[tokio::test] async fn replica_prepare_sanity_yield_leader_prepare_reproposal() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new_many(ctx).await; util.set_view(util.owner_as_view_leader()); let replica_prepare = util.new_unfinalized_replica_prepare().msg; - let leader_prepare = util.process_replica_prepare_many(ctx, vec![replica_prepare.clone(); util.consensus_threshold()], util.keys()).await.unwrap(); + let leader_prepare = util.process_replica_prepare_all(ctx, replica_prepare.clone()).await; assert_matches!( leader_prepare.msg, @@ -77,6 +80,7 @@ async fn replica_prepare_sanity_yield_leader_prepare_reproposal() { #[tokio::test] async fn replica_prepare_old_view() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; util.set_replica_view(ViewNumber(1)); @@ -96,6 +100,7 @@ async fn replica_prepare_old_view() { #[tokio::test] async fn replica_prepare_during_commit() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; util.set_leader_phase(Phase::Commit); @@ -113,9 +118,10 @@ async fn replica_prepare_during_commit() { #[tokio::test] async fn replica_prepare_not_leader_in_view() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,2).await; - let current_view_leader = util.view_leader(util.current_replica_view()); + let current_view_leader = util.view_leader(util.consensus.replica.view); assert_ne!(current_view_leader, util.owner_key().public()); let replica_prepare = util.new_current_replica_prepare(|_| {}); @@ -145,6 +151,7 @@ async fn replica_prepare_already_exists() { #[tokio::test] async fn replica_prepare_num_received_below_threshold() { + 
zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx, 2).await; let view = ViewNumber(2); @@ -165,6 +172,7 @@ async fn replica_prepare_num_received_below_threshold() { #[tokio::test] async fn replica_prepare_invalid_sig() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; let mut replica_prepare = util.new_current_replica_prepare(|_| {}); @@ -175,6 +183,7 @@ async fn replica_prepare_invalid_sig() { #[tokio::test] async fn replica_prepare_invalid_commit_qc() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; let replica_prepare = util.new_current_replica_prepare(|msg| msg.high_qc = ctx.rng().gen()); @@ -184,6 +193,7 @@ async fn replica_prepare_invalid_commit_qc() { #[tokio::test] async fn replica_prepare_high_qc_of_current_view() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; let view = ViewNumber(1); @@ -203,6 +213,7 @@ async fn replica_prepare_high_qc_of_current_view() { #[tokio::test] async fn replica_prepare_high_qc_of_future_view() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; @@ -224,12 +235,13 @@ async fn replica_prepare_high_qc_of_future_view() { #[ignore = "fails/unsupported"] #[tokio::test] async fn replica_prepare_non_validator_signer() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,2).await; let view = ViewNumber(2); util.set_view(view); - assert_eq!(util.view_leader(view), util.key_at(0).public()); + assert_eq!(util.view_leader(view), util.keys[0].public()); let replica_prepare = util.new_current_replica_prepare(|_| {}); assert!(util .process_replica_prepare(ctx,replica_prepare.clone()) @@ -247,14 +259,16 @@ async fn replica_prepare_non_validator_signer() { #[tokio::test] async fn replica_commit_sanity() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new_many(ctx).await; util.set_view(util.owner_as_view_leader()); - util.new_procedural_leader_commit_many(ctx).await; + util.new_procedural_leader_commit(ctx).await; } #[tokio::test] async fn replica_commit_sanity_yield_leader_commit() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; let replica_commit = util.new_procedural_replica_commit(ctx).await; @@ -273,17 +287,18 @@ async fn replica_commit_sanity_yield_leader_commit() { #[tokio::test] async fn replica_commit_old() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; let mut replica_commit = util.new_procedural_replica_commit(ctx).await.msg; - replica_commit.view = util.current_replica_view().prev(); + replica_commit.view = util.consensus.replica.view.prev(); let replica_commit = util.owner_key().sign_msg(replica_commit); let res = util.process_replica_commit(ctx,replica_commit).await; assert_matches!( res, Err(ReplicaCommitError::Old { current_view, current_phase }) => { - assert_eq!(current_view, util.current_replica_view()); - assert_eq!(current_phase, util.current_replica_phase()); + assert_eq!(current_view, 
util.consensus.replica.view); + assert_eq!(current_phase, util.consensus.replica.phase); } ); } @@ -293,7 +308,7 @@ async fn replica_commit_not_leader_in_view() { let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,2).await; - let current_view_leader = util.view_leader(util.current_replica_view()); + let current_view_leader = util.view_leader(util.consensus.replica.view); assert_ne!(current_view_leader, util.owner_key().public()); let replica_commit = util.new_current_replica_commit(|_| {}); @@ -303,6 +318,7 @@ async fn replica_commit_not_leader_in_view() { #[tokio::test] async fn replica_commit_already_exists() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,2).await; @@ -313,7 +329,7 @@ async fn replica_commit_already_exists() { let replica_prepare1 = util.new_current_replica_prepare(|_| {}); assert!(util.process_replica_prepare(ctx,replica_prepare1.clone()).await.is_err()); - let replica_prepare2 = util.key_at(1).sign_msg(replica_prepare1.msg); + let replica_prepare2 = util.keys[1].sign_msg(replica_prepare1.msg); let leader_prepare = util.process_replica_prepare(ctx,replica_prepare2).await.unwrap().unwrap(); let replica_commit = util.process_leader_prepare(ctx,leader_prepare).await.unwrap(); assert!(util.process_replica_commit(ctx,replica_commit.clone()).await.is_err()); @@ -328,6 +344,7 @@ async fn replica_commit_already_exists() { #[tokio::test] async fn replica_commit_num_received_below_threshold() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,2).await; @@ -338,7 +355,7 @@ async fn replica_commit_num_received_below_threshold() { let replica_prepare = util.new_current_replica_prepare(|_| {}); assert!(util.process_replica_prepare(ctx,replica_prepare.clone()).await.is_err()); - let replica_prepare = util.key_at(1).sign_msg(replica_prepare.msg); + let replica_prepare = util.keys[1].sign_msg(replica_prepare.msg); let leader_prepare = util.process_replica_prepare(ctx,replica_prepare).await.unwrap().unwrap(); let replica_commit = util.process_leader_prepare(ctx,leader_prepare).await.unwrap(); let res = util.process_replica_commit(ctx,replica_commit.clone()).await; @@ -353,6 +370,7 @@ async fn replica_commit_num_received_below_threshold() { #[tokio::test] async fn replica_commit_invalid_sig() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; let mut replica_commit = util.new_current_replica_commit(|_| {}); @@ -363,6 +381,7 @@ async fn replica_commit_invalid_sig() { #[tokio::test] async fn replica_commit_unexpected_proposal() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; let replica_commit = util.new_current_replica_commit(|_| {}); @@ -373,6 +392,7 @@ async fn replica_commit_unexpected_proposal() { #[ignore = "fails/unsupported"] #[tokio::test] async fn replica_commit_protocol_version_mismatch() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,2).await; @@ -386,14 +406,14 @@ async fn replica_commit_protocol_version_mismatch() { .process_replica_prepare(ctx,replica_prepare.clone()) .await .is_err()); - let replica_prepare = util.key_at(1).sign_msg(replica_prepare.msg); + let replica_prepare = util.keys[1].sign_msg(replica_prepare.msg); let 
leader_prepare = util.process_replica_prepare(ctx,replica_prepare).await.unwrap().unwrap(); let replica_commit = util.process_leader_prepare(ctx,leader_prepare).await.unwrap(); assert!(util.process_replica_commit(ctx,replica_commit.clone()).await.is_err()); let mut replica_commit = replica_commit.msg; replica_commit.protocol_version = ProtocolVersion(replica_commit.protocol_version.0 + 1); - let replica_commit = util.key_at(1).sign_msg(replica_commit); + let replica_commit = util.keys[1].sign_msg(replica_commit); util.process_replica_commit(ctx,replica_commit).await.unwrap(); // PANICS: // "Couldn't create justification from valid replica messages!: CommitQC can only be created from votes for the same message." diff --git a/node/actors/bft/src/replica/tests.rs b/node/actors/bft/src/replica/tests.rs index ceb897dd..17127923 100644 --- a/node/actors/bft/src/replica/tests.rs +++ b/node/actors/bft/src/replica/tests.rs @@ -12,24 +12,22 @@ use zksync_consensus_roles::validator::{ #[tokio::test] async fn leader_prepare_sanity() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new_many(ctx).await; util.set_view(util.owner_as_view_leader()); - let leader_prepare = util.new_procedural_leader_prepare_many(ctx).await; + let leader_prepare = util.new_procedural_leader_prepare(ctx).await; util.process_leader_prepare(ctx,leader_prepare).await.unwrap(); } #[tokio::test] async fn leader_prepare_reproposal_sanity() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new_many(ctx).await; util.set_view(util.owner_as_view_leader()); let replica_prepare = util.new_unfinalized_replica_prepare().msg; - let leader_prepare = util.process_replica_prepare_many(ctx, - vec![replica_prepare.clone(); util.consensus_threshold()], - util.keys(), - ) - .await.unwrap(); + let leader_prepare = util.process_replica_prepare_all(ctx,replica_prepare.clone()).await; assert_matches!( &leader_prepare.msg, @@ -45,6 +43,7 @@ async fn leader_prepare_reproposal_sanity() { #[tokio::test] async fn leader_prepare_sanity_yield_replica_commit() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; @@ -64,12 +63,13 @@ async fn leader_prepare_sanity_yield_replica_commit() { #[tokio::test] async fn leader_prepare_invalid_leader() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,2).await; let view = ViewNumber(2); util.set_view(view); - assert_eq!(util.view_leader(view), util.key_at(0).public()); + assert_eq!(util.view_leader(view), util.keys[0].public()); let replica_prepare = util.new_current_replica_prepare(|_| {}); let res = util.process_replica_prepare(ctx,replica_prepare.clone()).await; @@ -81,12 +81,12 @@ async fn leader_prepare_invalid_leader() { }) ); - let replica_prepare = util.key_at(1).sign_msg(replica_prepare.msg); + let replica_prepare = util.keys[1].sign_msg(replica_prepare.msg); let mut leader_prepare = util.process_replica_prepare(ctx,replica_prepare).await.unwrap().unwrap().msg; leader_prepare.view = leader_prepare.view.next(); assert_ne!( util.view_leader(leader_prepare.view), - util.key_at(0).public() + util.keys[0].public() ); let leader_prepare = util.owner_key().sign_msg(leader_prepare); @@ -94,25 +94,26 @@ async fn leader_prepare_invalid_leader() { assert_matches!( res, Err(LeaderPrepareError::InvalidLeader { 
correct_leader, received_leader }) => { - assert_eq!(correct_leader, util.key_at(1).public()); - assert_eq!(received_leader, util.key_at(0).public()); + assert_eq!(correct_leader, util.keys[1].public()); + assert_eq!(received_leader, util.keys[0].public()); } ); } #[tokio::test] async fn leader_prepare_old_view() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; - leader_prepare.view = util.current_replica_view().prev(); + leader_prepare.view = util.consensus.replica.view.prev(); let leader_prepare = util.owner_key().sign_msg(leader_prepare); let res = util.process_leader_prepare(ctx,leader_prepare).await; assert_matches!( res, Err(LeaderPrepareError::Old { current_view, current_phase }) => { - assert_eq!(current_view, util.current_replica_view()); - assert_eq!(current_phase, util.current_replica_phase()); + assert_eq!(current_view, util.consensus.replica.view); + assert_eq!(current_phase, util.consensus.replica.phase); } ); } @@ -143,6 +144,7 @@ async fn leader_prepare_invalid_payload() { #[tokio::test] async fn leader_prepare_invalid_sig() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; let mut leader_prepare = util.new_rnd_leader_prepare(&mut ctx.rng(),|_| {}); @@ -167,6 +169,7 @@ async fn leader_prepare_invalid_prepare_qc() { #[tokio::test] async fn leader_prepare_invalid_high_qc() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; @@ -197,6 +200,7 @@ async fn leader_prepare_proposal_oversized_payload() { #[tokio::test] async fn leader_prepare_proposal_mismatched_payload() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; @@ -208,6 +212,7 @@ async fn leader_prepare_proposal_mismatched_payload() { #[tokio::test] async fn leader_prepare_proposal_when_previous_not_finalized() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; let replica_prepare = util.new_current_replica_prepare(|_| {}); @@ -246,6 +251,7 @@ async fn leader_prepare_proposal_invalid_parent_hash() { #[tokio::test] async fn leader_prepare_proposal_non_sequential_number() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; let replica_prepare = util.new_current_replica_prepare(|_| {}); @@ -269,11 +275,12 @@ async fn leader_prepare_proposal_non_sequential_number() { #[tokio::test] async fn leader_prepare_reproposal_without_quorum() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let mut util = UTHarness::new_many(ctx).await; util.set_view(util.owner_as_view_leader()); - let mut leader_prepare = util.new_procedural_leader_prepare_many(ctx).await.msg; + let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; leader_prepare.justification = util.new_prepare_qc_many(|msg| msg.high_vote = rng.gen()); leader_prepare.proposal_payload = None; let leader_prepare = 
util.owner_key().sign_msg(leader_prepare); @@ -294,6 +301,7 @@ async fn leader_prepare_reproposal_when_finalized() { #[tokio::test] async fn leader_prepare_reproposal_invalid_block() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; @@ -309,12 +317,13 @@ async fn leader_commit_sanity() { let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new_many(ctx).await; util.set_view(util.owner_as_view_leader()); - let leader_commit = util.new_procedural_leader_commit_many(ctx).await; + let leader_commit = util.new_procedural_leader_commit(ctx).await; util.process_leader_commit(ctx,leader_commit).await.unwrap(); } #[tokio::test] async fn leader_commit_sanity_yield_replica_prepare() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,1).await; let leader_commit = util.new_procedural_leader_commit(ctx).await; @@ -334,7 +343,7 @@ async fn leader_commit_sanity_yield_replica_prepare() { async fn leader_commit_invalid_leader() { let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx,2).await; - let current_view_leader = util.view_leader(util.current_replica_view()); + let current_view_leader = util.view_leader(util.consensus.replica.view); assert_ne!(current_view_leader, util.owner_key().public()); let leader_commit = util.new_rnd_leader_commit(&mut ctx.rng(),|_| {}); @@ -344,6 +353,7 @@ async fn leader_commit_invalid_leader() { #[tokio::test] async fn leader_commit_invalid_sig() { + zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let mut util = UTHarness::new(ctx,1).await; diff --git a/node/actors/bft/src/testonly/ut_harness.rs b/node/actors/bft/src/testonly/ut_harness.rs index 397d8dc1..402f63a0 100644 --- a/node/actors/bft/src/testonly/ut_harness.rs +++ b/node/actors/bft/src/testonly/ut_harness.rs @@ -24,18 +24,11 @@ use zksync_consensus_utils::pipe::DispatcherPipe; #[cfg(test)] pub(crate) struct UTHarness { pub consensus: Consensus, + pub keys: Vec, pipe: DispatcherPipe, - keys: Vec, } impl UTHarness { - /// Creates a new `UTHarness` with minimally-significant validator set size. - pub(crate) async fn new_many(ctx: &ctx::Ctx) -> UTHarness { - let num_validators = 6; - assert!(crate::misc::faulty_replicas(num_validators) > 0); - UTHarness::new(ctx,num_validators).await - } - /// Creates a new `UTHarness` with the specified validator set size. pub(crate) async fn new(ctx: &ctx::Ctx, num_validators: usize) -> UTHarness { let mut rng = ctx.rng(); @@ -53,6 +46,13 @@ impl UTHarness { UTHarness { consensus, pipe, keys } } + /// Creates a new `UTHarness` with minimally-significant validator set size. 
+ pub(crate) async fn new_many(ctx: &ctx::Ctx) -> UTHarness { + let num_validators = 6; + assert!(crate::misc::faulty_replicas(num_validators) > 0); + UTHarness::new(ctx,num_validators).await + } + pub(crate) fn consensus_threshold(&self) -> usize { crate::misc::consensus_threshold(self.keys.len()) } @@ -62,21 +62,13 @@ impl UTHarness { } pub(crate) fn owner_as_view_leader(&self) -> ViewNumber { - let mut view = self.current_replica_view(); + let mut view = self.consensus.replica.view; while self.view_leader(view) != self.owner_key().public() { view = view.next(); } view } - pub(crate) fn key_at(&self, index: usize) -> &SecretKey { - &self.keys[index] - } - - pub(crate) fn keys(&self) -> Vec { - self.keys.clone() - } - pub(crate) fn set_view(&mut self, view: ViewNumber) { self.set_replica_view(view); self.set_leader_view(view); @@ -173,18 +165,8 @@ impl UTHarness { } pub(crate) async fn new_procedural_leader_prepare(&mut self, ctx: &ctx::Ctx) -> Signed { - let replica_prepare = self.new_current_replica_prepare(|_| {}); - self.process_replica_prepare(ctx, replica_prepare).await.unwrap().unwrap() - } - - pub(crate) async fn new_procedural_leader_prepare_many(&mut self, ctx: &ctx::Ctx) -> Signed { - let replica_prepare = self.new_current_replica_prepare(|_| {}).cast().unwrap().msg; - self.process_replica_prepare_many( - ctx, - vec![replica_prepare; self.consensus_threshold()], - self.keys(), - ) - .await.unwrap() + let replica_prepare = self.new_current_replica_prepare(|_| {}).msg; + self.process_replica_prepare_all(ctx, replica_prepare).await } pub(crate) async fn new_procedural_replica_commit(&mut self, ctx: &ctx::Ctx) -> Signed { @@ -192,23 +174,9 @@ impl UTHarness { self.process_leader_prepare(ctx,leader_prepare).await.unwrap() } - pub(crate) async fn new_procedural_replica_commit_many(&mut self, ctx: &ctx::Ctx) -> Signed { - let leader_prepare = self.new_procedural_leader_prepare_many(ctx).await; - self.process_leader_prepare(ctx,leader_prepare).await.unwrap() - } - pub(crate) async fn new_procedural_leader_commit(&mut self, ctx: &ctx::Ctx) -> Signed { let replica_commit = self.new_procedural_replica_commit(ctx).await; - self.process_replica_commit(ctx, replica_commit).await.unwrap().unwrap() - } - - pub(crate) async fn new_procedural_leader_commit_many(&mut self, ctx: &ctx::Ctx) -> Signed { - let replica_commit = self.new_procedural_replica_commit_many(ctx).await; - self.process_replica_commit_many( - ctx, - vec![replica_commit.msg; self.consensus_threshold()], - self.keys(), - ).await.unwrap() + self.process_replica_commit_all(ctx, replica_commit.msg).await } pub(crate) async fn process_leader_prepare( @@ -250,17 +218,14 @@ impl UTHarness { Ok(self.try_recv()) } - pub(crate) async fn process_replica_prepare_many( - &mut self, - ctx: &ctx::Ctx, - messages: Vec, - keys: Vec, - ) -> Option> { - let mut out = None; - for (i, (msg, key)) in messages.into_iter().zip(keys).enumerate() { - let res = self.process_replica_prepare(ctx,key.sign_msg(msg)).await; + pub(crate) async fn process_replica_prepare_all(&mut self,ctx: &ctx::Ctx, msg: ReplicaPrepare) -> Signed { + for (i, key) in self.keys.iter().enumerate() { + let res = self.consensus + .leader + .process_replica_prepare(ctx, &self.consensus.inner, key.sign_msg(msg.clone())) + .await; match (i + 1).cmp(&self.consensus_threshold()) { - Ordering::Equal => { out = Some(res.unwrap().unwrap()); }, + Ordering::Equal => { res.unwrap() }, Ordering::Less => assert_matches!( res, Err(ReplicaPrepareError::NumReceivedBelowThreshold { @@ -274,7 +239,7 
@@ impl UTHarness { Ordering::Greater => assert_matches!(res, Err(ReplicaPrepareError::Old { .. })), } } - out + self.try_recv().unwrap() } pub(crate) async fn process_replica_commit( @@ -282,42 +247,32 @@ impl UTHarness { ctx: &ctx::Ctx, msg: Signed, ) -> Result>, ReplicaCommitError> { - self.consensus.leader.process_replica_commit( - &ctx, - &self.consensus.inner, - msg, - )?; + self.consensus.leader.process_replica_commit(&ctx,&self.consensus.inner,msg)?; Ok(self.try_recv()) } - pub(crate) async fn process_replica_commit_many( - &mut self, - ctx: &ctx::Ctx, - messages: Vec, - keys: Vec, - ) -> Option> { - let mut out = None; - for (i, (msg, key)) in messages.into_iter().zip(keys).enumerate() { - let res = self.process_replica_commit(ctx,key.sign_msg(msg)).await; + async fn process_replica_commit_all(&mut self, ctx: &ctx::Ctx, msg: ReplicaCommit) -> Signed { + for (i, key) in self.keys.iter().enumerate() { + let res = self.consensus.leader.process_replica_commit(&ctx,&self.consensus.inner,key.sign_msg(msg.clone())); match (i + 1).cmp(&self.consensus_threshold()) { - Ordering::Equal => { out = Some(res.unwrap().unwrap()); }, + Ordering::Equal => { res.unwrap() }, Ordering::Less => assert_matches!( res, Err(ReplicaCommitError::NumReceivedBelowThreshold { num_messages, threshold, }) => { - assert_eq!(num_messages, i); + assert_eq!(num_messages, i+1); assert_eq!(threshold, self.consensus_threshold()) } ), Ordering::Greater => assert_matches!(res, Err(ReplicaCommitError::Old { .. })), } } - out + self.try_recv().unwrap() } - pub(crate) fn try_recv>(&mut self) -> Option> { + fn try_recv>(&mut self) -> Option> { self.pipe .try_recv() .map(|message| match message { @@ -327,14 +282,6 @@ impl UTHarness { }) } - pub(crate) fn current_replica_view(&self) -> ViewNumber { - self.consensus.replica.view - } - - pub(crate) fn current_replica_phase(&self) -> Phase { - self.consensus.replica.phase - } - pub(crate) fn view_leader(&self, view: ViewNumber) -> validator::PublicKey { self.consensus.inner.view_leader(view) } @@ -342,33 +289,22 @@ impl UTHarness { pub(crate) fn new_commit_qc(&self, mutate_fn: impl FnOnce(&mut ReplicaCommit)) -> CommitQC { let validator_set = validator::ValidatorSet::new(self.keys.iter().map(|k| k.public())).unwrap(); - - let msg = self - .new_current_replica_commit(mutate_fn) - .cast() - .unwrap() - .msg; - - let signed_messages: Vec<_> = self.keys.iter().map(|sk| sk.sign_msg(msg)).collect(); - - CommitQC::from(&signed_messages, &validator_set).unwrap() + let msg = self.new_current_replica_commit(mutate_fn).msg; + let msgs: Vec<_> = self.keys.iter().map(|k| k.sign_msg(msg.clone())).collect(); + CommitQC::from(&msgs, &validator_set).unwrap() } pub(crate) fn new_prepare_qc(&self, mutate_fn: impl FnOnce(&mut ReplicaPrepare)) -> PrepareQC { let validator_set = validator::ValidatorSet::new(self.keys.iter().map(|k| k.public())).unwrap(); let msg = self.new_current_replica_prepare(mutate_fn).msg; - let signed_messages: Vec<_> = self - .keys - .iter() - .map(|sk| sk.sign_msg(msg.clone())) - .collect(); - PrepareQC::from(&signed_messages, &validator_set).unwrap() + let msgs: Vec<_> = self.keys.iter().map(|k| k.sign_msg(msg.clone())).collect(); + PrepareQC::from(&msgs, &validator_set).unwrap() } pub(crate) fn new_prepare_qc_many( &mut self, - mutate_fn: impl FnMut(&mut ReplicaPrepare), + mut mutate_fn: impl FnMut(&mut ReplicaPrepare), ) -> PrepareQC { let validator_set = validator::ValidatorSet::new(self.keys.iter().map(|k| k.public())).unwrap(); From 
8529fdefc15d3cd87910ff9c55c62d6174b56b7b Mon Sep 17 00:00:00 2001
From: Grzegorz Prusak
Date: Mon, 4 Dec 2023 11:08:37 +0100
Subject: [PATCH 08/14] added validate_proposal test

---
 node/actors/bft/src/replica/tests.rs       | 42 ++++++++++++----------
 node/actors/bft/src/testonly/ut_harness.rs | 22 +++++-------
 node/libs/storage/src/traits.rs            |  9 +++--
 3 files changed, 36 insertions(+), 37 deletions(-)

diff --git a/node/actors/bft/src/replica/tests.rs b/node/actors/bft/src/replica/tests.rs
index 17127923..fc30e110 100644
--- a/node/actors/bft/src/replica/tests.rs
+++ b/node/actors/bft/src/replica/tests.rs
@@ -6,8 +6,9 @@ use crate::{inner::ConsensusInner, leader::ReplicaPrepareError, testonly::ut_har
 use assert_matches::assert_matches;
 use rand::Rng;
 use zksync_consensus_roles::validator::{
+    self,
     LeaderPrepare, Payload, ReplicaCommit,
-    ReplicaPrepare, ViewNumber,
+    ReplicaPrepare, ViewNumber, CommitQC,
 };
 
 #[tokio::test]
@@ -118,29 +119,34 @@ async fn leader_prepare_old_view() {
     );
 }
 
-/*
 /// Tests that `WriteBlockStore::verify_payload` is applied before signing a vote.
 #[tokio::test]
 async fn leader_prepare_invalid_payload() {
-    let mut util = UTHarness::new_one().await;
-    let leader_prepare = util.new_procedural_leader_prepare_one().await;
-    let mut leader_prepare = util
-        .new_procedural_leader_prepare_one()
-        .await
-        .cast::<LeaderPrepare>()
-        .unwrap()
-        .msg;
-    let leader_prepare = util
-        .owner_key()
-        .sign_msg(ConsensusMsg::LeaderPrepare(leader_prepare));
+    zksync_concurrency::testonly::abort_on_panic();
+    let ctx = &ctx::test_root(&ctx::RealClock);
+    let mut util = UTHarness::new(ctx,1).await;
+    let leader_prepare = util.new_procedural_leader_prepare(ctx).await;
+    // Insert a finalized block to the storage.
+    // Default implementation of verify_payload() fails if
+    // head block number >= proposal block number.
+ let block = validator::FinalBlock { + header: leader_prepare.msg.proposal.clone(), + payload: leader_prepare.msg.proposal_payload.clone().unwrap(), + justification: CommitQC::from( + &[util.keys[0].sign_msg(ReplicaCommit { + protocol_version: validator::ProtocolVersion::EARLIEST, + view: util.consensus.replica.view, + proposal: leader_prepare.msg.proposal.clone(), + })], + &util.validator_set(), + ).unwrap(), + }; + util.consensus.replica.storage.put_block(ctx,&block).await.unwrap(); let res = util.process_leader_prepare(ctx,leader_prepare).await; - assert_matches!( - res, - Err(LeaderPrepareError::ProposalInvalidPayload(..)), - ); -}*/ + assert_matches!(res,Err(LeaderPrepareError::ProposalInvalidPayload(..))); +} #[tokio::test] async fn leader_prepare_invalid_sig() { diff --git a/node/actors/bft/src/testonly/ut_harness.rs b/node/actors/bft/src/testonly/ut_harness.rs index 402f63a0..c197fbf4 100644 --- a/node/actors/bft/src/testonly/ut_harness.rs +++ b/node/actors/bft/src/testonly/ut_harness.rs @@ -286,38 +286,32 @@ impl UTHarness { self.consensus.inner.view_leader(view) } + pub(crate) fn validator_set(&self) -> validator::ValidatorSet { + validator::ValidatorSet::new(self.keys.iter().map(|k| k.public())).unwrap() + } + pub(crate) fn new_commit_qc(&self, mutate_fn: impl FnOnce(&mut ReplicaCommit)) -> CommitQC { - let validator_set = - validator::ValidatorSet::new(self.keys.iter().map(|k| k.public())).unwrap(); let msg = self.new_current_replica_commit(mutate_fn).msg; let msgs: Vec<_> = self.keys.iter().map(|k| k.sign_msg(msg.clone())).collect(); - CommitQC::from(&msgs, &validator_set).unwrap() + CommitQC::from(&msgs, &self.validator_set()).unwrap() } pub(crate) fn new_prepare_qc(&self, mutate_fn: impl FnOnce(&mut ReplicaPrepare)) -> PrepareQC { - let validator_set = - validator::ValidatorSet::new(self.keys.iter().map(|k| k.public())).unwrap(); let msg = self.new_current_replica_prepare(mutate_fn).msg; let msgs: Vec<_> = self.keys.iter().map(|k| k.sign_msg(msg.clone())).collect(); - PrepareQC::from(&msgs, &validator_set).unwrap() + PrepareQC::from(&msgs, &self.validator_set()).unwrap() } pub(crate) fn new_prepare_qc_many( &mut self, mut mutate_fn: impl FnMut(&mut ReplicaPrepare), ) -> PrepareQC { - let validator_set = - validator::ValidatorSet::new(self.keys.iter().map(|k| k.public())).unwrap(); - - let signed_messages: Vec<_> = self - .keys - .iter() + let msgs: Vec<_> = self.keys.iter() .map(|sk| { let msg = self.new_current_replica_prepare(|msg| mutate_fn(msg)).msg; sk.sign_msg(msg) }) .collect(); - - PrepareQC::from(&signed_messages, &validator_set).unwrap() + PrepareQC::from(&msgs, &self.validator_set()).unwrap() } } diff --git a/node/libs/storage/src/traits.rs b/node/libs/storage/src/traits.rs index b2b184eb..20bb2c64 100644 --- a/node/libs/storage/src/traits.rs +++ b/node/libs/storage/src/traits.rs @@ -86,12 +86,11 @@ pub trait WriteBlockStore: BlockStore { &self, ctx: &ctx::Ctx, block_number: BlockNumber, - payload: &Payload, + _payload: &Payload, ) -> ctx::Result<()> { - if let Some(block) = self.block(ctx,block_number).await? 
{ - if &block.payload!=payload { - Err(anyhow::anyhow!("block already stored with different payload"))?; - } + let head_number = self.head_block(ctx).await?.header.number; + if head_number >= block_number { + Err(anyhow::anyhow!("received proposal for block {block_number:?}, while head is at {head_number:?}"))?; } Ok(()) } From f13fae39d1f12192de4834a1f48c0ccbbf0dc911 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Mon, 4 Dec 2023 12:52:10 +0100 Subject: [PATCH 09/14] cargo fmt --- node/Cargo.lock | 1 + node/actors/bft/Cargo.toml | 1 + node/actors/bft/src/leader/state_machine.rs | 5 +- node/actors/bft/src/leader/tests.rs | 292 +++++++++--------- node/actors/bft/src/lib.rs | 6 +- node/actors/bft/src/replica/leader_prepare.rs | 6 +- node/actors/bft/src/replica/tests.rs | 241 +++++++++------ node/actors/bft/src/testonly/ut_harness.rs | 144 +++++---- node/libs/storage/src/replica_state.rs | 9 +- node/libs/storage/src/traits.rs | 4 +- 10 files changed, 380 insertions(+), 329 deletions(-) diff --git a/node/Cargo.lock b/node/Cargo.lock index 76ae25f4..8c44cd98 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -2599,6 +2599,7 @@ dependencies = [ "assert_matches", "async-trait", "once_cell", + "pretty_assertions", "rand 0.8.5", "thiserror", "tokio", diff --git a/node/actors/bft/Cargo.toml b/node/actors/bft/Cargo.toml index 5cd22515..5e0f6211 100644 --- a/node/actors/bft/Cargo.toml +++ b/node/actors/bft/Cargo.toml @@ -26,3 +26,4 @@ vise.workspace = true [dev-dependencies] tokio.workspace = true assert_matches.workspace = true +pretty_assertions.workspace = true diff --git a/node/actors/bft/src/leader/state_machine.rs b/node/actors/bft/src/leader/state_machine.rs index 37505d40..39d8b8e9 100644 --- a/node/actors/bft/src/leader/state_machine.rs +++ b/node/actors/bft/src/leader/state_machine.rs @@ -5,7 +5,7 @@ use std::{ unreachable, }; use tracing::instrument; -use zksync_concurrency::{ctx, metrics::LatencyHistogramExt as _, time, error::Wrap as _}; +use zksync_concurrency::{ctx, error::Wrap as _, metrics::LatencyHistogramExt as _, time}; use zksync_consensus_roles::validator; /// The StateMachine struct contains the state of the leader. This is a simple state machine. 
We just store @@ -66,7 +66,8 @@ impl StateMachine { validator::ConsensusMsg::ReplicaPrepare(_) => { let res = match self .process_replica_prepare(ctx, consensus, input.cast().unwrap()) - .await.wrap("process_replica_prepare()") + .await + .wrap("process_replica_prepare()") { Ok(()) => Ok(()), Err(super::replica_prepare::Error::Internal(err)) => { diff --git a/node/actors/bft/src/leader/tests.rs b/node/actors/bft/src/leader/tests.rs index 19f10e8e..a394ffa2 100644 --- a/node/actors/bft/src/leader/tests.rs +++ b/node/actors/bft/src/leader/tests.rs @@ -1,46 +1,45 @@ use super::{ replica_commit::Error as ReplicaCommitError, replica_prepare::Error as ReplicaPrepareError, }; -use zksync_concurrency::ctx; use crate::testonly::ut_harness::UTHarness; use assert_matches::assert_matches; +use pretty_assertions::assert_eq; use rand::Rng; -use zksync_consensus_roles::validator::{ - self, LeaderCommit, LeaderPrepare, Phase, PrepareQC, ProtocolVersion, - ViewNumber, -}; +use zksync_concurrency::ctx; +use zksync_consensus_roles::validator::{self, LeaderCommit, Phase, ProtocolVersion, ViewNumber}; #[tokio::test] async fn replica_prepare_sanity() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new_many(ctx).await; - util.set_view(util.owner_as_view_leader()); - util.new_procedural_leader_prepare(ctx).await; + util.new_leader_prepare(ctx).await; } #[tokio::test] async fn replica_prepare_sanity_yield_leader_prepare() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; + let mut util = UTHarness::new(ctx, 1).await; - let replica_prepare = util.new_current_replica_prepare(|_| {}); - let leader_prepare = util.process_replica_prepare(ctx,replica_prepare.clone()).await.unwrap().unwrap(); - assert_matches!( - leader_prepare.msg, - LeaderPrepare { - protocol_version, - view, - proposal, - justification, - .. 
- } => { - assert_eq!(protocol_version, replica_prepare.msg.protocol_version); - assert_eq!(view, replica_prepare.msg.view); - assert_eq!(proposal.parent, replica_prepare.msg.high_vote.proposal.hash()); - assert_eq!(justification, util.new_prepare_qc(|msg| *msg = replica_prepare.msg)); - } + let replica_prepare = util.new_replica_prepare(|_| {}); + let leader_prepare = util + .process_replica_prepare(ctx, replica_prepare.clone()) + .await + .unwrap() + .unwrap(); + assert_eq!( + leader_prepare.msg.protocol_version, + replica_prepare.msg.protocol_version + ); + assert_eq!(leader_prepare.msg.view, replica_prepare.msg.view); + assert_eq!( + leader_prepare.msg.proposal.parent, + replica_prepare.msg.high_vote.proposal.hash() + ); + assert_eq!( + leader_prepare.msg.justification, + util.new_prepare_qc(|msg| *msg = replica_prepare.msg) ); } @@ -49,46 +48,38 @@ async fn replica_prepare_sanity_yield_leader_prepare_reproposal() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new_many(ctx).await; - util.set_view(util.owner_as_view_leader()); - - let replica_prepare = util.new_unfinalized_replica_prepare().msg; - let leader_prepare = util.process_replica_prepare_all(ctx, replica_prepare.clone()).await; - - assert_matches!( - leader_prepare.msg, - LeaderPrepare { - protocol_version, - view, - proposal, - proposal_payload, - justification, - } => { - assert_eq!(protocol_version, replica_prepare.protocol_version); - assert_eq!(view, replica_prepare.view); - assert_eq!(proposal, replica_prepare.high_vote.proposal); - assert_eq!(proposal_payload, None); - assert_matches!( - justification, - PrepareQC { map, .. } => { - assert_eq!(map.len(), 1); - assert_eq!(*map.first_key_value().unwrap().0, replica_prepare); - } - ); - } + util.new_replica_commit(ctx).await; + util.replica_timeout(); + let replica_prepare = util.new_replica_prepare(|_| {}).msg; + let leader_prepare = util + .process_replica_prepare_all(ctx, replica_prepare.clone()) + .await; + + assert_eq!( + leader_prepare.msg.protocol_version, + replica_prepare.protocol_version ); + assert_eq!(leader_prepare.msg.view, replica_prepare.view); + assert_eq!( + leader_prepare.msg.proposal, + replica_prepare.high_vote.proposal + ); + assert_eq!(leader_prepare.msg.proposal_payload, None); + let map = leader_prepare.msg.justification.map; + assert_eq!(map.len(), 1); + assert_eq!(*map.first_key_value().unwrap().0, replica_prepare); } #[tokio::test] async fn replica_prepare_old_view() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - util.set_replica_view(ViewNumber(1)); - util.set_leader_view(ViewNumber(2)); - util.set_leader_phase(Phase::Prepare); + let mut util = UTHarness::new(ctx, 1).await; - let replica_prepare = util.new_current_replica_prepare(|_| {}); - let res = util.process_replica_prepare(ctx,replica_prepare).await; + let replica_prepare = util.new_replica_prepare(|_| {}); + util.consensus.leader.view = util.consensus.leader.view.next(); + util.consensus.leader.phase = Phase::Prepare; + let res = util.process_replica_prepare(ctx, replica_prepare).await; assert_matches!( res, Err(ReplicaPrepareError::Old { @@ -102,11 +93,11 @@ async fn replica_prepare_old_view() { async fn replica_prepare_during_commit() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - util.set_leader_phase(Phase::Commit); + let mut 
util = UTHarness::new(ctx, 1).await; + util.consensus.leader.phase = Phase::Commit; - let replica_prepare = util.new_current_replica_prepare(|_| {}); - let res = util.process_replica_prepare(ctx,replica_prepare).await; + let replica_prepare = util.new_replica_prepare(|_| {}); + let res = util.process_replica_prepare(ctx, replica_prepare).await; assert_matches!( res, Err(ReplicaPrepareError::Old { @@ -120,27 +111,29 @@ async fn replica_prepare_during_commit() { async fn replica_prepare_not_leader_in_view() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,2).await; - let current_view_leader = util.view_leader(util.consensus.replica.view); - assert_ne!(current_view_leader, util.owner_key().public()); - - let replica_prepare = util.new_current_replica_prepare(|_| {}); - let res = util.process_replica_prepare(ctx,replica_prepare).await; + let mut util = UTHarness::new(ctx, 2).await; + let replica_prepare = util.new_replica_prepare(|msg| { + // Moving to the next view changes the leader. + msg.view = msg.view.next(); + }); + let res = util.process_replica_prepare(ctx, replica_prepare).await; assert_matches!(res, Err(ReplicaPrepareError::NotLeaderInView)); } #[tokio::test] async fn replica_prepare_already_exists() { let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,2).await; - let view = ViewNumber(2); - util.set_replica_view(view); - util.set_leader_view(view); - assert_eq!(util.view_leader(view), util.owner_key().public()); + let mut util = UTHarness::new(ctx, 2).await; - let replica_prepare = util.new_current_replica_prepare(|_| {}); - assert!(util.process_replica_prepare(ctx,replica_prepare.clone()).await.is_err()); - let res = util.process_replica_prepare(ctx,replica_prepare.clone()).await; + util.set_owner_as_view_leader(); + let replica_prepare = util.new_replica_prepare(|_| {}); + assert!(util + .process_replica_prepare(ctx, replica_prepare.clone()) + .await + .is_err()); + let res = util + .process_replica_prepare(ctx, replica_prepare.clone()) + .await; assert_matches!( res, Err(ReplicaPrepareError::Exists { existing_message }) => { @@ -154,13 +147,10 @@ async fn replica_prepare_num_received_below_threshold() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new(ctx, 2).await; - let view = ViewNumber(2); - util.set_replica_view(view); - util.set_leader_view(view); - assert_eq!(util.view_leader(view), util.owner_key().public()); - let replica_prepare = util.new_current_replica_prepare(|_| {}); - let res = util.process_replica_prepare(ctx,replica_prepare).await; + util.set_owner_as_view_leader(); + let replica_prepare = util.new_replica_prepare(|_| {}); + let res = util.process_replica_prepare(ctx, replica_prepare).await; assert_matches!( res, Err(ReplicaPrepareError::NumReceivedBelowThreshold { @@ -174,10 +164,10 @@ async fn replica_prepare_num_received_below_threshold() { async fn replica_prepare_invalid_sig() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let mut replica_prepare = util.new_current_replica_prepare(|_| {}); + let mut util = UTHarness::new(ctx, 1).await; + let mut replica_prepare = util.new_replica_prepare(|_| {}); replica_prepare.sig = ctx.rng().gen(); - let res = util.process_replica_prepare(ctx,replica_prepare).await; + let res = util.process_replica_prepare(ctx, replica_prepare).await; 
assert_matches!(res, Err(ReplicaPrepareError::InvalidSignature(_))); } @@ -185,8 +175,8 @@ async fn replica_prepare_invalid_sig() { async fn replica_prepare_invalid_commit_qc() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let replica_prepare = util.new_current_replica_prepare(|msg| msg.high_qc = ctx.rng().gen()); + let mut util = UTHarness::new(ctx, 1).await; + let replica_prepare = util.new_replica_prepare(|msg| msg.high_qc = ctx.rng().gen()); let res = util.process_replica_prepare(ctx, replica_prepare).await; assert_matches!(res, Err(ReplicaPrepareError::InvalidHighQC(..))); } @@ -195,13 +185,13 @@ async fn replica_prepare_invalid_commit_qc() { async fn replica_prepare_high_qc_of_current_view() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; + let mut util = UTHarness::new(ctx, 1).await; let view = ViewNumber(1); let qc_view = ViewNumber(1); util.set_view(view); let qc = util.new_commit_qc(|msg| msg.view = qc_view); - let replica_prepare = util.new_current_replica_prepare(|msg| msg.high_qc = qc); - let res = util.process_replica_prepare(ctx,replica_prepare).await; + let replica_prepare = util.new_replica_prepare(|msg| msg.high_qc = qc); + let res = util.process_replica_prepare(ctx, replica_prepare).await; assert_matches!( res, Err(ReplicaPrepareError::HighQCOfFutureView { high_qc_view, current_view }) => { @@ -215,14 +205,14 @@ async fn replica_prepare_high_qc_of_current_view() { async fn replica_prepare_high_qc_of_future_view() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; + let mut util = UTHarness::new(ctx, 1).await; let view = ViewNumber(1); let qc_view = ViewNumber(2); util.set_view(view); let qc = util.new_commit_qc(|msg| msg.view = qc_view); - let replica_prepare = util.new_current_replica_prepare(|msg| msg.high_qc = qc); - let res = util.process_replica_prepare(ctx,replica_prepare).await; + let replica_prepare = util.new_replica_prepare(|msg| msg.high_qc = qc); + let res = util.process_replica_prepare(ctx, replica_prepare).await; assert_matches!( res, Err(ReplicaPrepareError::HighQCOfFutureView{ high_qc_view, current_view }) => { @@ -237,20 +227,17 @@ async fn replica_prepare_high_qc_of_future_view() { async fn replica_prepare_non_validator_signer() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,2).await; + let mut util = UTHarness::new(ctx, 2).await; - let view = ViewNumber(2); - util.set_view(view); - assert_eq!(util.view_leader(view), util.keys[0].public()); - let replica_prepare = util.new_current_replica_prepare(|_| {}); + let replica_prepare = util.new_replica_prepare(|_| {}); assert!(util - .process_replica_prepare(ctx,replica_prepare.clone()) + .process_replica_prepare(ctx, replica_prepare.clone()) .await .is_err()); let non_validator: validator::SecretKey = ctx.rng().gen(); let replica_prepare = non_validator.sign_msg(replica_prepare.msg); - util.process_replica_prepare(ctx,replica_prepare) + util.process_replica_prepare(ctx, replica_prepare) .await .unwrap(); // PANICS: @@ -262,17 +249,20 @@ async fn replica_commit_sanity() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new_many(ctx).await; - util.set_view(util.owner_as_view_leader()); - 
util.new_procedural_leader_commit(ctx).await; + util.new_leader_commit(ctx).await; } #[tokio::test] async fn replica_commit_sanity_yield_leader_commit() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let replica_commit = util.new_procedural_replica_commit(ctx).await; - let leader_commit = util.process_replica_commit(ctx,replica_commit.clone()).await.unwrap().unwrap(); + let mut util = UTHarness::new(ctx, 1).await; + let replica_commit = util.new_replica_commit(ctx).await; + let leader_commit = util + .process_replica_commit(ctx, replica_commit.clone()) + .await + .unwrap() + .unwrap(); assert_matches!( leader_commit.msg, LeaderCommit { @@ -289,11 +279,11 @@ async fn replica_commit_sanity_yield_leader_commit() { async fn replica_commit_old() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let mut replica_commit = util.new_procedural_replica_commit(ctx).await.msg; + let mut util = UTHarness::new(ctx, 1).await; + let mut replica_commit = util.new_replica_commit(ctx).await.msg; replica_commit.view = util.consensus.replica.view.prev(); let replica_commit = util.owner_key().sign_msg(replica_commit); - let res = util.process_replica_commit(ctx,replica_commit).await; + let res = util.process_replica_commit(ctx, replica_commit).await; assert_matches!( res, Err(ReplicaCommitError::Old { current_view, current_phase }) => { @@ -306,13 +296,13 @@ async fn replica_commit_old() { #[tokio::test] async fn replica_commit_not_leader_in_view() { let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,2).await; + let mut util = UTHarness::new(ctx, 2).await; let current_view_leader = util.view_leader(util.consensus.replica.view); assert_ne!(current_view_leader, util.owner_key().public()); let replica_commit = util.new_current_replica_commit(|_| {}); - let res = util.process_replica_commit(ctx,replica_commit).await; + let res = util.process_replica_commit(ctx, replica_commit).await; assert_matches!(res, Err(ReplicaCommitError::NotLeaderInView)); } @@ -320,20 +310,20 @@ async fn replica_commit_not_leader_in_view() { async fn replica_commit_already_exists() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,2).await; + let mut util = UTHarness::new(ctx, 2).await; let view = ViewNumber(2); util.set_replica_view(view); util.set_leader_view(view); assert_eq!(util.view_leader(view), util.owner_key().public()); - - let replica_prepare1 = util.new_current_replica_prepare(|_| {}); - assert!(util.process_replica_prepare(ctx,replica_prepare1.clone()).await.is_err()); - let replica_prepare2 = util.keys[1].sign_msg(replica_prepare1.msg); - let leader_prepare = util.process_replica_prepare(ctx,replica_prepare2).await.unwrap().unwrap(); - let replica_commit = util.process_leader_prepare(ctx,leader_prepare).await.unwrap(); - assert!(util.process_replica_commit(ctx,replica_commit.clone()).await.is_err()); - let res = util.process_replica_commit(ctx,replica_commit.clone()).await; + let replica_commit = util.new_replica_commit(ctx).await; + assert!(util + .process_replica_commit(ctx, replica_commit.clone()) + .await + .is_err()); + let res = util + .process_replica_commit(ctx, replica_commit.clone()) + .await; assert_matches!( res, Err(ReplicaCommitError::DuplicateMessage { existing_message }) => { @@ -346,19 +336,26 @@ async fn 
replica_commit_already_exists() { async fn replica_commit_num_received_below_threshold() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,2).await; - - let view = ViewNumber(2); - util.set_replica_view(view); - util.set_leader_view(view); - assert_eq!(util.view_leader(view), util.owner_key().public()); + let mut util = UTHarness::new(ctx, 2).await; - let replica_prepare = util.new_current_replica_prepare(|_| {}); - assert!(util.process_replica_prepare(ctx,replica_prepare.clone()).await.is_err()); + let replica_prepare = util.new_replica_prepare(|_| {}); + assert!(util + .process_replica_prepare(ctx, replica_prepare.clone()) + .await + .is_err()); let replica_prepare = util.keys[1].sign_msg(replica_prepare.msg); - let leader_prepare = util.process_replica_prepare(ctx,replica_prepare).await.unwrap().unwrap(); - let replica_commit = util.process_leader_prepare(ctx,leader_prepare).await.unwrap(); - let res = util.process_replica_commit(ctx,replica_commit.clone()).await; + let leader_prepare = util + .process_replica_prepare(ctx, replica_prepare) + .await + .unwrap() + .unwrap(); + let replica_commit = util + .process_leader_prepare(ctx, leader_prepare) + .await + .unwrap(); + let res = util + .process_replica_commit(ctx, replica_commit.clone()) + .await; assert_matches!( res, Err(ReplicaCommitError::NumReceivedBelowThreshold { @@ -372,10 +369,10 @@ async fn replica_commit_num_received_below_threshold() { async fn replica_commit_invalid_sig() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; + let mut util = UTHarness::new(ctx, 1).await; let mut replica_commit = util.new_current_replica_commit(|_| {}); replica_commit.sig = ctx.rng().gen(); - let res = util.process_replica_commit(ctx,replica_commit).await; + let res = util.process_replica_commit(ctx, replica_commit).await; assert_matches!(res, Err(ReplicaCommitError::InvalidSignature(..))); } @@ -383,9 +380,9 @@ async fn replica_commit_invalid_sig() { async fn replica_commit_unexpected_proposal() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; + let mut util = UTHarness::new(ctx, 1).await; let replica_commit = util.new_current_replica_commit(|_| {}); - let res = util.process_replica_commit(ctx,replica_commit).await; + let res = util.process_replica_commit(ctx, replica_commit).await; assert_matches!(res, Err(ReplicaCommitError::UnexpectedProposal)); } @@ -394,27 +391,34 @@ async fn replica_commit_unexpected_proposal() { async fn replica_commit_protocol_version_mismatch() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,2).await; - - let view = ViewNumber(2); - util.set_replica_view(view); - util.set_leader_view(view); - assert_eq!(util.view_leader(view), util.owner_key().public()); + let mut util = UTHarness::new(ctx, 2).await; - let replica_prepare = util.new_current_replica_prepare(|_| {}); + let replica_prepare = util.new_replica_prepare(|_| {}); assert!(util - .process_replica_prepare(ctx,replica_prepare.clone()) + .process_replica_prepare(ctx, replica_prepare.clone()) .await .is_err()); let replica_prepare = util.keys[1].sign_msg(replica_prepare.msg); - let leader_prepare = util.process_replica_prepare(ctx,replica_prepare).await.unwrap().unwrap(); - let replica_commit = 
util.process_leader_prepare(ctx,leader_prepare).await.unwrap(); - assert!(util.process_replica_commit(ctx,replica_commit.clone()).await.is_err()); + let leader_prepare = util + .process_replica_prepare(ctx, replica_prepare) + .await + .unwrap() + .unwrap(); + let replica_commit = util + .process_leader_prepare(ctx, leader_prepare) + .await + .unwrap(); + assert!(util + .process_replica_commit(ctx, replica_commit.clone()) + .await + .is_err()); let mut replica_commit = replica_commit.msg; replica_commit.protocol_version = ProtocolVersion(replica_commit.protocol_version.0 + 1); let replica_commit = util.keys[1].sign_msg(replica_commit); - util.process_replica_commit(ctx,replica_commit).await.unwrap(); + util.process_replica_commit(ctx, replica_commit) + .await + .unwrap(); // PANICS: // "Couldn't create justification from valid replica messages!: CommitQC can only be created from votes for the same message." } diff --git a/node/actors/bft/src/lib.rs b/node/actors/bft/src/lib.rs index b4e83940..046fb357 100644 --- a/node/actors/bft/src/lib.rs +++ b/node/actors/bft/src/lib.rs @@ -132,15 +132,13 @@ impl Consensus { .await } }; - + // Notify network actor that the message has been processed. // Ignore sending error. let _ = req.ack.send(()); res } - None => { - self.replica.process_input(ctx, &self.inner, None).await - } + None => self.replica.process_input(ctx, &self.inner, None).await, }; if let Err(err) = res { return match err { diff --git a/node/actors/bft/src/replica/leader_prepare.rs b/node/actors/bft/src/replica/leader_prepare.rs index 74ec0fee..cadf4c2b 100644 --- a/node/actors/bft/src/replica/leader_prepare.rs +++ b/node/actors/bft/src/replica/leader_prepare.rs @@ -252,7 +252,11 @@ impl StateMachine { } // Payload should be valid. - if let Err(err) = self.storage.verify_payload(ctx, message.proposal.number, payload).await { + if let Err(err) = self + .storage + .verify_payload(ctx, message.proposal.number, payload) + .await + { return Err(match err { err @ ctx::Error::Canceled(_) => Error::Internal(err), ctx::Error::Internal(err) => Error::ProposalInvalidPayload(err), diff --git a/node/actors/bft/src/replica/tests.rs b/node/actors/bft/src/replica/tests.rs index fc30e110..35aff878 100644 --- a/node/actors/bft/src/replica/tests.rs +++ b/node/actors/bft/src/replica/tests.rs @@ -1,14 +1,12 @@ use super::{ leader_commit::Error as LeaderCommitError, leader_prepare::Error as LeaderPrepareError, }; -use zksync_concurrency::ctx; use crate::{inner::ConsensusInner, leader::ReplicaPrepareError, testonly::ut_harness::UTHarness}; use assert_matches::assert_matches; use rand::Rng; +use zksync_concurrency::ctx; use zksync_consensus_roles::validator::{ - self, - LeaderPrepare, Payload, ReplicaCommit, - ReplicaPrepare, ViewNumber, CommitQC, + self, CommitQC, Payload, PrepareQC, ReplicaCommit, ReplicaPrepare, ViewNumber, }; #[tokio::test] @@ -16,9 +14,10 @@ async fn leader_prepare_sanity() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new_many(ctx).await; - util.set_view(util.owner_as_view_leader()); - let leader_prepare = util.new_procedural_leader_prepare(ctx).await; - util.process_leader_prepare(ctx,leader_prepare).await.unwrap(); + let leader_prepare = util.new_leader_prepare(ctx).await; + util.process_leader_prepare(ctx, leader_prepare) + .await + .unwrap(); } #[tokio::test] @@ -26,18 +25,11 @@ async fn leader_prepare_reproposal_sanity() { zksync_concurrency::testonly::abort_on_panic(); let ctx = 
&ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new_many(ctx).await; - util.set_view(util.owner_as_view_leader()); - let replica_prepare = util.new_unfinalized_replica_prepare().msg; - let leader_prepare = util.process_replica_prepare_all(ctx,replica_prepare.clone()).await; - - assert_matches!( - &leader_prepare.msg, - LeaderPrepare {proposal_payload, .. } => { - assert!(proposal_payload.is_none()); - } - ); - - util.process_leader_prepare(ctx,leader_prepare) + util.new_replica_commit(ctx).await; + util.replica_timeout(); + let leader_prepare = util.new_leader_prepare(ctx).await; + assert!(leader_prepare.msg.proposal_payload.is_none()); + util.process_leader_prepare(ctx, leader_prepare) .await .unwrap(); } @@ -46,10 +38,11 @@ async fn leader_prepare_reproposal_sanity() { async fn leader_prepare_sanity_yield_replica_commit() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; + let mut util = UTHarness::new(ctx, 1).await; - let leader_prepare = util.new_procedural_leader_prepare(ctx).await; - let replica_commit = util.process_leader_prepare(ctx,leader_prepare.clone()) + let leader_prepare = util.new_leader_prepare(ctx).await; + let replica_commit = util + .process_leader_prepare(ctx, leader_prepare.clone()) .await .unwrap(); assert_eq!( @@ -66,14 +59,16 @@ async fn leader_prepare_sanity_yield_replica_commit() { async fn leader_prepare_invalid_leader() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,2).await; + let mut util = UTHarness::new(ctx, 2).await; let view = ViewNumber(2); util.set_view(view); assert_eq!(util.view_leader(view), util.keys[0].public()); - let replica_prepare = util.new_current_replica_prepare(|_| {}); - let res = util.process_replica_prepare(ctx,replica_prepare.clone()).await; + let replica_prepare = util.new_replica_prepare(|_| {}); + let res = util + .process_replica_prepare(ctx, replica_prepare.clone()) + .await; assert_matches!( res, Err(ReplicaPrepareError::NumReceivedBelowThreshold { @@ -83,15 +78,17 @@ async fn leader_prepare_invalid_leader() { ); let replica_prepare = util.keys[1].sign_msg(replica_prepare.msg); - let mut leader_prepare = util.process_replica_prepare(ctx,replica_prepare).await.unwrap().unwrap().msg; + let mut leader_prepare = util + .process_replica_prepare(ctx, replica_prepare) + .await + .unwrap() + .unwrap() + .msg; leader_prepare.view = leader_prepare.view.next(); - assert_ne!( - util.view_leader(leader_prepare.view), - util.keys[0].public() - ); + assert_ne!(util.view_leader(leader_prepare.view), util.keys[0].public()); let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx,leader_prepare).await; + let res = util.process_leader_prepare(ctx, leader_prepare).await; assert_matches!( res, Err(LeaderPrepareError::InvalidLeader { correct_leader, received_leader }) => { @@ -105,11 +102,11 @@ async fn leader_prepare_invalid_leader() { async fn leader_prepare_old_view() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; + let mut util = UTHarness::new(ctx, 1).await; + let mut leader_prepare = util.new_leader_prepare(ctx).await.msg; leader_prepare.view = util.consensus.replica.view.prev(); let leader_prepare = util.owner_key().sign_msg(leader_prepare); - 
let res = util.process_leader_prepare(ctx,leader_prepare).await; + let res = util.process_leader_prepare(ctx, leader_prepare).await; assert_matches!( res, Err(LeaderPrepareError::Old { current_view, current_phase }) => { @@ -124,77 +121,82 @@ async fn leader_prepare_old_view() { async fn leader_prepare_invalid_payload() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let leader_prepare = util.new_procedural_leader_prepare(ctx).await; + let mut util = UTHarness::new(ctx, 1).await; + let leader_prepare = util.new_leader_prepare(ctx).await; // Insert a finalized block to the storage. // Default implementation of verify_payload() fails if // head block number >= proposal block number. let block = validator::FinalBlock { - header: leader_prepare.msg.proposal.clone(), + header: leader_prepare.msg.proposal, payload: leader_prepare.msg.proposal_payload.clone().unwrap(), justification: CommitQC::from( &[util.keys[0].sign_msg(ReplicaCommit { protocol_version: validator::ProtocolVersion::EARLIEST, view: util.consensus.replica.view, - proposal: leader_prepare.msg.proposal.clone(), + proposal: leader_prepare.msg.proposal, })], &util.validator_set(), - ).unwrap(), + ) + .unwrap(), }; - util.consensus.replica.storage.put_block(ctx,&block).await.unwrap(); + util.consensus + .replica + .storage + .put_block(ctx, &block) + .await + .unwrap(); - let res = util.process_leader_prepare(ctx,leader_prepare).await; - assert_matches!(res,Err(LeaderPrepareError::ProposalInvalidPayload(..))); + let res = util.process_leader_prepare(ctx, leader_prepare).await; + assert_matches!(res, Err(LeaderPrepareError::ProposalInvalidPayload(..))); } #[tokio::test] async fn leader_prepare_invalid_sig() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let mut leader_prepare = util.new_rnd_leader_prepare(&mut ctx.rng(),|_| {}); + let mut util = UTHarness::new(ctx, 1).await; + let mut leader_prepare = util.new_rnd_leader_prepare(&mut ctx.rng(), |_| {}); leader_prepare.sig = ctx.rng().gen(); - let res = util.process_leader_prepare(ctx,leader_prepare).await; + let res = util.process_leader_prepare(ctx, leader_prepare).await; assert_matches!(res, Err(LeaderPrepareError::InvalidSignature(..))); } #[tokio::test] async fn leader_prepare_invalid_prepare_qc() { let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; + let mut util = UTHarness::new(ctx, 1).await; + let mut leader_prepare = util.new_leader_prepare(ctx).await.msg; leader_prepare.justification = ctx.rng().gen(); let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx,leader_prepare).await; - assert_matches!( - res, - Err(LeaderPrepareError::InvalidPrepareQC(_)) - ); + let res = util.process_leader_prepare(ctx, leader_prepare).await; + assert_matches!(res, Err(LeaderPrepareError::InvalidPrepareQC(_))); } #[tokio::test] async fn leader_prepare_invalid_high_qc() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; + let mut util = UTHarness::new(ctx, 1).await; + let mut leader_prepare = util.new_leader_prepare(ctx).await.msg; leader_prepare.justification = 
util.new_prepare_qc(|msg| msg.high_qc = ctx.rng().gen()); let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx,leader_prepare).await; + let res = util.process_leader_prepare(ctx, leader_prepare).await; assert_matches!(res, Err(LeaderPrepareError::InvalidHighQC(_))); } #[tokio::test] async fn leader_prepare_proposal_oversized_payload() { let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; + let mut util = UTHarness::new(ctx, 1).await; let payload_oversize = ConsensusInner::PAYLOAD_MAX_SIZE + 1; let payload_vec = vec![0; payload_oversize]; - let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; + let mut leader_prepare = util.new_leader_prepare(ctx).await.msg; leader_prepare.proposal_payload = Some(Payload(payload_vec)); let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx,leader_prepare.clone()).await; + let res = util + .process_leader_prepare(ctx, leader_prepare.clone()) + .await; assert_matches!( res, Err(LeaderPrepareError::ProposalOversizedPayload{ payload_size, header }) => { @@ -208,11 +210,11 @@ async fn leader_prepare_proposal_oversized_payload() { async fn leader_prepare_proposal_mismatched_payload() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; + let mut util = UTHarness::new(ctx, 1).await; + let mut leader_prepare = util.new_leader_prepare(ctx).await.msg; leader_prepare.proposal_payload = Some(ctx.rng().gen()); let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx,leader_prepare).await; + let res = util.process_leader_prepare(ctx, leader_prepare).await; assert_matches!(res, Err(LeaderPrepareError::ProposalMismatchedPayload)); } @@ -220,12 +222,17 @@ async fn leader_prepare_proposal_mismatched_payload() { async fn leader_prepare_proposal_when_previous_not_finalized() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let replica_prepare = util.new_current_replica_prepare(|_| {}); - let mut leader_prepare = util.process_replica_prepare(ctx,replica_prepare).await.unwrap().unwrap().msg; + let mut util = UTHarness::new(ctx, 1).await; + let replica_prepare = util.new_replica_prepare(|_| {}); + let mut leader_prepare = util + .process_replica_prepare(ctx, replica_prepare) + .await + .unwrap() + .unwrap() + .msg; leader_prepare.justification = util.new_prepare_qc(|msg| msg.high_vote = ctx.rng().gen()); let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx,leader_prepare).await; + let res = util.process_leader_prepare(ctx, leader_prepare).await; assert_matches!( res, Err(LeaderPrepareError::ProposalWhenPreviousNotFinalized) @@ -235,12 +242,19 @@ async fn leader_prepare_proposal_when_previous_not_finalized() { #[tokio::test] async fn leader_prepare_proposal_invalid_parent_hash() { let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let replica_prepare = util.new_current_replica_prepare(|_| {}); - let mut leader_prepare = util.process_replica_prepare(ctx,replica_prepare.clone()).await.unwrap().unwrap().msg; + let mut util = UTHarness::new(ctx, 1).await; + let replica_prepare = util.new_replica_prepare(|_| {}); + let mut 
leader_prepare = util + .process_replica_prepare(ctx, replica_prepare.clone()) + .await + .unwrap() + .unwrap() + .msg; leader_prepare.proposal.parent = ctx.rng().gen(); let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx,leader_prepare.clone()).await; + let res = util + .process_leader_prepare(ctx, leader_prepare.clone()) + .await; assert_matches!( res, Err(LeaderPrepareError::ProposalInvalidParentHash { @@ -259,16 +273,23 @@ async fn leader_prepare_proposal_invalid_parent_hash() { async fn leader_prepare_proposal_non_sequential_number() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let replica_prepare = util.new_current_replica_prepare(|_| {}); - let mut leader_prepare = util.process_replica_prepare(ctx,replica_prepare.clone()).await.unwrap().unwrap().msg; + let mut util = UTHarness::new(ctx, 1).await; + let replica_prepare = util.new_replica_prepare(|_| {}); + let mut leader_prepare = util + .process_replica_prepare(ctx, replica_prepare.clone()) + .await + .unwrap() + .unwrap() + .msg; let correct_num = replica_prepare.msg.high_vote.proposal.number.next(); assert_eq!(correct_num, leader_prepare.proposal.number); let non_seq_num = correct_num.next(); leader_prepare.proposal.number = non_seq_num; let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx,leader_prepare.clone()).await; + let res = util + .process_leader_prepare(ctx, leader_prepare.clone()) + .await; assert_matches!( res, Err(LeaderPrepareError::ProposalNonSequentialNumber { correct_number, received_number, header }) => { @@ -285,23 +306,39 @@ async fn leader_prepare_reproposal_without_quorum() { let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let mut util = UTHarness::new_many(ctx).await; - util.set_view(util.owner_as_view_leader()); - let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; - leader_prepare.justification = util.new_prepare_qc_many(|msg| msg.high_vote = rng.gen()); + let replica_prepare = util.new_replica_prepare(|_| {}).msg; + let mut leader_prepare = util + .process_replica_prepare_all(ctx, replica_prepare.clone()) + .await + .msg; + + // Turn leader_prepare into an unjustified reproposal. 
+ let replica_prepares: Vec<_> = util + .keys + .iter() + .map(|k| { + let mut msg = replica_prepare.clone(); + msg.high_vote = rng.gen(); + k.sign_msg(msg) + }) + .collect(); + leader_prepare.justification = + PrepareQC::from(&replica_prepares, &util.validator_set()).unwrap(); leader_prepare.proposal_payload = None; - let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx,leader_prepare).await; + + let leader_prepare = util.keys[0].sign_msg(leader_prepare); + let res = util.process_leader_prepare(ctx, leader_prepare).await; assert_matches!(res, Err(LeaderPrepareError::ReproposalWithoutQuorum)); } #[tokio::test] async fn leader_prepare_reproposal_when_finalized() { let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; + let mut util = UTHarness::new(ctx, 1).await; + let mut leader_prepare = util.new_leader_prepare(ctx).await.msg; leader_prepare.proposal_payload = None; let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx,leader_prepare).await; + let res = util.process_leader_prepare(ctx, leader_prepare).await; assert_matches!(res, Err(LeaderPrepareError::ReproposalWhenFinalized)); } @@ -309,12 +346,12 @@ async fn leader_prepare_reproposal_when_finalized() { async fn leader_prepare_reproposal_invalid_block() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let mut leader_prepare = util.new_procedural_leader_prepare(ctx).await.msg; + let mut util = UTHarness::new(ctx, 1).await; + let mut leader_prepare = util.new_leader_prepare(ctx).await.msg; leader_prepare.justification = util.new_prepare_qc(|msg| msg.high_vote = ctx.rng().gen()); leader_prepare.proposal_payload = None; let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx,leader_prepare).await; + let res = util.process_leader_prepare(ctx, leader_prepare).await; assert_matches!(res, Err(LeaderPrepareError::ReproposalInvalidBlock)); } @@ -322,24 +359,28 @@ async fn leader_prepare_reproposal_invalid_block() { async fn leader_commit_sanity() { let ctx = &ctx::test_root(&ctx::RealClock); let mut util = UTHarness::new_many(ctx).await; - util.set_view(util.owner_as_view_leader()); - let leader_commit = util.new_procedural_leader_commit(ctx).await; - util.process_leader_commit(ctx,leader_commit).await.unwrap(); + let leader_commit = util.new_leader_commit(ctx).await; + util.process_leader_commit(ctx, leader_commit) + .await + .unwrap(); } #[tokio::test] async fn leader_commit_sanity_yield_replica_prepare() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,1).await; - let leader_commit = util.new_procedural_leader_commit(ctx).await; - let replica_prepare = util.process_leader_commit(ctx,leader_commit.clone()).await.unwrap(); + let mut util = UTHarness::new(ctx, 1).await; + let leader_commit = util.new_leader_commit(ctx).await; + let replica_prepare = util + .process_leader_commit(ctx, leader_commit.clone()) + .await + .unwrap(); assert_eq!( replica_prepare.msg, ReplicaPrepare { protocol_version: leader_commit.msg.protocol_version, view: leader_commit.msg.justification.message.view.next(), - high_vote: leader_commit.msg.justification.message.clone(), + high_vote: leader_commit.msg.justification.message, 
high_qc: leader_commit.msg.justification, } ); @@ -348,12 +389,12 @@ async fn leader_commit_sanity_yield_replica_prepare() { #[tokio::test] async fn leader_commit_invalid_leader() { let ctx = &ctx::test_root(&ctx::RealClock); - let mut util = UTHarness::new(ctx,2).await; + let mut util = UTHarness::new(ctx, 2).await; let current_view_leader = util.view_leader(util.consensus.replica.view); assert_ne!(current_view_leader, util.owner_key().public()); - let leader_commit = util.new_rnd_leader_commit(&mut ctx.rng(),|_| {}); - let res = util.process_leader_commit(ctx,leader_commit).await; + let leader_commit = util.new_rnd_leader_commit(&mut ctx.rng(), |_| {}); + let res = util.process_leader_commit(ctx, leader_commit).await; assert_matches!(res, Err(LeaderCommitError::InvalidLeader { .. })); } @@ -362,10 +403,10 @@ async fn leader_commit_invalid_sig() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - let mut util = UTHarness::new(ctx,1).await; - let mut leader_commit = util.new_rnd_leader_commit(rng,|_| {}); + let mut util = UTHarness::new(ctx, 1).await; + let mut leader_commit = util.new_rnd_leader_commit(rng, |_| {}); leader_commit.sig = rng.gen(); - let res = util.process_leader_commit(ctx,leader_commit).await; + let res = util.process_leader_commit(ctx, leader_commit).await; assert_matches!(res, Err(LeaderCommitError::InvalidSignature { .. })); } @@ -373,8 +414,8 @@ async fn leader_commit_invalid_sig() { async fn leader_commit_invalid_commit_qc() { let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - let mut util = UTHarness::new(ctx,1).await; - let leader_commit = util.new_rnd_leader_commit(rng,|_| {}); - let res = util.process_leader_commit(ctx,leader_commit).await; + let mut util = UTHarness::new(ctx, 1).await; + let leader_commit = util.new_rnd_leader_commit(rng, |_| {}); + let res = util.process_leader_commit(ctx, leader_commit).await; assert_matches!(res, Err(LeaderCommitError::InvalidJustification { .. })); } diff --git a/node/actors/bft/src/testonly/ut_harness.rs b/node/actors/bft/src/testonly/ut_harness.rs index c197fbf4..3ffddff3 100644 --- a/node/actors/bft/src/testonly/ut_harness.rs +++ b/node/actors/bft/src/testonly/ut_harness.rs @@ -1,20 +1,19 @@ use crate::{ io::{InputMessage, OutputMessage}, leader::{ReplicaCommitError, ReplicaPrepareError}, - replica::{LeaderPrepareError, LeaderCommitError}, + replica::{LeaderCommitError, LeaderPrepareError}, Consensus, }; -use zksync_consensus_utils::enum_util::{Variant}; use assert_matches::assert_matches; -use rand::{Rng}; +use rand::Rng; use std::cmp::Ordering; use zksync_concurrency::ctx; use zksync_consensus_network::io::ConsensusInputMessage; use zksync_consensus_roles::validator::{ - self, BlockHeader, CommitQC, LeaderCommit, LeaderPrepare, Payload, Phase, - PrepareQC, ReplicaCommit, ReplicaPrepare, SecretKey, Signed, ViewNumber, + self, BlockHeader, CommitQC, LeaderCommit, LeaderPrepare, Payload, Phase, PrepareQC, + ReplicaCommit, ReplicaPrepare, SecretKey, Signed, ViewNumber, }; -use zksync_consensus_utils::pipe::DispatcherPipe; +use zksync_consensus_utils::{enum_util::Variant, pipe::DispatcherPipe}; /// `UTHarness` provides various utilities for unit tests. /// It is designed to simplify the setup and execution of test cases by encapsulating @@ -23,8 +22,8 @@ use zksync_consensus_utils::pipe::DispatcherPipe; /// It should be instantiated once for every test case. 
#[cfg(test)] pub(crate) struct UTHarness { - pub consensus: Consensus, - pub keys: Vec, + pub(crate) consensus: Consensus, + pub(crate) keys: Vec, pipe: DispatcherPipe, } @@ -39,18 +38,22 @@ impl UTHarness { Payload(vec![]), ); let (mut consensus, pipe) = - crate::testonly::make_consensus(&ctx, &keys[0], &val_set, &genesis).await; + crate::testonly::make_consensus(ctx, &keys[0], &val_set, &genesis).await; consensus.leader.view = ViewNumber(1); consensus.replica.view = ViewNumber(1); - UTHarness { consensus, pipe, keys } + UTHarness { + consensus, + pipe, + keys, + } } /// Creates a new `UTHarness` with minimally-significant validator set size. pub(crate) async fn new_many(ctx: &ctx::Ctx) -> UTHarness { let num_validators = 6; assert!(crate::misc::faulty_replicas(num_validators) > 0); - UTHarness::new(ctx,num_validators).await + UTHarness::new(ctx, num_validators).await } pub(crate) fn consensus_threshold(&self) -> usize { @@ -61,12 +64,12 @@ impl UTHarness { &self.consensus.inner.secret_key } - pub(crate) fn owner_as_view_leader(&self) -> ViewNumber { + pub(crate) fn set_owner_as_view_leader(&mut self) { let mut view = self.consensus.replica.view; while self.view_leader(view) != self.owner_key().public() { view = view.next(); } - view + self.consensus.replica.view = view; } pub(crate) fn set_view(&mut self, view: ViewNumber) { @@ -78,33 +81,20 @@ impl UTHarness { self.consensus.leader.view = view } - pub(crate) fn set_leader_phase(&mut self, phase: Phase) { - self.consensus.leader.phase = phase - } - pub(crate) fn set_replica_view(&mut self, view: ViewNumber) { self.consensus.replica.view = view } - pub(crate) fn new_unfinalized_replica_prepare(&self) -> Signed { - self.new_current_replica_prepare(|msg| { - let mut high_vote = ReplicaCommit { - protocol_version: validator::ProtocolVersion::EARLIEST, - view: self.consensus.replica.view.next(), - proposal: self.consensus.replica.high_qc.message.proposal, - }; - - high_vote.proposal.parent = high_vote.proposal.hash(); - high_vote.proposal.number = high_vote.proposal.number.next(); - - msg.high_vote = high_vote; - }) + pub(crate) fn replica_timeout(&mut self) { + self.consensus.replica.view = self.consensus.replica.view.next(); + self.consensus.replica.phase = Phase::Prepare; } - pub(crate) fn new_current_replica_prepare( - &self, + pub(crate) fn new_replica_prepare( + &mut self, mutate_fn: impl FnOnce(&mut ReplicaPrepare), ) -> Signed { + self.set_owner_as_view_leader(); let mut msg = ReplicaPrepare { protocol_version: validator::ProtocolVersion::EARLIEST, view: self.consensus.replica.view, @@ -164,19 +154,22 @@ impl UTHarness { self.consensus.inner.secret_key.sign_msg(msg) } - pub(crate) async fn new_procedural_leader_prepare(&mut self, ctx: &ctx::Ctx) -> Signed { - let replica_prepare = self.new_current_replica_prepare(|_| {}).msg; + pub(crate) async fn new_leader_prepare(&mut self, ctx: &ctx::Ctx) -> Signed { + let replica_prepare = self.new_replica_prepare(|_| {}).msg; self.process_replica_prepare_all(ctx, replica_prepare).await } - pub(crate) async fn new_procedural_replica_commit(&mut self, ctx: &ctx::Ctx) -> Signed { - let leader_prepare = self.new_procedural_leader_prepare(ctx).await; - self.process_leader_prepare(ctx,leader_prepare).await.unwrap() + pub(crate) async fn new_replica_commit(&mut self, ctx: &ctx::Ctx) -> Signed { + let leader_prepare = self.new_leader_prepare(ctx).await; + self.process_leader_prepare(ctx, leader_prepare) + .await + .unwrap() } - pub(crate) async fn new_procedural_leader_commit(&mut self, ctx: &ctx::Ctx) 
-> Signed { - let replica_commit = self.new_procedural_replica_commit(ctx).await; - self.process_replica_commit_all(ctx, replica_commit.msg).await + pub(crate) async fn new_leader_commit(&mut self, ctx: &ctx::Ctx) -> Signed { + let replica_commit = self.new_replica_commit(ctx).await; + self.process_replica_commit_all(ctx, replica_commit.msg) + .await } pub(crate) async fn process_leader_prepare( @@ -184,10 +177,9 @@ impl UTHarness { ctx: &ctx::Ctx, msg: Signed, ) -> Result, LeaderPrepareError> { - self - .consensus + self.consensus .replica - .process_leader_prepare(&ctx, &self.consensus.inner, msg) + .process_leader_prepare(ctx, &self.consensus.inner, msg) .await?; Ok(self.try_recv().unwrap()) } @@ -197,8 +189,7 @@ impl UTHarness { ctx: &ctx::Ctx, msg: Signed, ) -> Result, LeaderCommitError> { - self - .consensus + self.consensus .replica .process_leader_commit(ctx, &self.consensus.inner, msg) .await?; @@ -218,14 +209,19 @@ impl UTHarness { Ok(self.try_recv()) } - pub(crate) async fn process_replica_prepare_all(&mut self,ctx: &ctx::Ctx, msg: ReplicaPrepare) -> Signed { + pub(crate) async fn process_replica_prepare_all( + &mut self, + ctx: &ctx::Ctx, + msg: ReplicaPrepare, + ) -> Signed { for (i, key) in self.keys.iter().enumerate() { - let res = self.consensus + let res = self + .consensus .leader .process_replica_prepare(ctx, &self.consensus.inner, key.sign_msg(msg.clone())) .await; match (i + 1).cmp(&self.consensus_threshold()) { - Ordering::Equal => { res.unwrap() }, + Ordering::Equal => res.unwrap(), Ordering::Less => assert_matches!( res, Err(ReplicaPrepareError::NumReceivedBelowThreshold { @@ -247,15 +243,25 @@ impl UTHarness { ctx: &ctx::Ctx, msg: Signed, ) -> Result>, ReplicaCommitError> { - self.consensus.leader.process_replica_commit(&ctx,&self.consensus.inner,msg)?; + self.consensus + .leader + .process_replica_commit(ctx, &self.consensus.inner, msg)?; Ok(self.try_recv()) } - async fn process_replica_commit_all(&mut self, ctx: &ctx::Ctx, msg: ReplicaCommit) -> Signed { + async fn process_replica_commit_all( + &mut self, + ctx: &ctx::Ctx, + msg: ReplicaCommit, + ) -> Signed { for (i, key) in self.keys.iter().enumerate() { - let res = self.consensus.leader.process_replica_commit(&ctx,&self.consensus.inner,key.sign_msg(msg.clone())); + let res = self.consensus.leader.process_replica_commit( + ctx, + &self.consensus.inner, + key.sign_msg(msg), + ); match (i + 1).cmp(&self.consensus_threshold()) { - Ordering::Equal => { res.unwrap() }, + Ordering::Equal => res.unwrap(), Ordering::Less => assert_matches!( res, Err(ReplicaCommitError::NumReceivedBelowThreshold { @@ -272,14 +278,12 @@ impl UTHarness { self.try_recv().unwrap() } - fn try_recv>(&mut self) -> Option> { - self.pipe - .try_recv() - .map(|message| match message { - OutputMessage::Network(ConsensusInputMessage { - message, .. - }) => message.cast().unwrap(), - }) + fn try_recv>(&mut self) -> Option> { + self.pipe.try_recv().map(|message| match message { + OutputMessage::Network(ConsensusInputMessage { message, .. 
}) => { + message.cast().unwrap() + } + }) } pub(crate) fn view_leader(&self, view: ViewNumber) -> validator::PublicKey { @@ -292,26 +296,16 @@ impl UTHarness { pub(crate) fn new_commit_qc(&self, mutate_fn: impl FnOnce(&mut ReplicaCommit)) -> CommitQC { let msg = self.new_current_replica_commit(mutate_fn).msg; - let msgs: Vec<_> = self.keys.iter().map(|k| k.sign_msg(msg.clone())).collect(); + let msgs: Vec<_> = self.keys.iter().map(|k| k.sign_msg(msg)).collect(); CommitQC::from(&msgs, &self.validator_set()).unwrap() } - pub(crate) fn new_prepare_qc(&self, mutate_fn: impl FnOnce(&mut ReplicaPrepare)) -> PrepareQC { - let msg = self.new_current_replica_prepare(mutate_fn).msg; - let msgs: Vec<_> = self.keys.iter().map(|k| k.sign_msg(msg.clone())).collect(); - PrepareQC::from(&msgs, &self.validator_set()).unwrap() - } - - pub(crate) fn new_prepare_qc_many( + pub(crate) fn new_prepare_qc( &mut self, - mut mutate_fn: impl FnMut(&mut ReplicaPrepare), + mutate_fn: impl FnOnce(&mut ReplicaPrepare), ) -> PrepareQC { - let msgs: Vec<_> = self.keys.iter() - .map(|sk| { - let msg = self.new_current_replica_prepare(|msg| mutate_fn(msg)).msg; - sk.sign_msg(msg) - }) - .collect(); + let msg = self.new_replica_prepare(mutate_fn).msg; + let msgs: Vec<_> = self.keys.iter().map(|k| k.sign_msg(msg.clone())).collect(); PrepareQC::from(&msgs, &self.validator_set()).unwrap() } } diff --git a/node/libs/storage/src/replica_state.rs b/node/libs/storage/src/replica_state.rs index 8c85fed1..aa46e15c 100644 --- a/node/libs/storage/src/replica_state.rs +++ b/node/libs/storage/src/replica_state.rs @@ -65,8 +65,13 @@ impl ReplicaStore { } /// Verify that `payload` is a correct proposal for the block `block_number`. - pub async fn verify_payload(&self, ctx: &ctx::Ctx, block_number: validator::BlockNumber, payload: &validator::Payload) -> ctx::Result<()> { - self.blocks.verify_payload(ctx,block_number,payload).await + pub async fn verify_payload( + &self, + ctx: &ctx::Ctx, + block_number: validator::BlockNumber, + payload: &validator::Payload, + ) -> ctx::Result<()> { + self.blocks.verify_payload(ctx, block_number, payload).await } /// Puts a block into this storage. 
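// --- Editorial aside (not part of the patch series): minimal usage sketch ---
// The hunks above rework the storage layer so that finalized blocks are written
// directly through `WriteBlockStore`. The sketch below shows how a caller might
// drive those methods, assuming only the signatures visible in this series
// (`InMemoryStorage::new`, `verify_payload`, `put_block`). The function name and
// the `genesis` / `next_block` arguments are illustrative, not code from the repo.
use zksync_concurrency::ctx;
use zksync_consensus_roles::validator;
use zksync_consensus_storage::{InMemoryStorage, WriteBlockStore as _};

async fn persist_finalized_block(
    ctx: &ctx::Ctx,
    genesis: validator::FinalBlock,
    next_block: validator::FinalBlock,
) -> ctx::Result<()> {
    // Seed the store with the genesis block; per PATCH 13/14 its header may
    // carry a non-zero starting number.
    let storage = InMemoryStorage::new(genesis);
    // Reject payloads that are not ahead of the current head block.
    storage
        .verify_payload(ctx, next_block.header.number, &next_block.payload)
        .await?;
    // Persist the finalized block directly to storage.
    storage.put_block(ctx, &next_block).await?;
    Ok(())
}
// --- End editorial aside ---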
diff --git a/node/libs/storage/src/traits.rs b/node/libs/storage/src/traits.rs index 20bb2c64..c2f5e1ab 100644 --- a/node/libs/storage/src/traits.rs +++ b/node/libs/storage/src/traits.rs @@ -90,7 +90,9 @@ pub trait WriteBlockStore: BlockStore { ) -> ctx::Result<()> { let head_number = self.head_block(ctx).await?.header.number; if head_number >= block_number { - Err(anyhow::anyhow!("received proposal for block {block_number:?}, while head is at {head_number:?}"))?; + return Err(anyhow::anyhow!( + "received proposal for block {block_number:?}, while head is at {head_number:?}" + ).into()); } Ok(()) } From 30b85940c3ce0831474b3db44401f4607b980116 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Mon, 4 Dec 2023 13:06:47 +0100 Subject: [PATCH 10/14] cargo fmt --- node/libs/storage/src/traits.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/node/libs/storage/src/traits.rs b/node/libs/storage/src/traits.rs index c2f5e1ab..57687c80 100644 --- a/node/libs/storage/src/traits.rs +++ b/node/libs/storage/src/traits.rs @@ -92,7 +92,8 @@ pub trait WriteBlockStore: BlockStore { if head_number >= block_number { return Err(anyhow::anyhow!( "received proposal for block {block_number:?}, while head is at {head_number:?}" - ).into()); + ) + .into()); } Ok(()) } From 88f2d038b26ceb990437c4d70a1f715a44d0dd25 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Mon, 4 Dec 2023 14:10:50 +0100 Subject: [PATCH 11/14] removed trait implementations for Arc --- node/actors/executor/src/lib.rs | 2 +- .../actors/sync_blocks/src/peers/tests/mod.rs | 2 +- node/libs/storage/src/traits.rs | 56 +------------------ 3 files changed, 4 insertions(+), 56 deletions(-) diff --git a/node/actors/executor/src/lib.rs b/node/actors/executor/src/lib.rs index c1388bc5..33c46a5f 100644 --- a/node/actors/executor/src/lib.rs +++ b/node/actors/executor/src/lib.rs @@ -6,7 +6,7 @@ use zksync_concurrency::{ctx, net, scope}; use zksync_consensus_bft::{misc::consensus_threshold, Consensus, PayloadSource}; use zksync_consensus_network as network; use zksync_consensus_roles::{node, validator}; -use zksync_consensus_storage::{BlockStore, ReplicaStateStore, ReplicaStore, WriteBlockStore}; +use zksync_consensus_storage::{ReplicaStateStore, ReplicaStore, WriteBlockStore}; use zksync_consensus_sync_blocks::SyncBlocks; use zksync_consensus_utils::pipe; diff --git a/node/actors/sync_blocks/src/peers/tests/mod.rs b/node/actors/sync_blocks/src/peers/tests/mod.rs index 06e50b97..cf902834 100644 --- a/node/actors/sync_blocks/src/peers/tests/mod.rs +++ b/node/actors/sync_blocks/src/peers/tests/mod.rs @@ -7,7 +7,7 @@ use std::{collections::HashSet, fmt}; use test_casing::{test_casing, Product}; use zksync_concurrency::{testonly::abort_on_panic, time}; use zksync_consensus_roles::validator; -use zksync_consensus_storage::{BlockStore, InMemoryStorage}; +use zksync_consensus_storage::InMemoryStorage; mod basics; mod fakes; diff --git a/node/libs/storage/src/traits.rs b/node/libs/storage/src/traits.rs index 57687c80..f5a3f5de 100644 --- a/node/libs/storage/src/traits.rs +++ b/node/libs/storage/src/traits.rs @@ -1,7 +1,7 @@ //! Traits for storage. 
use crate::types::ReplicaState; use async_trait::async_trait; -use std::{fmt, ops, sync::Arc}; +use std::{fmt, ops}; use zksync_concurrency::{ctx, sync::watch}; use zksync_consensus_roles::validator::{BlockNumber, FinalBlock, Payload}; @@ -45,37 +45,6 @@ pub trait BlockStore: fmt::Debug + Send + Sync { fn subscribe_to_block_writes(&self) -> watch::Receiver; } -#[async_trait] -impl BlockStore for Arc { - async fn head_block(&self, ctx: &ctx::Ctx) -> ctx::Result { - (**self).head_block(ctx).await - } - - async fn first_block(&self, ctx: &ctx::Ctx) -> ctx::Result { - (**self).first_block(ctx).await - } - - async fn last_contiguous_block_number(&self, ctx: &ctx::Ctx) -> ctx::Result { - (**self).last_contiguous_block_number(ctx).await - } - - async fn block(&self, ctx: &ctx::Ctx, number: BlockNumber) -> ctx::Result> { - (**self).block(ctx, number).await - } - - async fn missing_block_numbers( - &self, - ctx: &ctx::Ctx, - range: ops::Range, - ) -> ctx::Result> { - (**self).missing_block_numbers(ctx, range).await - } - - fn subscribe_to_block_writes(&self) -> watch::Receiver { - (**self).subscribe_to_block_writes() - } -} - /// Mutable storage of L2 blocks. /// /// Implementations **must** propagate context cancellation using [`ctx::Error::Canceled`]. @@ -88,6 +57,7 @@ pub trait WriteBlockStore: BlockStore { block_number: BlockNumber, _payload: &Payload, ) -> ctx::Result<()> { + tracing::error!("default verify_payload() impl"); let head_number = self.head_block(ctx).await?.header.number; if head_number >= block_number { return Err(anyhow::anyhow!( @@ -101,13 +71,6 @@ pub trait WriteBlockStore: BlockStore { async fn put_block(&self, ctx: &ctx::Ctx, block: &FinalBlock) -> ctx::Result<()>; } -#[async_trait] -impl WriteBlockStore for Arc { - async fn put_block(&self, ctx: &ctx::Ctx, block: &FinalBlock) -> ctx::Result<()> { - (**self).put_block(ctx, block).await - } -} - /// Storage for [`ReplicaState`]. /// /// Implementations **must** propagate context cancellation using [`StorageError::Canceled`]. 
@@ -123,18 +86,3 @@ pub trait ReplicaStateStore: fmt::Debug + Send + Sync { replica_state: &ReplicaState, ) -> ctx::Result<()>; } - -#[async_trait] -impl ReplicaStateStore for Arc { - async fn replica_state(&self, ctx: &ctx::Ctx) -> ctx::Result> { - (**self).replica_state(ctx).await - } - - async fn put_replica_state( - &self, - ctx: &ctx::Ctx, - replica_state: &ReplicaState, - ) -> ctx::Result<()> { - (**self).put_replica_state(ctx, replica_state).await - } -} From 7ffe2d521ffa66dc947dd30977cc62ed6f0504b8 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Mon, 4 Dec 2023 14:20:20 +0100 Subject: [PATCH 12/14] removed log --- node/libs/storage/src/traits.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/node/libs/storage/src/traits.rs b/node/libs/storage/src/traits.rs index f5a3f5de..50865485 100644 --- a/node/libs/storage/src/traits.rs +++ b/node/libs/storage/src/traits.rs @@ -57,7 +57,6 @@ pub trait WriteBlockStore: BlockStore { block_number: BlockNumber, _payload: &Payload, ) -> ctx::Result<()> { - tracing::error!("default verify_payload() impl"); let head_number = self.head_block(ctx).await?.header.number; if head_number >= block_number { return Err(anyhow::anyhow!( From 421b8dd43918b377b9d4adafe82c9636472562ea Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Mon, 4 Dec 2023 14:58:52 +0100 Subject: [PATCH 13/14] support for non-0 genesis block --- node/actors/bft/src/testonly/make.rs | 3 ++- node/actors/bft/src/testonly/run.rs | 1 + node/actors/bft/src/testonly/ut_harness.rs | 1 + node/actors/executor/src/testonly.rs | 9 ++++----- node/actors/executor/src/tests.rs | 5 +++++ node/actors/sync_blocks/src/tests/mod.rs | 2 +- node/libs/roles/src/validator/messages/block.rs | 4 ++-- node/libs/roles/src/validator/testonly.rs | 2 +- node/libs/storage/src/tests/mod.rs | 2 +- node/tools/src/bin/localnet_config.rs | 1 + 10 files changed, 19 insertions(+), 11 deletions(-) diff --git a/node/actors/bft/src/testonly/make.rs b/node/actors/bft/src/testonly/make.rs index 74e9a1b6..c87fbd78 100644 --- a/node/actors/bft/src/testonly/make.rs +++ b/node/actors/bft/src/testonly/make.rs @@ -59,8 +59,9 @@ pub fn make_genesis( keys: &[validator::SecretKey], protocol_version: validator::ProtocolVersion, payload: validator::Payload, + block_number: validator::BlockNumber, ) -> (validator::FinalBlock, validator::ValidatorSet) { - let header = validator::BlockHeader::genesis(payload.hash()); + let header = validator::BlockHeader::genesis(payload.hash(), block_number); let validator_set = validator::ValidatorSet::new(keys.iter().map(|k| k.public())).unwrap(); let signed_messages: Vec<_> = keys .iter() diff --git a/node/actors/bft/src/testonly/run.rs b/node/actors/bft/src/testonly/run.rs index 8a830641..63384362 100644 --- a/node/actors/bft/src/testonly/run.rs +++ b/node/actors/bft/src/testonly/run.rs @@ -36,6 +36,7 @@ impl Test { &keys, validator::ProtocolVersion::EARLIEST, validator::Payload(vec![]), + validator::BlockNumber(0), ); let nodes: Vec<_> = nodes .into_iter() diff --git a/node/actors/bft/src/testonly/ut_harness.rs b/node/actors/bft/src/testonly/ut_harness.rs index 3ffddff3..dc47f46e 100644 --- a/node/actors/bft/src/testonly/ut_harness.rs +++ b/node/actors/bft/src/testonly/ut_harness.rs @@ -36,6 +36,7 @@ impl UTHarness { &keys, validator::ProtocolVersion::EARLIEST, Payload(vec![]), + validator::BlockNumber(0), ); let (mut consensus, pipe) = crate::testonly::make_consensus(ctx, &keys[0], &val_set, &genesis).await; diff --git a/node/actors/executor/src/testonly.rs 
b/node/actors/executor/src/testonly.rs index 3ee0a2eb..d88264cd 100644 --- a/node/actors/executor/src/testonly.rs +++ b/node/actors/executor/src/testonly.rs @@ -5,10 +5,7 @@ use std::collections::HashMap; use zksync_concurrency::net; use zksync_consensus_bft::testonly::make_genesis; use zksync_consensus_network::{consensus, testonly::Instance}; -use zksync_consensus_roles::{ - node, - validator::{self, Payload}, -}; +use zksync_consensus_roles::{node, validator}; impl ConsensusConfig { fn from_network_config( @@ -44,7 +41,8 @@ impl FullValidatorConfig { pub fn for_single_validator( rng: &mut impl Rng, protocol_version: validator::ProtocolVersion, - genesis_block_payload: Payload, + genesis_block_payload: validator::Payload, + genesis_block_number: validator::BlockNumber, ) -> Self { let mut net_configs = Instance::new_configs(rng, 1, 0); assert_eq!(net_configs.len(), 1); @@ -58,6 +56,7 @@ impl FullValidatorConfig { &[validator_key.clone()], protocol_version, genesis_block_payload, + genesis_block_number, ); let node_key = net_config.gossip.key.clone(); let node_config = ExecutorConfig { diff --git a/node/actors/executor/src/tests.rs b/node/actors/executor/src/tests.rs index db2fa1d8..0e035355 100644 --- a/node/actors/executor/src/tests.rs +++ b/node/actors/executor/src/tests.rs @@ -77,6 +77,7 @@ async fn executor_misconfiguration(name: &str, mutation: fn(&mut FinalBlock)) { rng, validator::ProtocolVersion::EARLIEST, Payload(vec![]), + BlockNumber(0), ); let genesis_block = &mut validator.node_config.genesis_block; mutation(genesis_block); @@ -98,6 +99,7 @@ async fn genesis_block_mismatch() { rng, validator::ProtocolVersion::EARLIEST, Payload(vec![]), + BlockNumber(0), ); let mut genesis_block = validator.node_config.genesis_block.clone(); genesis_block.header.number = BlockNumber(1); @@ -119,6 +121,7 @@ async fn executing_single_validator() { rng, validator::ProtocolVersion::EARLIEST, Payload(vec![]), + BlockNumber(0), ); let genesis_block = &validator.node_config.genesis_block; let storage = InMemoryStorage::new(genesis_block.clone()); @@ -148,6 +151,7 @@ async fn executing_validator_and_full_node() { rng, validator::ProtocolVersion::EARLIEST, Payload(vec![]), + BlockNumber(0), ); let full_node = validator.connect_full_node(rng); @@ -194,6 +198,7 @@ async fn syncing_full_node_from_snapshot(delay_block_storage: bool) { rng, validator::ProtocolVersion::EARLIEST, Payload(vec![]), + BlockNumber(0), ); let mut full_node = validator.connect_full_node(rng); diff --git a/node/actors/sync_blocks/src/tests/mod.rs b/node/actors/sync_blocks/src/tests/mod.rs index 154c2e0b..f88007b2 100644 --- a/node/actors/sync_blocks/src/tests/mod.rs +++ b/node/actors/sync_blocks/src/tests/mod.rs @@ -48,7 +48,7 @@ impl TestValidators { }; let payload = Payload(vec![]); - let mut latest_block = BlockHeader::genesis(payload.hash()); + let mut latest_block = BlockHeader::genesis(payload.hash(), BlockNumber(0)); let final_blocks = (0..block_count).map(|_| { let final_block = FinalBlock { header: latest_block, diff --git a/node/libs/roles/src/validator/messages/block.rs b/node/libs/roles/src/validator/messages/block.rs index 5fae9ec4..adc3fe7b 100644 --- a/node/libs/roles/src/validator/messages/block.rs +++ b/node/libs/roles/src/validator/messages/block.rs @@ -131,10 +131,10 @@ impl BlockHeader { } /// Creates a genesis block. 
- pub fn genesis(payload: PayloadHash) -> Self { + pub fn genesis(payload: PayloadHash, number: BlockNumber) -> Self { Self { parent: BlockHeaderHash(Keccak256::default()), - number: BlockNumber(0), + number, payload, } } diff --git a/node/libs/roles/src/validator/testonly.rs b/node/libs/roles/src/validator/testonly.rs index 038aae2e..dfe1e0f4 100644 --- a/node/libs/roles/src/validator/testonly.rs +++ b/node/libs/roles/src/validator/testonly.rs @@ -36,7 +36,7 @@ pub fn make_justification( /// WARNING: it is not a fully correct FinalBlock. pub fn make_genesis_block(rng: &mut R, protocol_version: ProtocolVersion) -> FinalBlock { let payload: Payload = rng.gen(); - let header = BlockHeader::genesis(payload.hash()); + let header = BlockHeader::genesis(payload.hash(), BlockNumber(0)); let justification = make_justification(rng, &header, protocol_version); FinalBlock { header, diff --git a/node/libs/storage/src/tests/mod.rs b/node/libs/storage/src/tests/mod.rs index 229209d1..ffcb4744 100644 --- a/node/libs/storage/src/tests/mod.rs +++ b/node/libs/storage/src/tests/mod.rs @@ -31,7 +31,7 @@ impl InitStore for () { fn genesis_block(rng: &mut impl Rng) -> FinalBlock { let payload = Payload(vec![]); FinalBlock { - header: BlockHeader::genesis(payload.hash()), + header: BlockHeader::genesis(payload.hash(), BlockNumber(0)), payload, justification: rng.gen(), } diff --git a/node/tools/src/bin/localnet_config.rs b/node/tools/src/bin/localnet_config.rs index 3b5461ed..bbeffebf 100644 --- a/node/tools/src/bin/localnet_config.rs +++ b/node/tools/src/bin/localnet_config.rs @@ -71,6 +71,7 @@ fn main() -> anyhow::Result<()> { &validator_keys, protocol_version, validator::Payload(vec![]), + validator::BlockNumber(0), ); // Each node will have `gossip_peers` outbound peers. From 09c240c6647b80252c3113953c98f0305501d474 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Thu, 7 Dec 2023 13:29:59 +0100 Subject: [PATCH 14/14] applied comments --- node/actors/bft/src/lib.rs | 2 +- node/actors/executor/src/lib.rs | 11 +++- node/libs/storage/src/in_memory.rs | 63 ++++++++++++++------ node/libs/storage/src/replica_state.rs | 2 +- node/libs/storage/src/rocksdb.rs | 81 ++++++++++++++++++-------- node/libs/storage/src/traits.rs | 12 +--- 6 files changed, 115 insertions(+), 56 deletions(-) diff --git a/node/actors/bft/src/lib.rs b/node/actors/bft/src/lib.rs index 1da0f59e..840f905a 100644 --- a/node/actors/bft/src/lib.rs +++ b/node/actors/bft/src/lib.rs @@ -81,7 +81,7 @@ impl Consensus { /// Starts the Consensus actor. It will start running, processing incoming messages and /// sending output messages. This is a blocking method. - #[instrument(level = "trace", skip(self) ret)] + #[instrument(level = "trace", skip(self), err)] pub async fn run(mut self, ctx: &ctx::Ctx) -> anyhow::Result<()> { info!( "Starting consensus actor {:?}", diff --git a/node/actors/executor/src/lib.rs b/node/actors/executor/src/lib.rs index 33c46a5f..a65857d6 100644 --- a/node/actors/executor/src/lib.rs +++ b/node/actors/executor/src/lib.rs @@ -1,7 +1,7 @@ //! Library files for the executor. We have it separate from the binary so that we can use these files in the tools crate. 
use crate::io::Dispatcher; use anyhow::Context as _; -use std::{any, sync::Arc}; +use std::{any, fmt, sync::Arc}; use zksync_concurrency::{ctx, net, scope}; use zksync_consensus_bft::{misc::consensus_threshold, Consensus, PayloadSource}; use zksync_consensus_network as network; @@ -30,6 +30,14 @@ struct ValidatorExecutor { payload_source: Arc, } +impl fmt::Debug for ValidatorExecutor { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("ValidatorExecutor") + .field("config", &self.config) + .finish() + } +} + impl ValidatorExecutor { /// Returns consensus network configuration. fn consensus_config(&self) -> network::consensus::Config { @@ -42,6 +50,7 @@ impl ValidatorExecutor { } /// Executor allowing to spin up all actors necessary for a consensus node. +#[derive(Debug)] pub struct Executor { /// General-purpose executor configuration. executor_config: ExecutorConfig, diff --git a/node/libs/storage/src/in_memory.rs b/node/libs/storage/src/in_memory.rs index 425dd3bf..7bbb1394 100644 --- a/node/libs/storage/src/in_memory.rs +++ b/node/libs/storage/src/in_memory.rs @@ -10,30 +10,33 @@ use zksync_concurrency::{ ctx, sync::{watch, Mutex}, }; -use zksync_consensus_roles::validator::{BlockNumber, FinalBlock}; +use zksync_consensus_roles::validator; #[derive(Debug)] struct BlocksInMemoryStore { - blocks: BTreeMap, - last_contiguous_block_number: BlockNumber, + blocks: BTreeMap, + last_contiguous_block_number: validator::BlockNumber, } impl BlocksInMemoryStore { - fn head_block(&self) -> &FinalBlock { + fn head_block(&self) -> &validator::FinalBlock { self.blocks.values().next_back().unwrap() // ^ `unwrap()` is safe by construction; the storage contains at least the genesis block } - fn first_block(&self) -> &FinalBlock { + fn first_block(&self) -> &validator::FinalBlock { self.blocks.values().next().unwrap() // ^ `unwrap()` is safe by construction; the storage contains at least the genesis block } - fn block(&self, number: BlockNumber) -> Option<&FinalBlock> { + fn block(&self, number: validator::BlockNumber) -> Option<&validator::FinalBlock> { self.blocks.get(&number) } - fn missing_block_numbers(&self, range: ops::Range) -> Vec { + fn missing_block_numbers( + &self, + range: ops::Range, + ) -> Vec { let existing_numbers = self .blocks .range(range.clone()) @@ -43,7 +46,7 @@ impl BlocksInMemoryStore { .collect() } - fn put_block(&mut self, block: FinalBlock) { + fn put_block(&mut self, block: validator::FinalBlock) { let block_number = block.header.number; tracing::debug!("Inserting block #{block_number} into database"); if let Some(prev_block) = self.blocks.insert(block_number, block) { @@ -69,12 +72,12 @@ impl BlocksInMemoryStore { pub struct InMemoryStorage { blocks: Mutex, replica_state: Mutex>, - blocks_sender: watch::Sender, + blocks_sender: watch::Sender, } impl InMemoryStorage { /// Creates a new store containing only the specified `genesis_block`. 
- pub fn new(genesis_block: FinalBlock) -> Self { + pub fn new(genesis_block: validator::FinalBlock) -> Self { let genesis_block_number = genesis_block.header.number; Self { blocks: Mutex::new(BlocksInMemoryStore { @@ -89,38 +92,62 @@ impl InMemoryStorage { #[async_trait] impl BlockStore for InMemoryStorage { - async fn head_block(&self, _ctx: &ctx::Ctx) -> ctx::Result { + async fn head_block(&self, _ctx: &ctx::Ctx) -> ctx::Result { Ok(self.blocks.lock().await.head_block().clone()) } - async fn first_block(&self, _ctx: &ctx::Ctx) -> ctx::Result { + async fn first_block(&self, _ctx: &ctx::Ctx) -> ctx::Result { Ok(self.blocks.lock().await.first_block().clone()) } - async fn last_contiguous_block_number(&self, _ctx: &ctx::Ctx) -> ctx::Result { + async fn last_contiguous_block_number( + &self, + _ctx: &ctx::Ctx, + ) -> ctx::Result { Ok(self.blocks.lock().await.last_contiguous_block_number) } - async fn block(&self, _ctx: &ctx::Ctx, number: BlockNumber) -> ctx::Result> { + async fn block( + &self, + _ctx: &ctx::Ctx, + number: validator::BlockNumber, + ) -> ctx::Result> { Ok(self.blocks.lock().await.block(number).cloned()) } async fn missing_block_numbers( &self, _ctx: &ctx::Ctx, - range: ops::Range, - ) -> ctx::Result> { + range: ops::Range, + ) -> ctx::Result> { Ok(self.blocks.lock().await.missing_block_numbers(range)) } - fn subscribe_to_block_writes(&self) -> watch::Receiver { + fn subscribe_to_block_writes(&self) -> watch::Receiver { self.blocks_sender.subscribe() } } #[async_trait] impl WriteBlockStore for InMemoryStorage { - async fn put_block(&self, _ctx: &ctx::Ctx, block: &FinalBlock) -> ctx::Result<()> { + /// Just verifies that the payload is for the successor of the current head. + async fn verify_payload( + &self, + ctx: &ctx::Ctx, + block_number: validator::BlockNumber, + _payload: &validator::Payload, + ) -> ctx::Result<()> { + let head_number = self.head_block(ctx).await?.header.number; + if head_number >= block_number { + return Err(anyhow::anyhow!( + "received proposal for block {block_number:?}, while head is at {head_number:?}" + ) + .into()); + } + Ok(()) + } + + async fn put_block(&self, _ctx: &ctx::Ctx, block: &validator::FinalBlock) -> ctx::Result<()> { self.blocks.lock().await.put_block(block.clone()); self.blocks_sender.send_replace(block.header.number); Ok(()) diff --git a/node/libs/storage/src/replica_state.rs b/node/libs/storage/src/replica_state.rs index aa46e15c..5daf2a82 100644 --- a/node/libs/storage/src/replica_state.rs +++ b/node/libs/storage/src/replica_state.rs @@ -20,7 +20,7 @@ impl From for ReplicaState { } } -/// ReplicaStorage. +/// Storage combining [`ReplicaStateStore`] and [`WriteBlockStore`]. #[derive(Debug, Clone)] pub struct ReplicaStore { state: Arc, diff --git a/node/libs/storage/src/rocksdb.rs b/node/libs/storage/src/rocksdb.rs index a5f7b121..6366e313 100644 --- a/node/libs/storage/src/rocksdb.rs +++ b/node/libs/storage/src/rocksdb.rs @@ -18,7 +18,7 @@ use std::{ }, }; use zksync_concurrency::{ctx, scope, sync::watch}; -use zksync_consensus_roles::validator::{BlockNumber, FinalBlock}; +use zksync_consensus_roles::validator; /// Enum used to represent a key in the database. It also acts as a separator between different stores. #[derive(Debug, Clone, PartialEq, Eq)] @@ -27,8 +27,8 @@ enum DatabaseKey { /// ReplicaState -> ReplicaState ReplicaState, /// Key used to store the finalized blocks. 
- /// Block(BlockNumber) -> FinalBlock - Block(BlockNumber), + /// Block(validator::BlockNumber) -> validator::FinalBlock + Block(validator::BlockNumber), } impl DatabaseKey { @@ -57,11 +57,11 @@ impl DatabaseKey { } /// Parses the specified bytes as a `Self::Block(_)` key. - pub(crate) fn parse_block_key(raw_key: &[u8]) -> anyhow::Result { + pub(crate) fn parse_block_key(raw_key: &[u8]) -> anyhow::Result { let raw_key = raw_key .try_into() .context("Invalid encoding for block key")?; - Ok(BlockNumber(u64::from_be_bytes(raw_key))) + Ok(validator::BlockNumber(u64::from_be_bytes(raw_key))) } } @@ -79,7 +79,7 @@ pub struct RocksdbStorage { /// that blocks are never removed from the DB. cached_last_contiguous_block_number: AtomicU64, /// Sender of numbers of written blocks. - block_writes_sender: watch::Sender, + block_writes_sender: watch::Sender, } impl RocksdbStorage { @@ -87,7 +87,11 @@ impl RocksdbStorage { /// a new one. We need the genesis block of the chain as input. // TODO(bruno): we want to eventually start pruning old blocks, so having the genesis // block might be unnecessary. - pub async fn new(ctx: &ctx::Ctx, genesis_block: &FinalBlock, path: &Path) -> ctx::Result { + pub async fn new( + ctx: &ctx::Ctx, + genesis_block: &validator::FinalBlock, + path: &Path, + ) -> ctx::Result { let mut options = rocksdb::Options::default(); options.create_missing_column_families(true); options.create_if_missing(true); @@ -127,7 +131,7 @@ impl RocksdbStorage { self.inner.write().expect("DB lock is poisoned") } - fn head_block_blocking(&self) -> anyhow::Result { + fn head_block_blocking(&self) -> anyhow::Result { let db = self.read(); let mut options = ReadOptions::default(); @@ -141,7 +145,7 @@ impl RocksdbStorage { } /// Returns a block with the least number stored in this database. - fn first_block_blocking(&self) -> anyhow::Result { + fn first_block_blocking(&self) -> anyhow::Result { let db = self.read(); let mut options = ReadOptions::default(); @@ -154,11 +158,11 @@ impl RocksdbStorage { zksync_protobuf::decode(&first_block).context("Failed decoding first stored block bytes") } - fn last_contiguous_block_number_blocking(&self) -> anyhow::Result { + fn last_contiguous_block_number_blocking(&self) -> anyhow::Result { let last_contiguous_block_number = self .cached_last_contiguous_block_number .load(Ordering::Relaxed); - let last_contiguous_block_number = BlockNumber(last_contiguous_block_number); + let last_contiguous_block_number = validator::BlockNumber(last_contiguous_block_number); let last_contiguous_block_number = self.last_contiguous_block_number_impl(last_contiguous_block_number)?; @@ -175,8 +179,8 @@ impl RocksdbStorage { // is for the `cached_last_contiguous_block_number` to be present in the database. fn last_contiguous_block_number_impl( &self, - cached_last_contiguous_block_number: BlockNumber, - ) -> anyhow::Result { + cached_last_contiguous_block_number: validator::BlockNumber, + ) -> anyhow::Result { let db = self.read(); let mut options = ReadOptions::default(); @@ -202,7 +206,10 @@ impl RocksdbStorage { } /// Gets a block by its number. - fn block_blocking(&self, number: BlockNumber) -> anyhow::Result> { + fn block_blocking( + &self, + number: validator::BlockNumber, + ) -> anyhow::Result> { let db = self.read(); let Some(raw_block) = db @@ -219,8 +226,8 @@ impl RocksdbStorage { /// Iterates over block numbers in the specified `range` that the DB *does not* have. 
fn missing_block_numbers_blocking( &self, - range: ops::Range, - ) -> anyhow::Result> { + range: ops::Range, + ) -> anyhow::Result> { let db = self.read(); let mut options = ReadOptions::default(); @@ -242,7 +249,7 @@ impl RocksdbStorage { // ---------------- Write methods ---------------- /// Insert a new block into the database. - fn put_block_blocking(&self, finalized_block: &FinalBlock) -> anyhow::Result<()> { + fn put_block_blocking(&self, finalized_block: &validator::FinalBlock) -> anyhow::Result<()> { let db = self.write(); let block_number = finalized_block.header.number; tracing::debug!("Inserting new block #{block_number} into the database."); @@ -292,38 +299,62 @@ impl fmt::Debug for RocksdbStorage { #[async_trait] impl BlockStore for RocksdbStorage { - async fn head_block(&self, _ctx: &ctx::Ctx) -> ctx::Result { + async fn head_block(&self, _ctx: &ctx::Ctx) -> ctx::Result { Ok(scope::wait_blocking(|| self.head_block_blocking()).await?) } - async fn first_block(&self, _ctx: &ctx::Ctx) -> ctx::Result { + async fn first_block(&self, _ctx: &ctx::Ctx) -> ctx::Result { Ok(scope::wait_blocking(|| self.first_block_blocking()).await?) } - async fn last_contiguous_block_number(&self, _ctx: &ctx::Ctx) -> ctx::Result { + async fn last_contiguous_block_number( + &self, + _ctx: &ctx::Ctx, + ) -> ctx::Result { Ok(scope::wait_blocking(|| self.last_contiguous_block_number_blocking()).await?) } - async fn block(&self, _ctx: &ctx::Ctx, number: BlockNumber) -> ctx::Result> { + async fn block( + &self, + _ctx: &ctx::Ctx, + number: validator::BlockNumber, + ) -> ctx::Result> { Ok(scope::wait_blocking(|| self.block_blocking(number)).await?) } async fn missing_block_numbers( &self, _ctx: &ctx::Ctx, - range: ops::Range, - ) -> ctx::Result> { + range: ops::Range, + ) -> ctx::Result> { Ok(scope::wait_blocking(|| self.missing_block_numbers_blocking(range)).await?) } - fn subscribe_to_block_writes(&self) -> watch::Receiver { + fn subscribe_to_block_writes(&self) -> watch::Receiver { self.block_writes_sender.subscribe() } } #[async_trait] impl WriteBlockStore for RocksdbStorage { - async fn put_block(&self, _ctx: &ctx::Ctx, block: &FinalBlock) -> ctx::Result<()> { + /// Just verifies that the payload is for the successor of the current head. + async fn verify_payload( + &self, + ctx: &ctx::Ctx, + block_number: validator::BlockNumber, + _payload: &validator::Payload, + ) -> ctx::Result<()> { + let head_number = self.head_block(ctx).await?.header.number; + if head_number >= block_number { + return Err(anyhow::anyhow!( + "received proposal for block {block_number:?}, while head is at {head_number:?}" + ) + .into()); + } + Ok(()) + } + + async fn put_block(&self, _ctx: &ctx::Ctx, block: &validator::FinalBlock) -> ctx::Result<()> { Ok(scope::wait_blocking(|| self.put_block_blocking(block)).await?) } } diff --git a/node/libs/storage/src/traits.rs b/node/libs/storage/src/traits.rs index 50865485..83e069f6 100644 --- a/node/libs/storage/src/traits.rs +++ b/node/libs/storage/src/traits.rs @@ -56,16 +56,8 @@ pub trait WriteBlockStore: BlockStore { ctx: &ctx::Ctx, block_number: BlockNumber, _payload: &Payload, - ) -> ctx::Result<()> { - let head_number = self.head_block(ctx).await?.header.number; - if head_number >= block_number { - return Err(anyhow::anyhow!( - "received proposal for block {block_number:?}, while head is at {head_number:?}" - ) - .into()); - } - Ok(()) - } + ) -> ctx::Result<()>; + /// Puts a block into this storage. async fn put_block(&self, ctx: &ctx::Ctx, block: &FinalBlock) -> ctx::Result<()>; }
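
Note (illustrative, not part of the patch series): with the default `verify_payload()` body removed from the `WriteBlockStore` trait in the `traits.rs` hunk above, any out-of-tree store now has to supply the check itself. The sketch below shows the minimal successor-of-head check that this patch copies into `InMemoryStorage` and `RocksdbStorage`; `verify_successor_of_head` is a hypothetical helper name, and a real implementor would call something like it from its own `verify_payload()`.

// Hypothetical helper, mirroring the check added to InMemoryStorage and
// RocksdbStorage in this patch. Names here are illustrative only.
use zksync_concurrency::ctx;
use zksync_consensus_roles::validator;
use zksync_consensus_storage::BlockStore;

/// Rejects a proposal unless its number is strictly greater than the current head.
async fn verify_successor_of_head(
    store: &impl BlockStore,
    ctx: &ctx::Ctx,
    block_number: validator::BlockNumber,
) -> ctx::Result<()> {
    // Look up the current head and compare block numbers, exactly as the
    // in-memory and RocksDB stores do above.
    let head_number = store.head_block(ctx).await?.header.number;
    if head_number >= block_number {
        return Err(anyhow::anyhow!(
            "received proposal for block {block_number:?}, while head is at {head_number:?}"
        )
        .into());
    }
    Ok(())
}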