diff --git a/node/actors/bft/src/leader/replica_commit.rs b/node/actors/bft/src/leader/replica_commit.rs index f212cdc1..f0e817ac 100644 --- a/node/actors/bft/src/leader/replica_commit.rs +++ b/node/actors/bft/src/leader/replica_commit.rs @@ -1,7 +1,7 @@ //! Handler of a ReplicaCommit message. + use super::StateMachine; use crate::metrics; -use std::collections::HashMap; use tracing::instrument; use zksync_concurrency::{ctx, metrics::LatencyHistogramExt as _}; use zksync_consensus_network::io::{ConsensusInputMessage, Target}; @@ -39,10 +39,10 @@ pub(crate) enum Error { #[error("invalid message: {0:#}")] InvalidMessage(#[source] anyhow::Error), /// Duplicate message from a replica. - #[error("duplicate message from a replica (existing message: {existing_message:?}")] - DuplicateMessage { - /// Existing message from the same replica. - existing_message: validator::ReplicaCommit, + #[error("Replica signed more than one message for same view (message: {message:?}")] + DuplicateSignature { + /// Offending message. + message: validator::ReplicaCommit, }, /// Invalid message signature. #[error("invalid signature: {0:#}")] @@ -70,7 +70,7 @@ impl StateMachine { }); } - // Check that the message signer is in the validator set. + // Check that the message signer is in the validator committee. if !self.config.genesis().validators.contains(author) { return Err(Error::NonValidatorSigner { signer: author.clone(), @@ -96,16 +96,23 @@ impl StateMachine { return Err(Error::NotLeaderInView); } + // Get current incrementally-constructed QC to work on it + let commit_qc = self + .commit_qcs + .entry(message.view.number) + .or_default() + .entry(message.clone()) + .or_insert_with(|| CommitQC::new(message.clone(), self.config.genesis())); + // If we already have a message from the same validator and for the same view, we discard it. - if let Some(existing_message) = self - .commit_message_cache - .get(&message.view.number) - .and_then(|x| x.get(author)) - { - return Err(Error::DuplicateMessage { - existing_message: existing_message.msg.clone(), + let validator_view = self.validator_views.get(author); + if validator_view.is_some_and(|view_number| *view_number >= message.view.number) { + return Err(Error::DuplicateSignature { + message: commit_qc.message.clone(), }); } + self.validator_views + .insert(author.clone(), message.view.number); // ----------- Checking the signed part of the message -------------- @@ -118,37 +125,16 @@ impl StateMachine { // ----------- All checks finished. Now we process the message. -------------- - // TODO: we have a bug here since we don't check whether replicas commit - // to the same proposal. - - // We add the message to the incrementally-constructed QC. - self.commit_qcs - .entry(message.view.number) - .or_insert_with(|| CommitQC::new(message.clone(), self.config.genesis())) - .add(&signed_message, self.config.genesis()); + // Add the message to the QC. + commit_qc.add(&signed_message, self.config.genesis()); - // We store the message in our cache. - let cache_entry = self - .commit_message_cache - .entry(message.view.number) - .or_default(); - cache_entry.insert(author.clone(), signed_message.clone()); - - // Now we check if we have enough messages to continue. 
- let mut by_proposal: HashMap<_, Vec<_>> = HashMap::new(); - for msg in cache_entry.values() { - by_proposal.entry(msg.msg.proposal).or_default().push(msg); - } - let threshold = self.config.genesis().validators.threshold(); - let Some((_, replica_messages)) = - by_proposal.into_iter().find(|(_, v)| v.len() >= threshold) - else { + // Now we check if we have enough weight to continue. + let weight = self.config.genesis().validators.weight(&commit_qc.signers); + if weight < self.config.genesis().validators.threshold() { return Ok(()); }; - debug_assert_eq!(replica_messages.len(), threshold); // ----------- Update the state machine -------------- - let now = ctx.now(); metrics::METRICS .leader_commit_phase_latency @@ -159,12 +145,13 @@ impl StateMachine { // ----------- Prepare our message and send it. -------------- - // Remove replica commit messages for this view, so that we don't create a new leader commit - // for this same view if we receive another replica commit message after this. - self.commit_message_cache.remove(&message.view.number); - // Consume the incrementally-constructed QC for this view. - let justification = self.commit_qcs.remove(&message.view.number).unwrap(); + let justification = self + .commit_qcs + .remove(&message.view.number) + .unwrap() + .remove(message) + .unwrap(); // Broadcast the leader commit message to all replicas (ourselves included). let output_message = ConsensusInputMessage { @@ -180,7 +167,6 @@ impl StateMachine { // Clean the caches. self.prepare_message_cache.retain(|k, _| k >= &self.view); - self.commit_message_cache.retain(|k, _| k >= &self.view); Ok(()) } diff --git a/node/actors/bft/src/leader/replica_prepare.rs b/node/actors/bft/src/leader/replica_prepare.rs index 1f52e4f4..4e3a6f1b 100644 --- a/node/actors/bft/src/leader/replica_prepare.rs +++ b/node/actors/bft/src/leader/replica_prepare.rs @@ -2,7 +2,7 @@ use super::StateMachine; use tracing::instrument; use zksync_concurrency::{ctx, error::Wrap}; -use zksync_consensus_roles::validator::{self, ProtocolVersion}; +use zksync_consensus_roles::validator; /// Errors that can occur when processing a "replica prepare" message. #[derive(Debug, thiserror::Error)] @@ -11,9 +11,9 @@ pub(crate) enum Error { #[error("incompatible protocol version (message version: {message_version:?}, local version: {local_version:?}")] IncompatibleProtocolVersion { /// Message version. - message_version: ProtocolVersion, + message_version: validator::ProtocolVersion, /// Local version. - local_version: ProtocolVersion, + local_version: validator::ProtocolVersion, }, /// Message signer isn't part of the validator set. #[error("Message signer isn't part of the validator set (signer: {signer:?})")] @@ -132,25 +132,22 @@ impl StateMachine { // ----------- All checks finished. Now we process the message. -------------- // We add the message to the incrementally-constructed QC. - self.prepare_qcs + let prepare_qc = self + .prepare_qcs .entry(message.view.number) - .or_insert_with(|| validator::PrepareQC::new(message.view.clone())) - .add(&signed_message, self.config.genesis()); + .or_insert_with(|| validator::PrepareQC::new(message.view.clone())); + prepare_qc.add(&signed_message, self.config.genesis()); // We store the message in our cache. self.prepare_message_cache .entry(message.view.number) .or_default() - .insert(author.clone(), signed_message); + .insert(author.clone(), signed_message.clone()); - // Now we check if we have enough messages to continue. 
- let num_messages = self - .prepare_message_cache - .get(&message.view.number) - .unwrap() - .len(); - - if num_messages < self.config.genesis().validators.threshold() { + // Now we check if we have enough weight to continue. + if prepare_qc.weight(&self.config.genesis().validators) + < self.config.genesis().validators.threshold() + { return Ok(()); } @@ -158,8 +155,6 @@ impl StateMachine { // for this same view if we receive another replica prepare message after this. self.prepare_message_cache.remove(&message.view.number); - debug_assert_eq!(num_messages, self.config.genesis().validators.threshold()); - // ----------- Update the state machine -------------- self.view = message.view.number; diff --git a/node/actors/bft/src/leader/state_machine.rs b/node/actors/bft/src/leader/state_machine.rs index 8d44909a..8e6dc15b 100644 --- a/node/actors/bft/src/leader/state_machine.rs +++ b/node/actors/bft/src/leader/state_machine.rs @@ -36,13 +36,11 @@ pub(crate) struct StateMachine { pub(crate) prepare_qcs: BTreeMap, /// Newest prepare QC composed from the `ReplicaPrepare` messages. pub(crate) prepare_qc: sync::watch::Sender>, - /// A cache of replica commit messages indexed by view number and validator. - pub(crate) commit_message_cache: BTreeMap< - validator::ViewNumber, - HashMap>, - >, - /// Commit QCs indexed by view number. - pub(crate) commit_qcs: BTreeMap, + /// Commit QCs indexed by view number and then by message. + pub(crate) commit_qcs: + BTreeMap>, + /// Latest view a validator has signed a message for. + pub(crate) validator_views: BTreeMap, } impl StateMachine { @@ -67,10 +65,10 @@ impl StateMachine { phase_start: ctx.now(), prepare_message_cache: BTreeMap::new(), prepare_qcs: BTreeMap::new(), - commit_message_cache: BTreeMap::new(), prepare_qc: sync::watch::channel(None).0, commit_qcs: BTreeMap::new(), inbound_pipe: recv, + validator_views: BTreeMap::new(), }; (this, send) diff --git a/node/actors/bft/src/leader/tests.rs b/node/actors/bft/src/leader/tests.rs index 9974c5c8..d3df3c9d 100644 --- a/node/actors/bft/src/leader/tests.rs +++ b/node/actors/bft/src/leader/tests.rs @@ -333,6 +333,68 @@ async fn replica_prepare_high_qc_of_future_view() { .unwrap(); } +/// Check all ReplicaPrepare are included for weight calculation +/// even on different messages for the same view. 
+#[tokio::test] +async fn replica_prepare_different_messages() { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + scope::run!(ctx, |ctx, s| async { + let (mut util, runner) = UTHarness::new_many(ctx).await; + s.spawn_bg(runner.run(ctx)); + + util.produce_block(ctx).await; + + let view = util.replica_view(); + let replica_prepare = util.new_replica_prepare(); + + // Create a different proposal for the same view + let proposal = replica_prepare.clone().high_vote.unwrap().proposal; + let mut different_proposal = proposal; + different_proposal.number = different_proposal.number.next(); + + // Create a new ReplicaPrepare with the different proposal + let mut other_replica_prepare = replica_prepare.clone(); + let mut high_vote = other_replica_prepare.high_vote.clone().unwrap(); + high_vote.proposal = different_proposal; + let high_qc = util.new_commit_qc(|msg| { + msg.proposal = different_proposal; + msg.view = view.clone() + }); + + other_replica_prepare.high_vote = Some(high_vote); + other_replica_prepare.high_qc = Some(high_qc); + + let validators = util.keys.len(); + + // half of the validators sign replica_prepare + for i in 0..validators / 2 { + util.process_replica_prepare(ctx, util.keys[i].sign_msg(replica_prepare.clone())) + .await + .unwrap(); + } + + let mut replica_commit_result = None; + // The rest of the validators until threshold sign other_replica_prepare + for i in validators / 2..util.genesis().validators.threshold() as usize { + replica_commit_result = util + .process_replica_prepare(ctx, util.keys[i].sign_msg(other_replica_prepare.clone())) + .await + .unwrap(); + } + + // That should be enough for a proposal to be committed (even with different proposals) + assert_matches!(replica_commit_result, Some(_)); + + // Check the first proposal has been committed (as it has more votes) + let message = replica_commit_result.unwrap().msg; + assert_eq!(message.proposal, proposal); + Ok(()) + }) + .await + .unwrap(); +} + #[tokio::test] async fn replica_commit_sanity() { zksync_concurrency::testonly::abort_on_panic(); @@ -484,15 +546,31 @@ async fn replica_commit_already_exists() { .await .unwrap() .is_none()); + + // Processing twice same ReplicaCommit for same view gets DuplicateSignature error let res = util .process_replica_commit(ctx, util.sign(replica_commit.clone())) .await; assert_matches!( res, - Err(replica_commit::Error::DuplicateMessage { existing_message }) => { - assert_eq!(existing_message, replica_commit) + Err(replica_commit::Error::DuplicateSignature { message }) => { + assert_eq!(message, replica_commit) + } + ); + + // Processing twice different ReplicaCommit for same view gets DuplicateSignature error too + let mut different_replica_commit = replica_commit.clone(); + different_replica_commit.proposal.number = replica_commit.proposal.number.next(); + let res = util + .process_replica_commit(ctx, util.sign(different_replica_commit.clone())) + .await; + assert_matches!( + res, + Err(replica_commit::Error::DuplicateSignature { message }) => { + assert_eq!(message, different_replica_commit) } ); + Ok(()) }) .await @@ -572,3 +650,48 @@ async fn replica_commit_unexpected_proposal() { .await .unwrap(); } + +/// Proposal should be the same for every ReplicaCommit +/// Check it doesn't fail if one validator sends a different proposal in +/// the ReplicaCommit +#[tokio::test] +async fn replica_commit_different_proposals() { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + 
scope::run!(ctx, |ctx, s| async { + let (mut util, runner) = UTHarness::new_many(ctx).await; + s.spawn_bg(runner.run(ctx)); + + let replica_commit = util.new_replica_commit(ctx).await; + + // Process a modified replica_commit (ie. from a malicious or wrong node) + let mut bad_replica_commit = replica_commit.clone(); + bad_replica_commit.proposal.number = replica_commit.proposal.number.next(); + util.process_replica_commit(ctx, util.sign(bad_replica_commit)) + .await + .unwrap(); + + // The rest of the validators sign the correct one + let mut replica_commit_result = None; + for i in 1..util.keys.len() { + replica_commit_result = util + .process_replica_commit(ctx, util.keys[i].sign_msg(replica_commit.clone())) + .await + .unwrap(); + } + + // Check correct proposal has been committed + assert_matches!( + replica_commit_result, + Some(leader_commit) => { + assert_eq!( + leader_commit.msg.justification.message.proposal, + replica_commit.proposal + ); + } + ); + Ok(()) + }) + .await + .unwrap(); +} diff --git a/node/actors/bft/src/testonly/run.rs b/node/actors/bft/src/testonly/run.rs index af06ea04..9d346174 100644 --- a/node/actors/bft/src/testonly/run.rs +++ b/node/actors/bft/src/testonly/run.rs @@ -17,7 +17,7 @@ pub(crate) enum Network { #[derive(Clone)] pub(crate) struct Test { pub(crate) network: Network, - pub(crate) nodes: Vec, + pub(crate) nodes: Vec<(Behavior, u64)>, pub(crate) blocks_to_finalize: usize, } @@ -25,7 +25,10 @@ impl Test { /// Run a test with the given parameters. pub(crate) async fn run(&self, ctx: &ctx::Ctx) -> anyhow::Result<()> { let rng = &mut ctx.rng(); - let setup = validator::testonly::Setup::new(rng, self.nodes.len()); + let setup = validator::testonly::Setup::new_with_weights( + rng, + self.nodes.iter().map(|(_, w)| *w).collect(), + ); let nets: Vec<_> = network::testonly::new_configs(rng, &setup, 1); let mut nodes = vec![]; let mut honest = vec![]; @@ -33,12 +36,12 @@ impl Test { for (i, net) in nets.into_iter().enumerate() { let (store, runner) = new_store(ctx, &setup.genesis).await; s.spawn_bg(runner.run(ctx)); - if self.nodes[i] == Behavior::Honest { + if self.nodes[i].0 == Behavior::Honest { honest.push(store.clone()); } nodes.push(Node { net, - behavior: self.nodes[i], + behavior: self.nodes[i].0, block_store: store, }); } diff --git a/node/actors/bft/src/testonly/ut_harness.rs b/node/actors/bft/src/testonly/ut_harness.rs index 29590cce..a99dae9f 100644 --- a/node/actors/bft/src/testonly/ut_harness.rs +++ b/node/actors/bft/src/testonly/ut_harness.rs @@ -7,7 +7,7 @@ use crate::{ testonly, Config, PayloadManager, }; use assert_matches::assert_matches; -use std::{cmp::Ordering, sync::Arc}; +use std::sync::Arc; use zksync_concurrency::ctx; use zksync_consensus_network as network; use zksync_consensus_roles::validator::{ @@ -83,8 +83,9 @@ impl UTHarness { /// Creates a new `UTHarness` with minimally-significant validator set size. 
pub(crate) async fn new_many(ctx: &ctx::Ctx) -> (UTHarness, BlockStoreRunner) { let num_validators = 6; - assert!(validator::faulty_replicas(num_validators) > 0); - UTHarness::new(ctx, num_validators).await + let (util, runner) = UTHarness::new(ctx, num_validators).await; + assert!(util.genesis().validators.max_faulty_weight() > 0); + (util, runner) } /// Triggers replica timeout, validates the new ReplicaPrepare @@ -224,15 +225,22 @@ impl UTHarness { ctx: &ctx::Ctx, msg: ReplicaPrepare, ) -> Signed { - let want_threshold = self.genesis().validators.threshold(); let mut leader_prepare = None; let msgs: Vec<_> = self.keys.iter().map(|k| k.sign_msg(msg.clone())).collect(); + let mut first_match = true; for (i, msg) in msgs.into_iter().enumerate() { let res = self.process_replica_prepare(ctx, msg).await; - match (i + 1).cmp(&want_threshold) { - Ordering::Equal => leader_prepare = res.unwrap(), - Ordering::Less => assert!(res.unwrap().is_none()), - Ordering::Greater => assert_matches!(res, Err(replica_prepare::Error::Old { .. })), + match ( + (i + 1) as u64 * self.genesis().validators.iter().next().unwrap().weight + < self.genesis().validators.threshold(), + first_match, + ) { + (true, _) => assert!(res.unwrap().is_none()), + (false, true) => { + first_match = false; + leader_prepare = res.unwrap() + } + (false, false) => assert_matches!(res, Err(replica_prepare::Error::Old { .. })), } } leader_prepare.unwrap() @@ -252,15 +260,22 @@ impl UTHarness { ctx: &ctx::Ctx, msg: ReplicaCommit, ) -> Signed { + let mut first_match = true; for (i, key) in self.keys.iter().enumerate() { let res = self .leader .process_replica_commit(ctx, key.sign_msg(msg.clone())); - let want_threshold = self.genesis().validators.threshold(); - match (i + 1).cmp(&want_threshold) { - Ordering::Equal => res.unwrap(), - Ordering::Less => res.unwrap(), - Ordering::Greater => assert_matches!(res, Err(replica_commit::Error::Old { .. })), + match ( + (i + 1) as u64 * self.genesis().validators.iter().next().unwrap().weight + < self.genesis().validators.threshold(), + first_match, + ) { + (true, _) => res.unwrap(), + (false, true) => { + first_match = false; + res.unwrap() + } + (false, false) => assert_matches!(res, Err(replica_commit::Error::Old { .. 
})), } } self.try_recv().unwrap() diff --git a/node/actors/bft/src/tests.rs b/node/actors/bft/src/tests.rs index c73cf233..eaddccb2 100644 --- a/node/actors/bft/src/tests.rs +++ b/node/actors/bft/src/tests.rs @@ -8,9 +8,12 @@ async fn run_test(behavior: Behavior, network: Network) { let ctx = &ctx::test_root(&ctx::RealClock); const NODES: usize = 11; - let mut nodes = vec![behavior; NODES]; - for n in &mut nodes[0..validator::threshold(NODES)] { - *n = Behavior::Honest; + let mut nodes = vec![(behavior, 1u64); NODES]; + // validator::threshold(NODES) will calculate required nodes to validate a message + // given each node weight is 1 + let honest_nodes_amount = validator::threshold(NODES as u64) as usize; + for n in &mut nodes[0..honest_nodes_amount] { + n.0 = Behavior::Honest; } Test { network, @@ -185,7 +188,7 @@ async fn non_proposing_leader() { let ctx = &ctx::test_root(&ctx::AffineClock::new(5.)); Test { network: Network::Real, - nodes: vec![Behavior::Honest, Behavior::HonestNotProposing], + nodes: vec![(Behavior::Honest, 1), (Behavior::HonestNotProposing, 1)], blocks_to_finalize: 10, } .run(ctx) diff --git a/node/actors/executor/src/lib.rs b/node/actors/executor/src/lib.rs index 9c4e4688..04e3a9c4 100644 --- a/node/actors/executor/src/lib.rs +++ b/node/actors/executor/src/lib.rs @@ -110,7 +110,7 @@ impl Executor { .block_store .genesis() .validators - .iter() + .iter_keys() .any(|key| key == &validator.key.public()) { anyhow::bail!("this validator doesn't belong to the consensus"); diff --git a/node/actors/network/src/consensus/mod.rs b/node/actors/network/src/consensus/mod.rs index 3de0243c..10123219 100644 --- a/node/actors/network/src/consensus/mod.rs +++ b/node/actors/network/src/consensus/mod.rs @@ -69,7 +69,7 @@ impl Network { /// Constructs a new consensus network state. pub(crate) fn new(gossip: Arc) -> Option> { let key = gossip.cfg.validator_key.clone()?; - let validators: HashSet<_> = gossip.genesis().validators.iter().cloned().collect(); + let validators: HashSet<_> = gossip.genesis().validators.iter_keys().cloned().collect(); Some(Arc::new(Self { key, inbound: PoolWatch::new(validators.clone(), 0), diff --git a/node/actors/network/src/gossip/tests.rs b/node/actors/network/src/gossip/tests.rs index 40a50875..b897e0a6 100644 --- a/node/actors/network/src/gossip/tests.rs +++ b/node/actors/network/src/gossip/tests.rs @@ -14,7 +14,7 @@ use zksync_concurrency::{ testonly::{abort_on_panic, set_timeout}, time, }; -use zksync_consensus_roles::validator::{self, BlockNumber, FinalBlock}; +use zksync_consensus_roles::validator::{self, BlockNumber, FinalBlock, WeightedValidator}; use zksync_consensus_storage::testonly::new_store; #[tokio::test] @@ -143,7 +143,11 @@ async fn test_validator_addrs() { let rng = &mut ctx::test_root(&ctx::RealClock).rng(); let keys: Vec = (0..8).map(|_| rng.gen()).collect(); - let validators = validator::ValidatorSet::new(keys.iter().map(|k| k.public())).unwrap(); + let validators = validator::Committee::new(keys.iter().map(|k| WeightedValidator { + key: k.public(), + weight: 1250, + })) + .unwrap(); let va = ValidatorAddrsWatch::default(); let mut sub = va.subscribe(); diff --git a/node/actors/network/src/gossip/validator_addrs.rs b/node/actors/network/src/gossip/validator_addrs.rs index 8fd21b63..51c736af 100644 --- a/node/actors/network/src/gossip/validator_addrs.rs +++ b/node/actors/network/src/gossip/validator_addrs.rs @@ -41,7 +41,7 @@ impl ValidatorAddrs { /// Returns true iff some new entry was added. 
pub(super) fn update( &mut self, - validators: &validator::ValidatorSet, + validators: &validator::Committee, data: &[Arc>], ) -> anyhow::Result { let mut changed = false; @@ -119,7 +119,7 @@ impl ValidatorAddrsWatch { /// invalid entry should be banned. pub(crate) async fn update( &self, - validators: &validator::ValidatorSet, + validators: &validator::Committee, data: &[Arc>], ) -> anyhow::Result<()> { let this = self.0.lock().await; diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs index 218e0032..329912a1 100644 --- a/node/actors/network/src/lib.rs +++ b/node/actors/network/src/lib.rs @@ -157,7 +157,7 @@ impl Runner { // If we are active validator ... if validators.contains(&c.key.public()) { // Maintain outbound connections. - for peer in validators.iter() { + for peer in validators.iter_keys() { s.spawn(async { c.maintain_connection(ctx, peer).await; Ok(()) diff --git a/node/actors/network/src/testonly.rs b/node/actors/network/src/testonly.rs index 0241ed8e..8632084a 100644 --- a/node/actors/network/src/testonly.rs +++ b/node/actors/network/src/testonly.rs @@ -194,7 +194,7 @@ impl Instance { pub async fn wait_for_consensus_connections(&self) { let consensus_state = self.net.consensus.as_ref().unwrap(); - let want: HashSet<_> = self.genesis().validators.iter().cloned().collect(); + let want: HashSet<_> = self.genesis().validators.iter_keys().cloned().collect(); consensus_state .inbound .subscribe() diff --git a/node/libs/roles/src/proto/validator.proto b/node/libs/roles/src/proto/validator.proto index 2b1d6329..5b5883b9 100644 --- a/node/libs/roles/src/proto/validator.proto +++ b/node/libs/roles/src/proto/validator.proto @@ -9,9 +9,13 @@ message Fork { optional uint64 first_block = 2; // required; BlockNumber } +// validators and validators_v1 are mutually exclusive. 
+// They should be put into a oneof, but it may not recognize this as +// a backward compatible change: https://buf.build/docs/breaking/rules#field_same_oneof message Genesis { optional Fork fork = 1; // required - repeated PublicKey validators = 2; + repeated PublicKey validators = 2 [deprecated = true]; + repeated WeightedValidator validators_v1 = 3; } message GenesisHash { @@ -166,3 +170,9 @@ message Signature { message AggregateSignature { optional bytes bn254 = 1; // required } + +message WeightedValidator { + optional PublicKey key = 1; // required + optional uint64 weight = 2; // required + +} \ No newline at end of file diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index 9e616320..8070e1b6 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -1,8 +1,8 @@ use super::{ - AggregateSignature, BlockHeader, BlockNumber, CommitQC, ConsensusMsg, FinalBlock, Fork, - ForkNumber, Genesis, GenesisHash, LeaderCommit, LeaderPrepare, Msg, MsgHash, NetAddress, - Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, ReplicaCommit, - ReplicaPrepare, Signature, Signed, Signers, ValidatorSet, View, ViewNumber, + AggregateSignature, BlockHeader, BlockNumber, CommitQC, Committee, ConsensusMsg, FinalBlock, + Fork, ForkNumber, Genesis, GenesisHash, GenesisVersion, LeaderCommit, LeaderPrepare, Msg, + MsgHash, NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, + ReplicaCommit, ReplicaPrepare, Signature, Signed, Signers, View, ViewNumber, WeightedValidator, }; use crate::{node::SessionId, proto::validator as proto}; use anyhow::Context as _; @@ -27,25 +27,58 @@ impl ProtoFmt for Fork { } } +#[allow(deprecated)] impl ProtoFmt for Genesis { type Proto = proto::Genesis; fn read(r: &Self::Proto) -> anyhow::Result { - let validators: Vec<_> = r - .validators - .iter() - .enumerate() - .map(|(i, v)| PublicKey::read(v).context(i)) - .collect::>() - .context("validators")?; + let (validators, version) = + // current genesis encoding version 1 + if !r.validators_v1.is_empty() { + ( + r.validators_v1 + .iter() + .enumerate() + .map(|(i, v)| WeightedValidator::read(v).context(i)) + .collect::>() + .context("validators")?, + GenesisVersion(1), + ) + // legacy genesis encoding version 0 + } else if !r.validators.is_empty() { + ( + r.validators + .iter() + .enumerate() + .map(|(i, v)| anyhow::Ok(WeightedValidator { + key: PublicKey::read(v).context(i)?, + weight: 1, + })) + .collect::>() + .context("validators")?, + GenesisVersion(0), + ) + // empty validator set, Committee:new() will later return an error. + } else { + (vec![], GenesisVersion::CURRENT) + }; Ok(Self { fork: read_required(&r.fork).context("fork")?, - validators: ValidatorSet::new(validators.into_iter()).context("validators")?, + validators: Committee::new(validators.into_iter()).context("validators")?, + version, }) } fn build(&self) -> Self::Proto { - Self::Proto { - fork: Some(self.fork.build()), - validators: self.validators.iter().map(|x| x.build()).collect(), + match self.version { + GenesisVersion(0) => Self::Proto { + fork: Some(self.fork.build()), + validators: self.validators.iter().map(|v| v.key.build()).collect(), + validators_v1: vec![], + }, + GenesisVersion(1..) 
=> Self::Proto { + fork: Some(self.fork.build()), + validators: vec![], + validators_v1: self.validators.iter().map(|v| v.build()).collect(), + }, } } } @@ -437,3 +470,21 @@ impl ProtoFmt for AggregateSignature { } } } + +impl ProtoFmt for WeightedValidator { + type Proto = proto::WeightedValidator; + + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + key: read_required(&r.key).context("key")?, + weight: *required(&r.weight).context("weight")?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + key: Some(self.key.build()), + weight: Some(self.weight), + } + } +} diff --git a/node/libs/roles/src/validator/messages/consensus.rs b/node/libs/roles/src/validator/messages/consensus.rs index 68a21b51..52639b9c 100644 --- a/node/libs/roles/src/validator/messages/consensus.rs +++ b/node/libs/roles/src/validator/messages/consensus.rs @@ -1,11 +1,9 @@ //! Messages related to the consensus protocol. use super::{BlockNumber, LeaderCommit, LeaderPrepare, Msg, ReplicaCommit, ReplicaPrepare}; use crate::validator; +use anyhow::Context; use bit_vec::BitVec; -use std::{ - collections::{BTreeMap, BTreeSet}, - fmt, -}; +use std::{collections::BTreeMap, fmt}; use zksync_consensus_crypto::{keccak256::Keccak256, ByteFmt, Text, TextFmt}; use zksync_consensus_utils::enum_util::{BadVariantError, Variant}; @@ -74,96 +72,146 @@ impl Default for Fork { /// A struct that represents a set of validators. It is used to store the current validator set. /// We represent each validator by its validator public key. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ValidatorSet { - vec: Vec, - map: BTreeMap, +#[derive(Clone, Debug, PartialEq, Eq, Default)] +pub struct Committee { + vec: Vec, + indexes: BTreeMap, + total_weight: u64, } -impl ValidatorSet { - /// Creates a new ValidatorSet from a list of validator public keys. - pub fn new(validators: impl IntoIterator) -> anyhow::Result { - let mut set = BTreeSet::new(); +impl Committee { + /// Creates a new Committee from a list of validator public keys. + pub fn new(validators: impl IntoIterator) -> anyhow::Result { + let mut weighted_validators = BTreeMap::new(); + let mut total_weight: u64 = 0; for validator in validators { - anyhow::ensure!(set.insert(validator), "Duplicate validator in ValidatorSet"); + anyhow::ensure!( + !weighted_validators.contains_key(&validator.key), + "Duplicate validator in validator Committee" + ); + anyhow::ensure!( + validator.weight > 0, + "Validator weight has to be a positive value" + ); + total_weight = total_weight + .checked_add(validator.weight) + .context("Sum of weights overflows in validator Committee")?; + weighted_validators.insert(validator.key.clone(), validator); } anyhow::ensure!( - !set.is_empty(), - "ValidatorSet must contain at least one validator" + !weighted_validators.is_empty(), + "Validator Committee must contain at least one validator" ); Ok(Self { - vec: set.iter().cloned().collect(), - map: set.into_iter().enumerate().map(|(i, pk)| (pk, i)).collect(), + vec: weighted_validators.values().cloned().collect(), + indexes: weighted_validators + .values() + .enumerate() + .map(|(i, v)| (v.key.clone(), i)) + .collect(), + total_weight, }) } - /// Iterates over validators. - pub fn iter(&self) -> impl Iterator { + /// Iterates over weighted validators. + pub fn iter(&self) -> impl Iterator { self.vec.iter() } + /// Iterates over validator keys. + pub fn iter_keys(&self) -> impl Iterator { + self.vec.iter().map(|v| &v.key) + } + /// Returns the number of validators. 
- #[allow(clippy::len_without_is_empty)] // a valid `ValidatorSet` is always non-empty by construction + #[allow(clippy::len_without_is_empty)] // a valid `Committee` is always non-empty by construction pub fn len(&self) -> usize { self.vec.len() } - /// Returns true if the given validator is in the validator set. + /// Returns true if the given validator is in the validator committee. pub fn contains(&self, validator: &validator::PublicKey) -> bool { - self.map.contains_key(validator) + self.indexes.contains_key(validator) } - /// Get validator by its index in the set. - pub fn get(&self, index: usize) -> Option<&validator::PublicKey> { + /// Get validator by its index in the committee. + pub fn get(&self, index: usize) -> Option<&WeightedValidator> { self.vec.get(index) } - /// Get the index of a validator in the set. + /// Get the index of a validator in the committee. pub fn index(&self, validator: &validator::PublicKey) -> Option { - self.map.get(validator).copied() + self.indexes.get(validator).copied() } - /// Computes the validator for the given view. + /// Computes the leader for the given view. pub fn view_leader(&self, view_number: ViewNumber) -> validator::PublicKey { let index = view_number.0 as usize % self.len(); - self.get(index).unwrap().clone() + self.get(index).unwrap().key.clone() + } + + /// Signature weight threshold for this validator committee. + pub fn threshold(&self) -> u64 { + threshold(self.total_weight()) } - /// Signature threshold for this validator set. - pub fn threshold(&self) -> usize { - threshold(self.len()) + /// Maximal weight of faulty replicas allowed in this validator committee. + pub fn max_faulty_weight(&self) -> u64 { + max_faulty_weight(self.total_weight()) } - /// Maximal number of faulty replicas allowed in this validator set. - pub fn faulty_replicas(&self) -> usize { - faulty_replicas(self.len()) + /// Compute the sum of signers weights. + /// Panics if signers length does not match the number of validators in committee + pub fn weight(&self, signers: &Signers) -> u64 { + assert_eq!(self.vec.len(), signers.len()); + self.vec + .iter() + .enumerate() + .filter(|(i, _)| signers.0[*i]) + .map(|(_, v)| v.weight) + .sum() + } + + /// Sum of all validators' weight in the committee + pub fn total_weight(&self) -> u64 { + self.total_weight } } -/// Calculate the consensus threshold, the minimum number of votes for any consensus action to be valid, -/// for a given number of replicas. -pub fn threshold(n: usize) -> usize { - n - faulty_replicas(n) +/// Calculate the consensus threshold, the minimum votes' weight for any consensus action to be valid, +/// for a given committee total weight. +pub fn threshold(total_weight: u64) -> u64 { + total_weight - max_faulty_weight(total_weight) } -/// Calculate the maximum number of faulty replicas, for a given number of replicas. -pub fn faulty_replicas(n: usize) -> usize { - // Calculate the allowed maximum number of faulty replicas. We want the following relationship to hold: +/// Calculate the maximum allowed weight for faulty replicas, for a given total weight. +pub fn max_faulty_weight(total_weight: u64) -> u64 { + // Calculate the allowed maximum weight of faulty replicas. We want the following relationship to hold: // n = 5*f + 1 - // for n total replicas and f faulty replicas. This results in the following formula for the maximum - // number of faulty replicas: + // for n total weight and f faulty weight. 
This results in the following formula for the maximum + // weight of faulty replicas: // f = floor((n - 1) / 5) - // Because of this, it doesn't make sense to have 5*f + 2 or 5*f + 3 replicas. It won't increase the number - // of allowed faulty replicas. - (n - 1) / 5 + (total_weight - 1) / 5 +} + +/// Version of the Genesis representation. +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct GenesisVersion(pub u32); + +impl GenesisVersion { + /// Version 0 - deprecated: Committee encoding does not account weights, assume weight=1 per validator + /// Version 1: Validators with weight within Committee + pub const CURRENT: Self = Self(1); } /// Genesis of the blockchain, unique for each blockchain instance. #[derive(Debug, Clone, PartialEq)] pub struct Genesis { // TODO(gprusak): add blockchain id here. + /// Genesis encoding version + pub version: GenesisVersion, /// Set of validators of the chain. - pub validators: ValidatorSet, + pub validators: Committee, /// Fork of the chain to follow. pub fork: Fork, } @@ -179,6 +227,16 @@ impl Genesis { } } +impl Default for Genesis { + fn default() -> Self { + Self { + version: GenesisVersion::CURRENT, + validators: Committee::default(), + fork: Fork::default(), + } + } +} + impl TextFmt for GenesisHash { fn decode(text: Text) -> anyhow::Result { text.strip("genesis_hash:keccak256:")? @@ -320,7 +378,7 @@ impl Signers { self.0.iter().filter(|b| *b).count() } - /// Size of the corresponding ValidatorSet. + /// Size of the corresponding validator Committee. pub fn len(&self) -> usize { self.0.len() } @@ -376,3 +434,12 @@ pub enum Phase { Prepare, Commit, } + +/// Validator representation inside a Committee. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct WeightedValidator { + /// Validator key + pub key: validator::PublicKey, + /// Validator weight inside the Committee. + pub weight: u64, +} diff --git a/node/libs/roles/src/validator/messages/leader_commit.rs b/node/libs/roles/src/validator/messages/leader_commit.rs index 1349e143..a83fa4ed 100644 --- a/node/libs/roles/src/validator/messages/leader_commit.rs +++ b/node/libs/roles/src/validator/messages/leader_commit.rs @@ -41,13 +41,13 @@ pub enum CommitQCVerifyError { /// Bad signer set. #[error("signers set doesn't match genesis")] BadSignersSet, - /// Not enough signers. - #[error("not enough signers: got {got}, want {want}")] + /// Weight not reached. + #[error("Signers have not reached threshold weight: got {got}, want {want}")] NotEnoughSigners { - /// Got signers. - got: usize, - /// Want signers. - want: usize, + /// Got weight. + got: u64, + /// Want weight. + want: u64, }, /// Bad signature. #[error("bad signature: {0:#}")] @@ -100,12 +100,12 @@ impl CommitQC { return Err(Error::BadSignersSet); } - // Verify that we have enough signers. - let num_signers = self.signers.count(); + // Verify the signers' weight is enough. + let weight = genesis.validators.weight(&self.signers); let threshold = genesis.validators.threshold(); - if num_signers < threshold { + if weight < threshold { return Err(Error::NotEnoughSigners { - got: num_signers, + got: weight, want: threshold, }); } @@ -113,7 +113,7 @@ impl CommitQC { // Now we can verify the signature. 
let messages_and_keys = genesis .validators - .iter() + .iter_keys() .enumerate() .filter(|(i, _)| self.signers.0[*i]) .map(|(_, pk)| (self.message.clone(), pk)); diff --git a/node/libs/roles/src/validator/messages/leader_prepare.rs b/node/libs/roles/src/validator/messages/leader_prepare.rs index 323cea6a..2e034ff1 100644 --- a/node/libs/roles/src/validator/messages/leader_prepare.rs +++ b/node/libs/roles/src/validator/messages/leader_prepare.rs @@ -30,13 +30,13 @@ pub enum PrepareQCVerifyError { /// Bad message format. #[error(transparent)] BadFormat(anyhow::Error), - /// Not enough signers. - #[error("not enough signers: got {got}, want {want}")] + /// Weight not reached. + #[error("Signers have not reached threshold weight: got {got}, want {want}")] NotEnoughSigners { - /// Got signers. - got: usize, - /// Want signers. - want: usize, + /// Got weight. + got: u64, + /// Want weight. + want: u64, }, /// Bad signature. #[error("bad signature: {0:#}")] @@ -56,14 +56,14 @@ impl PrepareQC { /// Get the highest block voted and check if there's a quorum of votes for it. To have a quorum /// in this situation, we require 2*f+1 votes, where f is the maximum number of faulty replicas. pub fn high_vote(&self, genesis: &Genesis) -> Option { - let mut count: HashMap<_, usize> = HashMap::new(); + let mut count: HashMap<_, u64> = HashMap::new(); for (msg, signers) in &self.map { if let Some(v) = &msg.high_vote { - *count.entry(v.proposal).or_default() += signers.count(); + *count.entry(v.proposal).or_default() += genesis.validators.weight(signers); } } // We only take one value from the iterator because there can only be at most one block with a quorum of 2f+1 votes. - let min = 2 * genesis.validators.faulty_replicas() + 1; + let min = 2 * genesis.validators.max_faulty_weight() + 1; count.into_iter().find(|x| x.1 >= min).map(|x| x.0) } @@ -101,6 +101,7 @@ impl PrepareQC { pub fn verify(&self, genesis: &Genesis) -> Result<(), PrepareQCVerifyError> { use PrepareQCVerifyError as Error; let mut sum = Signers::new(genesis.validators.len()); + // Check the ReplicaPrepare messages. for (i, (msg, signers)) in self.map.iter().enumerate() { if msg.view != self.view { @@ -126,11 +127,12 @@ impl PrepareQC { sum |= signers; } - // Verify that we have enough signers. + // Verify the signers' weight is enough. + let weight = genesis.validators.weight(&sum); let threshold = genesis.validators.threshold(); - if sum.count() < threshold { + if weight < threshold { return Err(Error::NotEnoughSigners { - got: sum.count(), + got: weight, want: threshold, }); } @@ -138,7 +140,7 @@ impl PrepareQC { let messages_and_keys = self.map.clone().into_iter().flat_map(|(msg, signers)| { genesis .validators - .iter() + .iter_keys() .enumerate() .filter(|(i, _)| signers.0[*i]) .map(|(_, pk)| (msg.clone(), pk)) @@ -149,6 +151,14 @@ impl PrepareQC { .verify_messages(messages_and_keys) .map_err(Error::BadSignature) } + + /// Calculates the weight of current PrepareQC signing validators + pub fn weight(&self, committee: &validator::Committee) -> u64 { + self.map + .values() + .map(|signers| committee.weight(signers)) + .sum() + } } /// A Prepare message from a leader. diff --git a/node/libs/roles/src/validator/messages/tests.rs b/node/libs/roles/src/validator/messages/tests.rs index 8ba950c2..ea2c8e8b 100644 --- a/node/libs/roles/src/validator/messages/tests.rs +++ b/node/libs/roles/src/validator/messages/tests.rs @@ -30,11 +30,29 @@ fn fork() -> Fork { } } -/// Hardcoded genesis. -fn genesis() -> Genesis { +/// Hardcoded v0 genesis. 
+fn genesis_v0() -> Genesis { Genesis { - validators: ValidatorSet::new(keys().iter().map(|k| k.public())).unwrap(), + validators: Committee::new(keys().iter().map(|k| WeightedValidator { + key: k.public(), + weight: 1, + })) + .unwrap(), fork: fork(), + version: GenesisVersion(0), + } +} + +/// Hardcoded v1 genesis. +fn genesis_v1() -> Genesis { + Genesis { + validators: Committee::new(keys().iter().map(|k| WeightedValidator { + key: k.public(), + weight: 1, + })) + .unwrap(), + fork: fork(), + version: GenesisVersion(1), } } @@ -52,13 +70,23 @@ fn payload_hash_change_detector() { /// Even if it was, ALL versions of genesis need to be supported FOREVER, /// unless we introduce dynamic regenesis. #[test] -fn genesis_hash_change_detector() { +fn genesis_v0_hash_change_detector() { let want: GenesisHash = Text::new( "genesis_hash:keccak256:9c9bfa303e8d2d451a7fadd327f5f1b957a37c84d7b27b9e1cf7b92fd83af7ae", ) .decode() .unwrap(); - assert_eq!(want, genesis().hash()); + assert_eq!(want, genesis_v0().hash()); +} + +#[test] +fn genesis_v1_hash_change_detector() { + let want: GenesisHash = Text::new( + "genesis_hash:keccak256:6370cfce637395629f05599082993c446c2c66145d440287a985ac98ad210b41", + ) + .decode() + .unwrap(); + assert_eq!(want, genesis_v1().hash()); } mod version1 { @@ -107,7 +135,7 @@ mod version1 { /// Hardcoded `CommitQC`. fn commit_qc() -> CommitQC { - let genesis = genesis(); + let genesis = genesis_v1(); let replica_commit = replica_commit(); let mut x = CommitQC::new(replica_commit.clone(), &genesis); for k in keys() { @@ -135,7 +163,7 @@ mod version1 { /// Hardcoded `PrepareQC`. fn prepare_qc() -> PrepareQC { let mut x = PrepareQC::new(view()); - let genesis = genesis(); + let genesis = genesis_v1(); let replica_prepare = replica_prepare(); for k in keys() { x.add(&k.sign_msg(replica_prepare.clone()), &genesis); diff --git a/node/libs/roles/src/validator/testonly.rs b/node/libs/roles/src/validator/testonly.rs index 660ece10..9af49731 100644 --- a/node/libs/roles/src/validator/testonly.rs +++ b/node/libs/roles/src/validator/testonly.rs @@ -1,9 +1,10 @@ //! Test-only utilities. use super::{ - AggregateSignature, BlockHeader, BlockNumber, CommitQC, ConsensusMsg, FinalBlock, Fork, - ForkNumber, Genesis, GenesisHash, LeaderCommit, LeaderPrepare, Msg, MsgHash, NetAddress, - Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, ReplicaCommit, - ReplicaPrepare, SecretKey, Signature, Signed, Signers, ValidatorSet, View, ViewNumber, + AggregateSignature, BlockHeader, BlockNumber, CommitQC, Committee, ConsensusMsg, FinalBlock, + Fork, ForkNumber, Genesis, GenesisHash, GenesisVersion, LeaderCommit, LeaderPrepare, Msg, + MsgHash, NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, + ReplicaCommit, ReplicaPrepare, SecretKey, Signature, Signed, Signers, View, ViewNumber, + WeightedValidator, }; use bit_vec::BitVec; use rand::{ @@ -20,11 +21,16 @@ pub struct Setup(SetupInner); impl Setup { /// New `Setup` with a given `fork`. 
- pub fn new_with_fork(rng: &mut impl Rng, validators: usize, fork: Fork) -> Self { - let keys: Vec = (0..validators).map(|_| rng.gen()).collect(); + pub fn new_with_fork(rng: &mut impl Rng, weights: Vec, fork: Fork) -> Self { + let keys: Vec = (0..weights.len()).map(|_| rng.gen()).collect(); let genesis = Genesis { - validators: ValidatorSet::new(keys.iter().map(|k| k.public())).unwrap(), + validators: Committee::new(keys.iter().enumerate().map(|(i, k)| WeightedValidator { + key: k.public(), + weight: weights[i], + })) + .unwrap(), fork, + ..Default::default() }; Self(SetupInner { keys, @@ -35,11 +41,16 @@ impl Setup { /// New `Setup`. pub fn new(rng: &mut impl Rng, validators: usize) -> Self { + Self::new_with_weights(rng, vec![1; validators]) + } + + /// New `Setup`. + pub fn new_with_weights(rng: &mut impl Rng, weights: Vec) -> Self { let fork = Fork { number: ForkNumber(rng.gen_range(0..100)), first_block: BlockNumber(rng.gen_range(0..100)), }; - Self::new_with_fork(rng, validators, fork) + Self::new_with_fork(rng, weights, fork) } /// Next block to finalize. @@ -191,10 +202,17 @@ impl Distribution for Standard { Genesis { validators: rng.gen(), fork: rng.gen(), + version: rng.gen(), } } } +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> GenesisVersion { + GenesisVersion(rng.gen_range(0..=GenesisVersion::CURRENT.0)) + } +} + impl Distribution for Standard { fn sample(&self, rng: &mut R) -> PayloadHash { PayloadHash(rng.gen()) @@ -292,11 +310,14 @@ impl Distribution for Standard { } } -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> ValidatorSet { +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Committee { let count = rng.gen_range(1..11); - let public_keys = (0..count).map(|_| rng.gen()); - ValidatorSet::new(public_keys).unwrap() + let public_keys = (0..count).map(|_| WeightedValidator { + key: rng.gen(), + weight: 1, + }); + Committee::new(public_keys).unwrap() } } diff --git a/node/libs/roles/src/validator/tests.rs b/node/libs/roles/src/validator/tests.rs index 5e837911..d9975ed0 100644 --- a/node/libs/roles/src/validator/tests.rs +++ b/node/libs/roles/src/validator/tests.rs @@ -200,12 +200,15 @@ fn test_commit_qc() { let ctx = ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); + // This will create equally weighted validators let setup1 = Setup::new(rng, 6); let setup2 = Setup::new(rng, 6); let genesis3 = Genesis { - validators: ValidatorSet::new(setup1.genesis.validators.iter().take(3).cloned()).unwrap(), + validators: Committee::new(setup1.genesis.validators.iter().take(3).cloned()).unwrap(), fork: setup1.genesis.fork.clone(), + ..Default::default() }; + let validator_weight = setup1.genesis.validators.total_weight() / 6; for i in 0..setup1.keys.len() + 1 { let view = rng.gen(); @@ -213,7 +216,8 @@ fn test_commit_qc() { for key in &setup1.keys[0..i] { qc.add(&key.sign_msg(qc.message.clone()), &setup1.genesis); } - if i >= setup1.genesis.validators.threshold() { + let expected_weight = i as u64 * validator_weight; + if expected_weight >= setup1.genesis.validators.threshold() { qc.verify(&setup1.genesis).unwrap(); } else { assert_matches!( @@ -234,11 +238,13 @@ fn test_prepare_qc() { let ctx = ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); + // This will create equally weighted validators let setup1 = Setup::new(rng, 6); let setup2 = Setup::new(rng, 6); let genesis3 = Genesis { - validators: ValidatorSet::new(setup1.genesis.validators.iter().take(3).cloned()).unwrap(), + validators: 
Committee::new(setup1.genesis.validators.iter().take(3).cloned()).unwrap(), fork: setup1.genesis.fork.clone(), + ..Default::default() }; let view: ViewNumber = rng.gen(); @@ -254,7 +260,8 @@ fn test_prepare_qc() { &setup1.genesis, ); } - if n >= setup1.genesis.validators.threshold() { + let expected_weight = n as u64 * setup1.genesis.validators.total_weight() / 6; + if expected_weight >= setup1.genesis.validators.threshold() { qc.verify(&setup1.genesis).unwrap(); } else { assert_matches!( @@ -268,3 +275,58 @@ fn test_prepare_qc() { assert!(qc.verify(&genesis3).is_err()); } } + +#[test] +fn test_validator_committee_weights() { + let ctx = ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + // Validators with non-uniform weights + let setup = Setup::new_with_weights(rng, vec![1000, 600, 800, 6000, 900, 700]); + // Expected sum of the validators weights + let sums = [1000, 1600, 2400, 8400, 9300, 10000]; + + let view: ViewNumber = rng.gen(); + let msg = make_replica_prepare(rng, view, &setup); + let mut qc = PrepareQC::new(msg.view.clone()); + for (n, weight) in sums.iter().enumerate() { + let key = &setup.keys[n]; + qc.add(&key.sign_msg(msg.clone()), &setup.genesis); + let signers = &qc.map[&msg]; + assert_eq!(setup.genesis.validators.weight(signers), *weight); + } +} + +#[test] +fn test_committee_weights_overflow_check() { + let ctx = ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + let validators: Vec = [u64::MAX / 5; 6] + .iter() + .map(|w| WeightedValidator { + key: rng.gen::().public(), + weight: *w, + }) + .collect(); + + // Creation should overflow + assert_matches!(Committee::new(validators), Err(_)); +} + +#[test] +fn test_committee_with_zero_weights() { + let ctx = ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + let validators: Vec = [1000, 0, 800, 6000, 0, 700] + .iter() + .map(|w| WeightedValidator { + key: rng.gen::().public(), + weight: *w, + }) + .collect(); + + // Committee creation should error on zero weight validators + assert_matches!(Committee::new(validators), Err(_)); +}
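For reference, the quorum arithmetic that the diff above switches to (summed validator weights instead of validator counts) reduces to a few lines. The sketch below is a simplified, standalone illustration rather than the real zksync_consensus_roles API: Committee and WeightedValidator here are stand-ins for validator::Committee and validator::WeightedValidator, the &[bool] mask stands in for Signers, and the weights are the ones exercised by test_validator_committee_weights.

// Simplified, standalone sketch of the weight arithmetic introduced above.
struct WeightedValidator {
    weight: u64,
}

struct Committee {
    vec: Vec<WeightedValidator>,
}

impl Committee {
    /// Sum of all validators' weights in the committee.
    fn total_weight(&self) -> u64 {
        self.vec.iter().map(|v| v.weight).sum()
    }

    /// Maximal faulty weight f, from n = 5*f + 1, i.e. floor((n - 1) / 5).
    fn max_faulty_weight(&self) -> u64 {
        (self.total_weight() - 1) / 5
    }

    /// Quorum threshold: n - f.
    fn threshold(&self) -> u64 {
        self.total_weight() - self.max_faulty_weight()
    }

    /// Weight of the validators whose bit is set in the signer mask.
    fn weight(&self, signers: &[bool]) -> u64 {
        assert_eq!(self.vec.len(), signers.len());
        self.vec
            .iter()
            .zip(signers)
            .filter(|(_, signed)| **signed)
            .map(|(v, _)| v.weight)
            .sum()
    }
}

fn main() {
    // Same non-uniform weights as in `test_validator_committee_weights`.
    let committee = Committee {
        vec: [1000, 600, 800, 6000, 900, 700]
            .into_iter()
            .map(|weight| WeightedValidator { weight })
            .collect(),
    };
    assert_eq!(committee.total_weight(), 10_000);
    assert_eq!(committee.max_faulty_weight(), 1_999); // floor((10_000 - 1) / 5)
    assert_eq!(committee.threshold(), 8_001);

    // The heaviest validator alone does not reach a quorum...
    assert!(committee.weight(&[false, false, false, true, false, false]) < committee.threshold());
    // ...but the first four validators together do (1000 + 600 + 800 + 6000 = 8400).
    assert!(committee.weight(&[true, true, true, true, false, false]) >= committee.threshold());
}

With these weights the committee total is 10,000, at most 1,999 of that weight may be faulty, and any quorum (PrepareQC or CommitQC) needs at least 8,001 weight, which is the same check the leader now performs via validators.weight(&signers) against validators.threshold().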